| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
app/operational/admin/__init__.py
|
Anioko/reusable
| 0
|
12782651
|
<filename>app/operational/admin/__init__.py
from app.operational.admin.views import admin # noqa
from app.operational.admin.users import admin # noqa
from app.operational.admin.contact import admin # noqa
from app.operational.admin.payment import admin # noqa
from app.operational.admin.pricingplan import admin # noqa
from app.operational.admin.subscription import admin # noqa
from app.operational.admin.organisation import admin # noqa
from app.operational.admin.message import admin # noqa
from app.operational.admin.texts import admin # noqa
from app.operational.admin.marketplace import admin # noqa
from app.operational.admin.frontend import admin # noqa
| 1.195313
| 1
|
convert_grayscale.py
|
Programista3/Python-OpenCV-Examples
| 0
|
12782652
|
<reponame>Programista3/Python-OpenCV-Examples
import cv2
image = cv2.imread('media/Leaves.jpg', 1) # Load a color image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Convert image to grayscale
cv2.imshow('Image in grayscale', gray) # Show image
cv2.waitKey(0)
cv2.destroyAllWindows()
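# Optional follow-up sketch (not in the original script; 'media/Leaves_gray.jpg'
# is an assumed output path): persist the grayscale result to disk.
cv2.imwrite('media/Leaves_gray.jpg', gray)  # Returns True if the file was written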
| 3.78125
| 4
|
src/profiling_command.py
|
chilin0525/model-layer-profiling
| 0
|
12782653
|
<gh_stars>0
def generate_command(model_type, gpu_idx, command):
dlprof = "dlprof --reports all" + \
" --force=true" + \
" --mode=" + model_type + \
" --formats=json" + \
" --output_path=log " + command
return dlprof
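# Usage sketch (assumed arguments, not from the original repository). Note that
# gpu_idx is accepted but never used when the command string is assembled.
if __name__ == "__main__":
    # Prints: dlprof --reports all --force=true --mode=pytorch --formats=json --output_path=log python train.py
    print(generate_command("pytorch", 0, "python train.py"))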
| 1.75
| 2
|
src/striga/service/sitebus/_stsvcsb_view.py
|
ateska/striga
| 0
|
12782654
|
import os, logging as L
import striga.core.exception
import striga.server.application
from ._stsvcsb_utils import PathLimiter, LoabableObject
###
class View(PathLimiter, LoabableObject):
'''
Process bus object that executes Striga views
'''
def __init__(self, rootdir, source, mode, entry = 'main', pathlimit = '==0'):
app = striga.server.application.GetInstance()
loadable = app.Services.Loader.LoadStrigaFile(os.path.abspath(os.path.join(rootdir, source)), buildmode = mode, doload = False)
PathLimiter.__init__(self, pathlimit)
LoabableObject.__init__(self, loadable)
self.Entry = entry
self.EntryPoint = None
def __call__(self, ctx, path, *args, **kwargs):
self.CheckPath(path)
if self.EntryPoint is None:
#TODO: Here is a correct place for lazy loading (when self.LoabableObject.IsLoaded is False and self.LoabableObject.GetError is None)
# - launch worker that will call self.LoabableObject.Load() (retry option below must be implemented)
#TODO: Implement error reporting (Striga file is not loaded - can contain error)
#TODO: Handle the possibility that the loader is still running (error in self.LoabableObject is None)
# - wait for some reasonable amount of time and retry
if self.LoabableObject.IsLoaded():
L.warning("Striga view file '%s' is loaded but is doesn't provide striga view interface" % (str(self.LoabableObject)))
raise striga.core.exception.StrigaBusError('NotFound')
L.warning("Striga view '%s' is not loaded (yet) - it is in status '%s'" % (str(self.LoabableObject), self.LoabableObject.GetStatusString()))
raise striga.core.exception.StrigaBusError('NotLoaded')
ctx.res.SetContentType('text/html')
ctx.res.SetCacheAge(0)
out = self.LoabableObject.OutClass(ctx.res.Write)
self.EntryPoint(ctx, out, *args, **kwargs)
def _OnLOLoaded(self, strigafile):
self.EntryPoint = None
module = self.LoabableObject.GetModule()
if not hasattr(module, self.Entry):
L.warning("Striga file '%s' do not contain entry point '%s'" % (str(self.LoabableObject), self.Entry))
return
EntryPoint = getattr(module, self.Entry)
if not callable(EntryPoint):
L.warning("Striga file '%s' entry point '%s' is not callable" % (str(self.LoabableObject), self.Entry))
return
if not hasattr(EntryPoint, 'StrigaViewEntry'):
L.warning("Striga file '%s' entry point '%s' is not Striga entry (use decorator @StrigaViewEntry)" % (str(self.LoabableObject), self.Entry))
return
self.EntryPoint = EntryPoint
L.info("Striga view '%s' loaded" % str(strigafile))
def _OnLOFailed(self, strigafile):
self.EntryPoint = None
L.info("Striga view '%s' unloaded" % str(strigafile))
| 2.078125
| 2
|
emulator/emulatorui/stack.py
|
joshwatson/f-ing-around-with-binaryninja
| 88
|
12782655
|
<reponame>joshwatson/f-ing-around-with-binaryninja
from binaryninja import (BinaryDataNotification, BinaryReader,
BinaryView, BinaryViewType, Settings)
from PySide2.QtCore import QAbstractTableModel, Qt
from PySide2.QtGui import QFont
from PySide2.QtWidgets import QHeaderView, QTableView
class EmulatorStackModel(QAbstractTableModel, BinaryDataNotification):
def __init__(self, view: BinaryView):
QAbstractTableModel.__init__(self)
BinaryDataNotification.__init__(self)
try:
self.view = view
self.view.session_data['emulator.stack.model'] = self
self.memory_view = view.session_data.get('emulator.memory.view')
self.font_name = Settings().get_string('ui.font.name')
self.font_size = Settings().get_integer('ui.font.size')
if self.memory_view is None:
return
self.memory_view.register_notification(self)
self.br = BinaryReader(self.memory_view, self.view.endianness)
if self.view.address_size == 1:
self.br.read_ptr = self.br.read8
elif self.view.address_size == 2:
self.br.read_ptr = self.br.read16
elif self.view.address_size == 4:
self.br.read_ptr = self.br.read32
elif self.view.address_size == 8:
self.br.read_ptr = self.br.read64
except Exception as e:
print(e)
self.stack = []
def rowCount(self, parent):
return 0x100 // self.view.address_size
def columnCount(self, parent):
return 2
def data(self, index, role=Qt.DisplayRole):
if role == Qt.CheckStateRole:
return None
if role == Qt.FontRole:
return QFont(self.font_name, self.font_size)
size = len(self.stack)
if 0 == size or size <= index.row():
return
return hex(self.stack[index.row()][index.column()])
def headerData(self, section, orientation, role=Qt.DisplayRole):
if orientation == Qt.Orientation.Vertical:
return None
if role != Qt.DisplayRole:
return None
return ['Address', 'Value'][
section
]
def setData(self, index, value, role=Qt.EditRole):
if self.view.arch is None:
return False
emulator = self.view.session_data['emulator']
if value.startswith('0x'):
try:
value = int(value, 16)
except ValueError:
return False
elif value.isnumeric():
value = int(value)
else:
return False
offset = self.stack[index.row()][0]
emulator.write_memory(offset, value, self.view.address_size)
return True
def flags(self, index):
if index.column() == 1:
return Qt.ItemIsEditable | Qt.ItemIsEnabled | Qt.ItemIsSelectable
else:
return Qt.NoItemFlags
def data_written(self, view: BinaryView, offset: int, length: int) -> None:
sp = self.view.arch.stack_pointer
emulator = self.view.session_data['emulator']
try:
stack_pointer = emulator.read_register(sp)
except Exception as e:
print(e)
self.stack = []
return
if offset > stack_pointer + 0x100 or offset < stack_pointer:
return
self.update(stack_pointer)
def update(self, stack_pointer):
self.beginResetModel()
self.br.seek(stack_pointer)
self.stack = []
for i in range(0, 0x100, self.view.address_size):
self.stack.append((self.br.offset, self.br.read_ptr()))
self.endResetModel()
class EmulatorStackView(QTableView):
def __init__(self, parent, view):
super().__init__(parent)
self.parent = parent
self.view = view
self.view.session_data['emulator.stack.widget'] = self
self.verticalHeader().setVisible(False)
self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
| 2.265625
| 2
|
lib/galaxy/job_metrics/formatting.py
|
rikeshi/galaxy
| 1,085
|
12782656
|
<reponame>rikeshi/galaxy
"""Utilities related to formatting job metrics for human consumption."""
class JobMetricFormatter:
"""Format job metric key-value pairs for human consumption in Web UI."""
def format(self, key, value):
return (str(key), str(value))
def seconds_to_str(value):
"""Convert seconds to a simple simple string describing the amount of time."""
mins, secs = divmod(value, 60)
hours, mins = divmod(mins, 60)
if value < 60:
return f"{secs} second{'s' if secs != 1 else ''}"
elif value < 3600:
return f"{mins} minute{'s' if mins != 1 else ''}"
else:
return f"{hours} hour{'s' if hours != 1 else ''} and {mins} minute{'s' if mins != 1 else ''}"
| 3.078125
| 3
|
condolence_models/common.py
|
naitian/condolence-models
| 2
|
12782657
|
<reponame>naitian/condolence-models<filename>condolence_models/common.py<gh_stars>1-10
import logging
import os
import shutil
import tempfile
from itertools import islice
import requests
import torch
from tqdm import tqdm
from .bert_classifier.classifier import BertClassifier
from .bert_classifier.utils import preprocess
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def load_model(model_type, path, device):
if not os.path.isfile(path):
logger.info(f'Model {model_type} does not exist at {path}. Attempting to download it.')
model = f'{model_type}_model'
fetch_pretrained_model(model, path)
model = BertClassifier(2)
model.load_state_dict(torch.load(path, map_location=device))
model.to(device)
model.eval()
return model
def fetch_pretrained_model(model, model_path):
PRETRAINED_MODEL_ARCHIVE_MAP = {
'condolence_model': [
'http://www-personal.umich.edu/~naitian/files/condolence_model.pth',
'http://jurgens.people.si.umich.edu/models/condolence_model.pth'
],
'distress_model': [
'http://www-personal.umich.edu/~naitian/files/distress_model.pth',
'http://jurgens.people.si.umich.edu/models/distress_model.pth'
],
'empathy_model': [
'http://www-personal.umich.edu/~naitian/files/empathy.tar.gz',
'http://jurgens.people.si.umich.edu/models/empathy_model.tar.gz'
],
}
assert model in PRETRAINED_MODEL_ARCHIVE_MAP
model_urls = PRETRAINED_MODEL_ARCHIVE_MAP[model]
download_flag = False
for idx, model_url in enumerate(model_urls):
try:
temp_file = tempfile.NamedTemporaryFile()
logger.info(f'{model_path} not found in cache, downloading from {model_url} to {temp_file.name}')
req = requests.get(model_url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="KB", total=round(total / 1024) if total is not None else None)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(1)
temp_file.write(chunk)
progress.close()
temp_file.flush()
temp_file.seek(0)
download_flag = True
break
except Exception as e:
logger.warning(f'Download from {idx + 1}/{len(model_urls)} mirror failed with an exception of\n{str(e)}')
try:
temp_file.close()
except Exception as e_file:
logger.warning(f'temp_file failed with an exception of \n{str(e_file)}')
continue
if not download_flag:
logger.warning('Download from all mirrors failed. Please retry.')
return
logger.info(f'Model {model} was downloaded to a tmp file.')
logger.info(f'Copying tmp file to {model_path}.')
os.makedirs(os.path.dirname(model_path), exist_ok=True)
with open(model_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info(f'Copied tmp model file to {model_path}.')
temp_file.close()
def batch_generator(iterable, tokenizer, batch_size=16, device="cpu"):
ct = iter(iterable)
piece = islice(ct, batch_size)
while piece:
input_list = []
for text in piece:
tokens = tokenizer.tokenize(preprocess(text))
tokens = tokens[:128]
indexed_toks = tokenizer.convert_tokens_to_ids(tokens)
indexed_toks += [0] * (128 - len(indexed_toks))
input_list.append(torch.tensor(indexed_toks).unsqueeze(0))
if len(input_list) == 0:
return
batch_inputs = torch.cat(input_list, 0)
yield batch_inputs.to(device)
piece = islice(ct, batch_size)
| 2.21875
| 2
|
recipes/binutils/all/conanfile.py
|
dyndrite/conan-center-index
| 1
|
12782658
|
from conans import ConanFile, AutoToolsBuildEnvironment, tools
from conans.errors import ConanInvalidConfiguration
import functools
import os
import re
import typing
import unittest
required_conan_version = ">=1.43.0"
# This recipe includes a selftest to test conversion of os/arch to triplets (and vice versa)
# Run it using `python -m unittest conanfile.py`
class BinutilsConan(ConanFile):
name = "binutils"
description = "The GNU Binutils are a collection of binary tools."
license = "GPL-2.0-or-later"
url = "https://github.com/conan-io/conan-center-index/"
homepage = "https://www.gnu.org/software/binutils"
topics = ("binutils", "ld", "linker", "as", "assembler", "objcopy", "objdump")
settings = "os", "arch", "compiler", "build_type"
_PLACEHOLDER_TEXT = "__PLACEHOLDER__"
options = {
"multilib": [True, False],
"with_libquadmath": [True, False],
"target_arch": "ANY",
"target_os": "ANY",
"target_triplet": "ANY",
"prefix": "ANY",
}
default_options = {
"multilib": True,
"with_libquadmath": True,
"target_arch": _PLACEHOLDER_TEXT, # Initialized in configure, checked in validate
"target_os": _PLACEHOLDER_TEXT, # Initialized in configure, checked in validate
"target_triplet": _PLACEHOLDER_TEXT, # Initialized in configure, checked in validate
"prefix": _PLACEHOLDER_TEXT, # Initialized in configure (NOT config_options, because it depends on target_{arch,os})
}
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
@property
def _settings_target(self):
return getattr(self, "settings_target", None) or self.settings
def export_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def config_options(self):
del self.settings.compiler.cppstd
del self.settings.compiler.libcxx
def configure(self):
if self.options.target_triplet == self._PLACEHOLDER_TEXT:
if self.options.target_arch == self._PLACEHOLDER_TEXT:
# If target triplet and target arch are not set, initialize it from the target settings
self.options.target_arch = str(self._settings_target.arch)
if self.options.target_os == self._PLACEHOLDER_TEXT:
# If target triplet and target os are not set, initialize it from the target settings
self.options.target_os = str(self._settings_target.os)
# Initialize the target_triplet from the target arch and target os
self.options.target_triplet = _GNUTriplet.from_archos(_ArchOs(arch=str(self.options.target_arch), os=str(self.options.target_os), extra=dict(self._settings_target.values_list))).triplet
else:
gnu_triplet_obj = _GNUTriplet.from_text(str(self.options.target_triplet))
archos = _ArchOs.from_triplet(gnu_triplet_obj)
if self.options.target_arch == self._PLACEHOLDER_TEXT:
# If target arch is not set, deduce it from the target triplet
self.options.target_arch = archos.arch
if self.options.target_os == self._PLACEHOLDER_TEXT:
# If target os is not set, deduce it from the target triplet
self.options.target_os = archos.os
if self.options.prefix == self._PLACEHOLDER_TEXT:
self.options.prefix = f"{self.options.target_triplet}-"
self.output.info(f"binutils:target_arch={self.options.target_arch}")
self.output.info(f"binutils:target_os={self.options.target_os}")
self.output.info(f"binutils:target_triplet={self.options.target_triplet}")
def validate(self):
if self.settings.compiler in ("msvc", "Visual Studio"):
raise ConanInvalidConfiguration("This recipe does not support building binutils by this compiler")
if self.options.target_os == "Macos":
raise ConanInvalidConfiguration("cci does not support building binutils for Macos since binutils is degraded there (no as/ld + armv8 does not build)")
# Check whether the actual target_arch and target_os option are valid (they should be in settings.yml)
# FIXME: does there exist a stable Conan API to accomplish this?
if self.options.target_arch not in self.settings.arch.values_range:
raise ConanInvalidConfiguration(f"target_arch={self.options.target_arch} is invalid (possibilities={self.settings.arch.values_range})")
if self.options.target_os not in self.settings.os.values_range:
raise ConanInvalidConfiguration(f"target_os={self.options.target_os} is invalid (possibilities={self.settings.os.values_range})")
target_archos = _ArchOs(str(self.options.target_arch), str(self.options.target_os))
target_gnu_triplet = _GNUTriplet.from_text(str(self.options.target_triplet))
if not target_archos.is_compatible(target_gnu_triplet):
suggested_gnu_triplet = _GNUTriplet.from_archos(target_archos)
suggested_archos = _ArchOs.from_triplet(target_gnu_triplet)
raise ConanInvalidConfiguration(f"target_arch={target_archos.arch}/target_os={target_archos.os} is not compatible with {target_gnu_triplet.triplet}. Change target triplet to {suggested_gnu_triplet.triplet}, or change target_arch/target_os to {suggested_archos.arch}/{suggested_archos.os}.")
# Check, when used as build requirement in a cross build, whether the target arch/os agree
settings_target = getattr(self, "settings_target", None)
if settings_target is not None:
if self.options.target_arch != settings_target.arch:
raise ConanInvalidConfiguration(f"binutils:target_arch={self.options.target_arch} does not match target architecture={settings_target.arch}")
if self.options.target_os != settings_target.os:
raise ConanInvalidConfiguration(f"binutils:target_os={self.options.target_os} does not match target os={settings_target.os}")
def package_id(self):
del self.info.settings.compiler
def _raise_unsupported_configuration(self, key, value):
raise ConanInvalidConfiguration(f"This configuration is unsupported by this conan recip. Please consider adding support. ({key}={value})")
def build_requirements(self):
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def requirements(self):
self.requires("zlib/1.2.12")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
strip_root=True, destination=self._source_subfolder)
@property
def _exec_prefix(self):
return os.path.join(self.package_folder, "bin", "exec_prefix")
@functools.lru_cache(1)
def _configure_autotools(self):
autotools = AutoToolsBuildEnvironment(self, win_bash=self._settings_build.os == "Windows")
yes_no = lambda tf : "yes" if tf else "no"
conf_args = [
f"--target={self.options.target_triplet}",
f"--enable-multilib={yes_no(self.options.multilib)}",
"--with-system-zlib",
"--disable-nls",
f"--program-prefix={self.options.prefix}",
f"exec_prefix={tools.unix_path(self._exec_prefix)}",
]
autotools.configure(args=conf_args, configure_dir=self._source_subfolder)
return autotools
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
autotools = self._configure_autotools()
autotools.make()
def package(self):
self.copy("COPYING*", src=self._source_subfolder, dst="licenses")
autotools = self._configure_autotools()
autotools.install()
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.la")
def package_info(self):
bindir = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bindir))
self.env_info.PATH.append(bindir)
target_bindir = os.path.join(self._exec_prefix, str(self.options.target_triplet), "bin")
self.output.info("Appending PATH environment variable: {}".format(target_bindir))
self.env_info.PATH.append(target_bindir)
self.output.info(f"GNU triplet={self.options.target_triplet}")
self.user_info.gnu_triplet = self.options.target_triplet
self.output.info(f"executable prefix={self.options.prefix}")
self.user_info.prefix = self.options.prefix
# Add recipe path to enable running the self test in the test package.
# Don't use this property in production code. It's unsupported.
self.user_info.recipe_path = os.path.realpath(__file__)
class _ArchOs:
def __init__(self, arch: str, os: str, extra: typing.Optional[typing.Dict[str, str]]=None):
self.arch = arch
self.os = os
self.extra = extra if extra is not None else {}
def is_compatible(self, triplet: "_GNUTriplet") -> bool:
return self.arch in self.calculate_archs(triplet) and self.os == self.calculate_os(triplet)
_MACHINE_TO_ARCH_LUT = {
"arm": "armv7",
"aarch64": ("armv8", "armv9"),
"i386": "x86",
"i486": "x86",
"i586": "x86",
"i686": "x86",
"x86_64": "x86_64",
"riscv32": "riscv32",
"riscv64": "riscv64",
}
@classmethod
def calculate_archs(cls, triplet: "_GNUTriplet") -> typing.Tuple[str]:
if triplet.machine == "arm":
archs = "armv7" + ("hf" if "hf" in triplet.abi else "")
else:
archs = cls._MACHINE_TO_ARCH_LUT[triplet.machine]
if isinstance(archs, str):
archs = (archs, )
return archs
_GNU_OS_TO_OS_LUT = {
None: "baremetal",
"android": "Android",
"mingw32": "Windows",
"linux": "Linux",
"freebsd": "FreeBSD",
"darwin": "Macos",
"none": "baremetal",
"unknown": "baremetal",
}
@classmethod
def calculate_os(cls, triplet: "_GNUTriplet") -> str:
if triplet.abi and "android" in triplet.abi:
return "Android"
return cls._GNU_OS_TO_OS_LUT[triplet.os]
@classmethod
def from_triplet(cls, triplet: "_GNUTriplet") -> "_ArchOs":
archs = cls.calculate_archs(triplet)
os = cls.calculate_os(triplet)
extra = {}
if os == "Android" and triplet.abi:
m = re.match(".*([0-9]+)", triplet.abi)
if m:
extra["os.api_level"] = m.group(1)
# Assume first architecture
return cls(arch=archs[0], os=os, extra=extra)
def __eq__(self, other) -> bool:
if type(self) != type(other):
return False
if not (self.arch == other.arch and self.os == other.os):
return False
self_extra_keys = set(self.extra.keys())
other_extra_keys = set(other.extra.keys())
if (self_extra_keys - other_extra_keys) or (other_extra_keys - self_extra_keys):
return False
return True
def __repr__(self) -> str:
return f"<{type(self).__name__}:arch='{self.arch}',os='{self.os}',extra={self.extra}>"
class _GNUTriplet:
def __init__(self, machine: str, vendor: typing.Optional[str], os: typing.Optional[str], abi: typing.Optional[str]):
self.machine = machine
self.vendor = vendor
self.os = os
self.abi = abi
@property
def triplet(self) -> str:
return "-".join(p for p in (self.machine, self.vendor, self.os, self.abi) if p)
@classmethod
def from_archos(cls, archos: _ArchOs) -> "_GNUTriplet":
gnu_machine = cls.calculate_gnu_machine(archos)
gnu_vendor = cls.calculate_gnu_vendor(archos)
gnu_os = cls.calculate_gnu_os(archos)
gnu_abi = cls.calculate_gnu_abi(archos)
return cls(gnu_machine, gnu_vendor, gnu_os, gnu_abi)
@classmethod
def from_text(cls, text: str) -> "_GNUTriplet":
gnu_machine: str
gnu_vendor: typing.Optional[str]
gnu_os: typing.Optional[str]
gnu_abi: typing.Optional[str]
parts = text.split("-")
if not 2 <= len(parts) <= 4:
raise ValueError("Wrong number of GNU triplet components. Count must lie in range [2, 4]. format=$machine(-$vendor)?(-$os)?(-$abi)?")
gnu_machine = parts[0]
parts = parts[1:]
if any(v in parts[-1] for v in cls.KNOWN_GNU_ABIS):
gnu_abi = parts[-1]
parts = parts[:-1]
else:
gnu_abi = None
if len(parts) == 2:
gnu_vendor = parts[0]
gnu_os = parts[1]
elif len(parts) == 1:
if parts[0] in _GNUTriplet.UNKNOWN_OS_ALIASES:
gnu_vendor = None
gnu_os = parts[0]
elif parts[0] in cls.OS_TO_GNU_OS_LUT.values():
gnu_vendor = None
gnu_os = parts[0]
else:
gnu_vendor = parts[0]
gnu_os = None
else:
gnu_vendor = None
gnu_os = None
return cls(gnu_machine, gnu_vendor, gnu_os, gnu_abi)
ARCH_TO_GNU_MACHINE_LUT = {
"x86": "i686",
"x86_64": "x86_64",
"armv7": "arm",
"armv7hf": "arm",
"armv8": "aarch64",
"riscv32": "riscv32",
"riscv64": "riscv64",
}
@classmethod
def calculate_gnu_machine(cls, archos: _ArchOs) -> str:
return cls.ARCH_TO_GNU_MACHINE_LUT[archos.arch]
UNKNOWN_OS_ALIASES = (
"unknown",
"none",
)
OS_TO_GNU_OS_LUT = {
"baremetal": "none",
"Android": "linux",
"FreeBSD": "freebsd",
"Linux": "linux",
"Macos": "darwin",
"Windows": "mingw32",
}
@classmethod
def calculate_gnu_os(cls, archos: _ArchOs) -> typing.Optional[str]:
if archos.os in ("baremetal", ):
if archos.arch in ("x86", "x86_64", ):
return None
elif archos.arch in ("riscv32", "riscv64"):
return "unknown"
return cls.OS_TO_GNU_OS_LUT[archos.os]
OS_TO_GNU_VENDOR_LUT = {
"Windows": "w64",
"baremetal": None,
}
@classmethod
def calculate_gnu_vendor(cls, archos: _ArchOs) -> typing.Optional[str]:
if archos.os in ("baremetal", "Android"):
return None
if archos.os in ("Macos", "iOS", "tvOS", "watchOS"):
return "apple"
return cls.OS_TO_GNU_VENDOR_LUT.get(archos.os, "pc")
@classmethod
def calculate_gnu_abi(cls, archos: _ArchOs) -> typing.Optional[str]:
if archos.os in ("baremetal", ):
if archos.arch in ("armv7",):
return "eabi"
else:
return "elf"
abi_start = None
if archos.os in ("Linux", ):
abi_start = "gnu"
elif archos.os in ("Android", ):
abi_start = "android"
else:
return None
if archos.arch in ("armv7",):
abi_suffix = "eabi"
elif archos.arch in ("armv7hf",):
abi_suffix = "eabihf"
else:
abi_suffix = ""
if archos.os in ("Android", ):
abi_suffix += str(archos.extra.get("os.api_level", ""))
return abi_start + abi_suffix
KNOWN_GNU_ABIS = (
"android",
"gnu",
"eabi",
"elf",
)
def __eq__(self, other: object) -> bool:
if type(self) != type(other):
return False
other: "_GNUTriplet"
return self.machine == other.machine and self.vendor == other.vendor and self.os == other.os and self.abi == other.abi
def __repr__(self) -> str:
def x(v):
if v is None:
return None
return f"'{v}'"
return f"<{type(self).__name__}:machine={x(self.machine)},vendor={x(self.vendor)},os={x(self.os)},abi={x(self.abi)}>"
class _TestOsArch2GNUTriplet(unittest.TestCase):
def test_linux_x86(self):
archos = _ArchOs(arch="x86", os="Linux")
self._test_osarch_to_gnutriplet(archos, _GNUTriplet(machine="i686", vendor="pc", os="linux", abi="gnu"), "i686-pc-linux-gnu")
self.assertEqual(_ArchOs("x86", "Linux"), _ArchOs.from_triplet(_GNUTriplet.from_text("i386-linux")))
self.assertEqual(_ArchOs("x86", "Linux"), _ArchOs.from_triplet(_GNUTriplet.from_text("i686-linux")))
self.assertEqual(_GNUTriplet("i486", None, "linux", None), _GNUTriplet.from_text("i486-linux"))
self.assertTrue(archos.is_compatible(_GNUTriplet.from_text("i486-linux")))
self.assertTrue(archos.is_compatible(_GNUTriplet.from_text("i486-linux-gnu")))
def test_linux_x86_64(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="x86_64", os="Linux"), _GNUTriplet(machine="x86_64", vendor="pc", os="linux", abi="gnu"), "x86_64-pc-linux-gnu")
def test_linux_armv7(self):
archos = _ArchOs(arch="armv7", os="Linux")
self._test_osarch_to_gnutriplet(archos, _GNUTriplet(machine="arm", vendor="pc", os="linux", abi="gnueabi"), "arm-pc-linux-gnueabi")
self.assertEqual(_GNUTriplet("arm", "pc", None, "gnueabi"), _GNUTriplet.from_text("arm-pc-gnueabi"))
self.assertEqual(_GNUTriplet("arm", "pc", None, "eabi"), _GNUTriplet.from_text("arm-pc-eabi"))
self.assertEqual(_ArchOs("armv7hf", "baremetal"), _ArchOs.from_triplet(_GNUTriplet.from_text("arm-pc-gnueabihf")))
self.assertTrue(archos.is_compatible(_GNUTriplet.from_text("arm-linux-gnueabi")))
self.assertTrue(archos.is_compatible(_GNUTriplet.from_text("arm-linux-eabi")))
self.assertFalse(archos.is_compatible(_GNUTriplet.from_text("arm-pc-linux-gnueabihf")))
self.assertFalse(archos.is_compatible(_GNUTriplet.from_text("arm-pc-gnueabihf")))
def test_linux_armv7hf(self):
archos = _ArchOs(arch="armv7hf", os="Linux")
self._test_osarch_to_gnutriplet(archos, _GNUTriplet(machine="arm", vendor="pc", os="linux", abi="gnueabihf"), "arm-pc-linux-gnueabihf")
self.assertEqual(_GNUTriplet("arm", "pc", None, "gnueabihf"), _GNUTriplet.from_text("arm-pc-gnueabihf"))
self.assertEqual(_ArchOs("armv7", "baremetal"), _ArchOs.from_triplet(_GNUTriplet.from_text("arm-pc-gnueabi")))
self.assertFalse(archos.is_compatible(_GNUTriplet.from_text("arm-linux-gnueabi")))
self.assertFalse(archos.is_compatible(_GNUTriplet.from_text("arm-linux-eabi")))
self.assertTrue(archos.is_compatible(_GNUTriplet.from_text("arm-pc-linux-gnueabihf")))
self.assertFalse(archos.is_compatible(_GNUTriplet.from_text("arm-pc-gnueabihf")))
def test_windows_x86(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="x86", os="Windows"), _GNUTriplet(machine="i686", vendor="w64", os="mingw32", abi=None), "i686-w64-mingw32")
def test_windows_x86_64(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="x86_64", os="Windows"), _GNUTriplet(machine="x86_64", vendor="w64", os="mingw32", abi=None), "x86_64-w64-mingw32")
def test_macos_x86_64(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="x86_64", os="Macos"), _GNUTriplet(machine="x86_64", vendor="apple", os="darwin", abi=None), "x86_64-apple-darwin")
def test_freebsd_x86_64(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="x86_64", os="FreeBSD"), _GNUTriplet(machine="x86_64", vendor="pc", os="freebsd", abi=None), "x86_64-pc-freebsd")
def test_baremetal_x86(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="x86", os="baremetal"), _GNUTriplet(machine="i686", vendor=None, os=None, abi="elf"), "i686-elf")
def test_baremetal_x86_64(self):
archos = _ArchOs(arch="x86_64", os="baremetal")
self._test_osarch_to_gnutriplet(archos, _GNUTriplet(machine="x86_64", vendor=None, os=None, abi="elf"), "x86_64-elf")
self.assertTrue(archos.is_compatible(_GNUTriplet.from_text("x86_64-elf")))
self.assertTrue(archos.is_compatible(_GNUTriplet.from_text("x86_64-none-elf")))
self.assertTrue(archos.is_compatible(_GNUTriplet.from_text("x86_64-unknown-elf")))
def test_baremetal_armv7(self):
archos = _ArchOs(arch="armv7", os="baremetal")
self._test_osarch_to_gnutriplet(archos, _GNUTriplet(machine="arm", vendor=None, os="none", abi="eabi"), "arm-none-eabi")
self.assertTrue(archos.is_compatible(_GNUTriplet.from_text("arm-none-eabi")))
def test_baremetal_armv8(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="armv8", os="baremetal"), _GNUTriplet(machine="aarch64", vendor=None, os="none", abi="elf"), "aarch64-none-elf")
def test_baremetal_riscv32(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="riscv32", os="baremetal"), _GNUTriplet(machine="riscv32", vendor=None, os="unknown", abi="elf"), "riscv32-unknown-elf")
def test_baremetal_riscv64(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="riscv64", os="baremetal"), _GNUTriplet(machine="riscv64", vendor=None, os="unknown", abi="elf"), "riscv64-unknown-elf")
def test_android_armv7(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="armv7", os="Android", extra={"os.api_level": "31"}), _GNUTriplet(machine="arm", vendor=None, os="linux", abi="androideabi31"), "arm-linux-androideabi31")
def test_android_armv8(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="armv8", os="Android", extra={"os.api_level": "24"}), _GNUTriplet(machine="aarch64", vendor=None, os="linux", abi="android24"), "aarch64-linux-android24")
def test_android_x86(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="x86", os="Android", extra={"os.api_level": "16"}), _GNUTriplet(machine="i686", vendor=None, os="linux", abi="android16"), "i686-linux-android16")
def test_android_x86_64(self):
self._test_osarch_to_gnutriplet(_ArchOs(arch="x86_64", os="Android", extra={"os.api_level": "29"}), _GNUTriplet(machine="x86_64", vendor=None, os="linux", abi="android29"), "x86_64-linux-android29")
self.assertEqual(_ArchOs(arch="x86_64", os="Android", extra={"os.api_level": "25"}), _ArchOs.from_triplet(_GNUTriplet.from_text("x86_64-linux-android29")))
def _test_osarch_to_gnutriplet(self, archos: _ArchOs, gnuobj_ref: _GNUTriplet, triplet_ref: str):
gnuobj = _GNUTriplet.from_archos(archos)
self.assertEqual(gnuobj_ref, gnuobj)
self.assertEqual(triplet_ref, gnuobj.triplet)
self.assertEqual(gnuobj_ref, _GNUTriplet.from_text(triplet_ref))
# self.assertEqual(triplet_ref, tools.get_gnu_triplet(archos.os, archos.arch, compiler="gcc"))
| 2.234375
| 2
|
tests/homework_1/task_1/test_vector.py
|
pyaiveoleg/semester_4_python
| 0
|
12782659
|
import unittest
from homeworks.homework_1.task_1.vector import Vector
class VectorTest(unittest.TestCase):
def test_empty_vector(self):
with self.assertRaises(ValueError):
self.assertEqual(Vector([]).length(), 0)
def test_int_length(self):
self.assertEqual(Vector([3, 4]).length(), 5)
def test_float_length(self):
self.assertAlmostEqual(Vector([0.1, 4, 3.5]).length(), 5.316013544)
def test_different_dimensions_scalar_product(self):
with self.assertRaises(ValueError):
Vector([1, 2]).scalar_product(Vector([1, 3, 4]))
def test_int_scalar_product(self):
self.assertEqual(Vector([2, 3]).scalar_product(Vector([1, 4])), 14)
def test_float_scalar_product(self):
first_v = Vector([3.5, 1.74, 0.896, 0.445])
second_v = Vector([1, -2.97, -1.065, -3.29])
self.assertAlmostEqual(first_v.scalar_product(second_v), -4.08609)
def test_self_scalar_product(self):
self.assertAlmostEqual(Vector([1, -2.97, -1.065]).scalar_product(Vector([1, -2.97, -1.065])), 10.955125)
def test_different_dimensions_angle(self):
with self.assertRaises(ValueError):
Vector([1, 2]).angle(Vector([1, 3, 4]))
def test_float_angle(self):
first_v = Vector([3.5, 1.74, 0.896, 0.445])
second_v = Vector([1, -2.97, -1.065, -3.29])
self.assertAlmostEqual(first_v.angle(second_v), 102.53349294109442)
def test_self_angle(self):
self.assertAlmostEqual(Vector([1, -2.97, -1.065]).angle(Vector([1, -2.97, -1.065])), 0.0)
| 3.890625
| 4
|
aliyun-python-sdk-nlp-automl/aliyunsdknlp_automl/__init__.py
|
yndu13/aliyun-openapi-python-sdk
| 1,001
|
12782660
|
__version__ = '0.0.9'
| 1.054688
| 1
|
tempstore/tempstore.py
|
ryanrichholt/tempstore
| 0
|
12782661
|
<filename>tempstore/tempstore.py
import os
import tempfile
import shutil
from collections import OrderedDict
class TempStore(object):
def __init__(self, name=None):
self.name = name
self.objs = OrderedDict()
self.dir = tempfile.TemporaryDirectory()
def cleanup(self):
"""Cleanup all objects in this TempStore
:return: None
"""
for obj in self.objs.values():
try:
obj.cleanup()
except AttributeError:
obj.close()
except FileNotFoundError:
pass
self.objs = OrderedDict()
@property
def paths(self):
"""Returns an OrderedDict that includes the paths to each object
:return: OrderedDict
"""
return OrderedDict([(n, obj.name) for n, obj in self.objs.items()])
def create(self, name):
"""Creates a new tempfile and returns the path
:param name: Name of the file to create
:return: str
"""
if name in self.objs:
raise ValueError('{} already exists in {}'.format(name, self))
fp = tempfile.NamedTemporaryFile(dir=self.dir.name)
self.objs[name] = fp
return fp.name
def copy(self, path, exist_ok=False):
"""Copy all objects in the TempStore to another location
:param path: Path to a directory where they will be copied
:param exist_ok: Ignore errors if the path already exists
:return: None
"""
if self.name is not None:
path = os.path.join(path, self.name)
os.makedirs(path, exist_ok=exist_ok)
for name, obj in self.objs.items():
src = obj.name
dest = os.path.join(path, name)
shutil.copy(src, dest)
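# Usage sketch (assumed names and a POSIX-style target directory, not part of
# the module): allocate a named temp file, fill it, copy the store, clean up.
if __name__ == '__main__':
    store = TempStore(name='example')
    path = store.create('output.txt')            # path of a NamedTemporaryFile
    with open(path, 'w') as fh:                  # re-opening the path works on POSIX
        fh.write('hello')
    store.copy('/tmp/tempstore_demo', exist_ok=True)  # -> /tmp/tempstore_demo/example/output.txt
    store.cleanup()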
| 3.375
| 3
|
recipe/import_test.py
|
regro-cf-autotick-bot/yeadon-feedstock
| 22
|
12782662
|
import yeadon
import yeadon.exceptions
import yeadon.human
import yeadon.inertia
import yeadon.segment
import yeadon.solid
import yeadon.ui
import yeadon.utils
import yeadon.tests
try:
import yeadon.gui
except ImportError: # mayavi not installed
pass
| 0.882813
| 1
|
sdk/attestation/azure-security-attestation/tests/preparers_async.py
|
rsdoherty/azure-sdk-for-python
| 2,728
|
12782663
|
<filename>sdk/attestation/azure-security-attestation/tests/preparers_async.py<gh_stars>1000+
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import Awaitable, List
from azure.security.attestation import AttestationType
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
from typing import Awaitable, Callable, Dict, Optional, Any, TypeVar, overload
T = TypeVar("T")
def AllAttestationTypes(func: Callable[..., Awaitable[T]] = None, **kwargs: Any):
"""Decorator to apply to function to add attestation_type kwarg for each attestation type."""
async def wrapper(*args, **kwargs) -> Callable[..., Awaitable[T]]:
for attestation_type in [
AttestationType.SGX_ENCLAVE,
AttestationType.OPEN_ENCLAVE,
AttestationType.TPM,
]:
await func(*args, attestation_type=attestation_type, **kwargs)
return wrapper
def AllInstanceTypes(
func: Callable[..., Awaitable[T]] = None, include_shared: bool = True, **kwargs: Any
):
"""Decorator to apply to function to add instance_url kwarg for each instance type."""
async def wrapper(*args, **kwargs) -> Callable[..., Awaitable[T]]:
instances = [] # type:List[str]
instances.append(kwargs.get("attestation_aad_url"))
instances.append(kwargs.get("attestation_isolated_url"))
if include_shared:
instances.append(
"https://shared"
+ kwargs.get("attestation_location_short_name")
+ "."
+ kwargs.get("attestation_location_short_name")
+ ".attest.azure.net"
)
for instance_url in instances:
await func(*args, instance_url=instance_url, **kwargs)
return wrapper
| 2.140625
| 2
|
glowtts/transformer/pe.py
|
revsic/tf-glow-tts
| 5
|
12782664
|
import numpy as np
import tensorflow as tf
class PositionalEncodings(tf.keras.Model):
"""Sinusoidal positional encoding generator.
"""
def __init__(self, channels: int, presize: int = 128):
"""Initializer.
Args:
channels: size of the channels.
presize: initial pe cache size.
"""
super().__init__()
self.channels = channels
self.size = presize
self.buffer = self.generate(presize)
def call(self, size: int) -> tf.Tensor:
"""Return cached positional encodings.
Args:
size: length of the pe.
Returns:
[tf.float32; [T, C]], sinusoidal positional encodings.
"""
if size <= self.size:
return self.buffer[:size]
# generate new cache
self.buffer = self.generate(size)
return self.buffer
def generate(self, size: int) -> tf.Tensor:
"""Generate positional encodings.
Args:
size: length of the pe.
Returns:
[tf.float32; [T, C]], sinusoidal positional encodings.
"""
# [tf.int32; [T]]
pos = tf.range(size)
# [tf.int32; [C//2]]
i = tf.range(0, self.channels, 2)
# [C//2], cast to float32 (i / channels would otherwise be float64)
denom = tf.exp(-np.log(10000) * tf.cast(i / self.channels, tf.float32))
# [T, C//2]
context = tf.cast(pos, tf.float32)[:, None] * denom[None]
# [T, C//2, 1]
context = context[..., None]
# [T, C//2, 2]
pe = tf.concat([tf.sin(context), tf.cos(context)], axis=-1)
# [T, C]
pe = tf.reshape(pe, [size, self.channels])
return pe
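# Usage sketch (assumed sizes, not from the original repo): 64-channel
# encodings for 16 positions; `generate` returns a [16, 64] float32 tensor,
# and calls through `call` reuse the cached buffer while size <= presize.
if __name__ == '__main__':
    pe = PositionalEncodings(channels=64)
    enc = pe.generate(16)
    print(enc.shape)  # (16, 64)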
| 2.46875
| 2
|
diskmth/DayGUI.py
|
Disk-MTH/How-to-install-python-on-windows
| 1
|
12782665
|
from tkinter import *
from PIL import Image, ImageTk
import Utils
import MainGUI
def day_gui(day_date):
# Create the frame
root = Tk()
# Initialisation of some useful variables
last_click_x = 0
last_click_y = 0
root_width = 700
root_height = 400
# Definition of some useful functions
def get_picture(path, is_day_picture):
if is_day_picture:
try:
picture = Image.open(Utils.get_resources_path("resources\\day\\day_" + str(day_date) + ".png"))
resized_picture = picture.resize((700, 350), Image.ANTIALIAS)
return ImageTk.PhotoImage(resized_picture)
except FileNotFoundError:
try:
return PhotoImage(file=Utils.get_resources_path("resources\\day\\not_found.png"))
except TclError:
pass
else:
try:
return PhotoImage(file=Utils.get_resources_path("resources\\" + path))
except TclError:
pass
def get_title(date):
if date == 1:
return "December 1st"
elif date == 2:
return "December 2nd"
elif date == 3:
return "December 3rd"
elif date == 21:
return "December 21st"
elif date == 22:
return "December 22nd"
elif date == 23:
return "December 23rd"
else:
return "December " + str(date) + "th"
def move_frame(event):
x, y = event.x - last_click_x + root.winfo_x(), event.y - last_click_y + root.winfo_y()
root.geometry("+%s+%s" % (x, y))
def mapped_frame(event):
root.overrideredirect(True)
def reduce_frame():
Utils.button_click_sound(False)
root.state('withdrawn')
root.overrideredirect(False)
root.state('iconic')
def close_frame():
Utils.button_click_sound(False)
root.destroy()
MainGUI.main_gui()
# Set basic parameters of frame
root.wm_attributes("-topmost", True)
root.geometry("700x400")
root.resizable(width=False, height=False)
root.iconbitmap(Utils.get_resources_path("resources\\icon\\app_icon.ico"))
root.bind("<Map>", mapped_frame)
# Add components to frame
label_background = Label(bg="white", width=700, height=400, bd=0)
label_background.place(x=0, y=0)
label_title = Label(text=get_title(day_date), font=("Segoe Script", 18), bd=0, bg="White")
label_title.place(x=root_width / 2 - label_title.winfo_reqwidth() / 2,
y=25 - label_title.winfo_reqheight() / 2)
label_move_area_picture = get_picture("day_move.png", False)
label_move_area = Label(image=label_move_area_picture, width=40, height=40, bd=0)
label_move_area.place(x=5, y=5)
label_move_area.bind("<B1-Motion>", move_frame)
button_reduce_picture = get_picture("buttons\\day_reduce.png", False)
button_reduce = Button(image=button_reduce_picture, bd=0, highlightthickness=0,
padx=40, pady=10, command=reduce_frame)
button_reduce.place(x=610, y=20)
button_close_picture = get_picture("buttons\\day_close.png", False)
button_close = Button(image=button_close_picture, bd=0, highlightthickness=0, padx=40, pady=40, command=close_frame)
button_close.place(x=655, y=5)
label_day_picture = get_picture(day_date, True)
label_day = Label(image=label_day_picture, width=700, height=350, bd=0)
label_day.place(x=0, y=50)
# Loop the frame
root.mainloop()
| 3.28125
| 3
|
icIceFonksiyon.py
|
cyrionp/PythonLectures
| 0
|
12782666
|
<filename>icIceFonksiyon.py
def greeting(name):
print("Hello ",name)
'''
print(greeting("Ali"))
sayHello=greeting
print(sayHello)
print(greeting)
del sayHello
print(greeting)
#print(sayHello)
#Encapsulation
def outer(num1):
def inner_increment(num1):
return num1+1
num2=inner_increment(num1)
print(num1,num2)
outer(10)
'''
def factorial(number):
if not isinstance(number,int):
raise TypeError("Number must be an integer")
if not number>=0:
raise ValueError("Number must be zero or positive")
def inner_factorial(number):
if number<=1: return 1
return number*inner_factorial(number-1)
return inner_factorial(number)
try: print(factorial(-2))
except Exception as ex: print(ex)
| 3.921875
| 4
|
graphscale/kvetch/create_db.py
|
schrockntemp/graphscaletemp
| 0
|
12782667
|
<filename>graphscale/kvetch/create_db.py
#import pymysql
#import pymysql.cursors;
#import pytest
#from kvetch_dbschema import create_kvetch_edges_table_sql
#
#
#
#if __name__ == "__main__":
# conn = pymysql.connect(host='localhost',
# user='magnus',
# password='<PASSWORD>',
# db='unittest_mysql_db',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
# sql = create_kvetch_edges_table_sql('test')
# print(sql)
# with conn.cursor() as cursor:
# cursor.execute(sql)
# conn.commit()
#
| 2
| 2
|
core/__init__.py
|
BenSmithers/MultiHex2
| 0
|
12782668
|
from .coordinates import HexID, hex_to_screen, screen_to_hex
from .core import Hex,Region, Catalog, RegionCatalog, EntityCatalog
from .map_entities import Entity, Government, Settlement
from .core import DRAWSIZE
| 1.054688
| 1
|
mrec/evaluation/__init__.py
|
imall100/mrec
| 392
|
12782669
|
class Evaluator(object):
"""
Compute metrics for recommendations that have been written to file.
Parameters
----------
compute_metrics : function(list,list)
The evaluation function which should accept two lists of predicted
and actual item indices.
max_items : int
The number of recommendations needed to compute the evaluation function.
"""
def __init__(self,compute_metrics,max_items):
self.compute_metrics = compute_metrics
self.max_items = max_items
def _add_metrics(self,predicted,actual):
metrics = self.compute_metrics(predicted,actual)
if metrics:
for m,val in metrics.items():
self.cum_metrics[m] += val
self.count += 1
def process(self,testdata,recsfile,start,end,offset=1):
"""
Parameters
----------
testdata : scipy sparse matrix
The test items for each user.
recsfile : str
Filepath to the recommendations. The file should contain TSV
of the form: user, item, score. IMPORTANT: the recommendations must
be sorted by user and score.
start : int
First user to evaluate.
end: int
One after the last user to evaluate.
offset : int
Index offset for users and items in recommendations file.
Returns
-------
cum_metrics : dict
Aggregated metrics i.e. total values for all users.
count : int
The number of users for whom metrics were computed.
"""
from collections import defaultdict
self.cum_metrics = defaultdict(float)
self.count = 0
last_user = start
recs = []
for line in open(recsfile):
user,item,score = line.strip().split('\t')
user = int(user)-1 # convert to 0-indexed
item = int(item)-1
if user >= end:
break
if user < start:
continue
if user != last_user:
self._add_metrics(recs,testdata[last_user,:].indices.tolist())
last_user = user
recs = []
if len(recs) < self.max_items:
recs.append(item)
self._add_metrics(recs,testdata[last_user,:].indices.tolist())
return self.cum_metrics,self.count
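# Usage sketch (hypothetical metric function and file names, not part of mrec):
# a simple hit-rate metric matching the expected function(list,list) signature.
if __name__ == '__main__':
    def hit_rate(predicted, actual):
        if not actual:
            return None
        return {'hit_rate': len(set(predicted) & set(actual)) / float(len(actual))}

    evaluator = Evaluator(compute_metrics=hit_rate, max_items=10)
    # cum_metrics, count = evaluator.process(testdata, 'recs.tsv', start=0, end=100)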
| 3.171875
| 3
|
wab/core/emails/models.py
|
BinNguyenVNN/wab-rest
| 0
|
12782670
|
from django.db.models import BooleanField, CharField, TextField
from wab.core.components.models import BaseModel
class EmailTemplate(BaseModel):
code = CharField("Specific code for core app", max_length=50, blank=True, null=True, editable=False, unique=True)
is_protected = BooleanField("Is protected", default=False)
content = TextField("Html content")
| 2.171875
| 2
|
sort_teams.py
|
jhallman5/fencing_tournament
| 0
|
12782671
|
<filename>sort_teams.py<gh_stars>0
import sys
import csv
from linked_list import Node, Linked_List
from model import create_linked_list, determine_num_pools, create_init_pools
LL = create_linked_list()
num_pools = determine_num_pools(LL)
create_init_pools(LL, num_pools)
| 2.140625
| 2
|
Puzzles/Easy/TextFormatting.py
|
Naheuldark/Codingame
| 0
|
12782672
|
<reponame>Naheuldark/Codingame<filename>Puzzles/Easy/TextFormatting.py
import sys
import math
import re
text = input().lower().strip()
# Remove excessive spaces
text = re.sub(r'\s{2,}', ' ', text)
# Remove spaces before and after punctuations
text = re.sub(r'\s?[^\s\w\d]\s?', lambda match: match.group().strip(), text)
# Remove duplicated punctuations
text = re.sub(r'[^\s\w\d]+', lambda match: match.group().strip()[0], text)
# Convert to Pascal case
def toPascalCase(s):
if not s:
return ''
chars = list(s)
chars[0] = chars[0].upper()
return ''.join(chars)
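# Example (illustrative): toPascalCase("hello world") returns "Hello world";
# only the first character of the whole string is upper-cased.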
text = '.'.join(toPascalCase(s) for s in text.split('.'))
# Add spaces after punctuation
text = re.sub(r'[^\s\w\d]', lambda match: match.group() + ' ', text)
print(text.strip())
| 3.6875
| 4
|
admin_toolbox/apps.py
|
sarendsen/django-admin-toolbox
| 12
|
12782673
|
<reponame>sarendsen/django-admin-toolbox
from django.apps import AppConfig
class SidebarConfig(AppConfig):
name = 'admin_toolbox'
| 1.226563
| 1
|
scripts/plot_cumulative_time.py
|
christophebedard/christophebedard.github.io
| 0
|
12782674
|
#!/usr/bin/env python3
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plot cumulative time based on daily time reporting data."""
from typing import List
from typing import Optional
from typing import Union
from datetime import date
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
include_plot_title = True
save_plots = True
def load_csv(filename: str) -> np.array:
with open(filename, 'rb') as file:
return np.loadtxt(
file,
delimiter=',',
skiprows=1,
usecols=(0,1),
dtype=str,
)
def filter_data(data: np.array) -> np.array:
return np.array([d for d in data if d[0] not in ('', 'total')])
def convert_data(data: np.array) -> np.array:
return np.array([[date.fromisoformat(d[0]), float(d[1])] for d in data])
def add_zeroth_datapoint(data: np.array) -> np.array:
return np.vstack([[[data[0,0], 0.0]], data])
def data_to_cumsum(data: np.array, col: int = 1) -> np.array:
data[:,col] = np.cumsum(data[:,col])
return data
def get_data(filename: str):
data = load_csv(filename)
data = filter_data(data)
data = convert_data(data)
data = add_zeroth_datapoint(data)
data = data_to_cumsum(data)
return data
def format_filename(string: str) -> str:
string = string.replace('(', '')
string = string.replace(')', '')
string = string.replace(' ', '_')
string = string.replace('\\', '')
return string.lower()
def plot_data(
data: np.array,
title: str,
major_formatter_str: str,
major_locator: Optional[mdates.RRuleLocator] = None,
yaxis_multiple_locator: Optional[int] = None,
colour: str = 'blue',
) -> None:
fig, ax = plt.subplots(1, 1)
ax.plot(data[:,0], data[:,1], '-', color=colour)
if include_plot_title:
ax.set(title=title)
ax.set(ylabel='cumulative time (h)')
if major_locator:
ax.xaxis.set_major_locator(major_locator)
ax.xaxis.set_major_formatter(mdates.DateFormatter(major_formatter_str))
if yaxis_multiple_locator:
ax.yaxis.set_major_locator(ticker.MultipleLocator(yaxis_multiple_locator))
ax.set_ylim(0)
ax.grid()
fig.autofmt_xdate()
if save_plots:
filename = format_filename(title)
fig.savefig(f'{filename}.png', bbox_inches='tight')
fig.savefig(f'{filename}.svg', bbox_inches='tight')
def plot_data_compare(
data: List[np.array],
title: str,
legends: List[str],
major_formatter_str: str,
major_locator: Optional[mdates.RRuleLocator] = None,
yaxis_multiple_locator: Optional[int] = None,
colours: Union[str, List[str]] = 'blue',
) -> None:
fig, ax = plt.subplots(1, 1)
for i in range(len(data)):
colour = colours if isinstance(colours, str) else colours[i]
d = data[i]
ax.plot(d[:,0], d[:,1], '-', color=colour)
total_time = d[-1,1]
legends[i] = legends[i] + f' ({total_time:g} h)'
if include_plot_title:
ax.set(title=title)
ax.set(ylabel='cumulative time (h)')
if major_locator:
ax.xaxis.set_major_locator(major_locator)
ax.xaxis.set_major_formatter(mdates.DateFormatter(major_formatter_str))
if yaxis_multiple_locator:
ax.yaxis.set_major_locator(ticker.MultipleLocator(yaxis_multiple_locator))
ax.set_ylim(0)
ax.legend(legends)#, loc='center', bbox_to_anchor=(0.3, 0.8))
ax.grid()
fig.autofmt_xdate()
if save_plots:
filename = format_filename(title)
fig.savefig(f'{filename}.png', bbox_inches='tight')
fig.savefig(f'{filename}.svg', bbox_inches='tight')
def main():
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=14)
plt.rc('axes', titlesize=20)
plt.rc('legend', fontsize=14)
# Under File, Download -> Comma-separated values (.csv, current sheet),
# download the 'Time' and 'Blog' sheets
data_time = get_data('rmw_email time tracking - Code.csv')
data_blog = get_data('rmw_email time tracking - Blog.csv')
plot_data(
data_time,
'rmw\_email code time investment',
'%Y %B',
colour='green',
)
plot_data(
data_blog,
'rmw\_email blog post time investment',
'%Y-%b-%d',
mdates.DayLocator((1,5,10,15,20,25)),
yaxis_multiple_locator=5,
colour='blue',
)
plot_data_compare(
[data_time, data_blog],
'Overall rmw\_email time investment',
['code', 'blog post'],
'%Y %B',
colours=['green', 'blue'],
)
plt.show()
if __name__ == '__main__':
main()
| 2.8125
| 3
|
ci.py
|
Glitter23/trying
| 0
|
12782675
|
<filename>ci.py
# Compound Interest
def ci(t,p,r):
A = p *((1+(r/100))**t)
CI = A - p
return CI
z=ci(4,50000,4)
print(z)
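# For these inputs the result is 50000 * (1.04 ** 4) - 50000, i.e. roughly 8492.93.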
#amount
def amount(p,r,t):
A = p *((1+(r/100))**t)
return A
#rate
def rate(A,p,t):
r=((A/p)**(1/t)) - 1
return r
k=rate(90000,45000,2)
print(k)
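# rate(90000, 45000, 2) evaluates to 2 ** 0.5 - 1, roughly 0.4142; note the
# function returns a fraction, not a percentage.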
| 3.4375
| 3
|
Data Structures/Linked Lists/Singly Linked List/Single-linked-list-operations.py
|
siddhi-244/CompetitiveProgrammingQuestionBank
| 931
|
12782676
|
# A Python program for all operations performed on a singly linked list.
# Time-Complexity = O(n)
# Space-Complexity = O(n)
class Node:
def __init__(self, data=None, next=None): # Creation of Node
self.data = data
self.next = next
class LinkedList:
def __init__(self):
self.head = None # head points the first node
def print(self):
if self.head is None:
print("Linked list is empty")
return
itr = self.head
llstr = '' # empty string
while itr:
llstr += str(itr.data)+' --> ' if itr.next else str(itr.data)
itr = itr.next
print(llstr)
def length(self): # will calculate length of the linked list
count = 0
itr = self.head
while itr:
count += 1
itr = itr.next
return count
def insert_at_begining(self, data):
node = Node(data, self.head) # Creating a new node calling Node method
self.head = node
def insert_at_end(self, data):
if self.head is None:
self.head = Node(data, None)
return
itr = self.head
while itr.next:
itr = itr.next
itr.next = Node(data, None)
def insert_at(self, index, data):
if index < 0 or index > self.length():
raise Exception("Invalid Index")
if index == 0:
self.insert_at_begining(data)
return
count = 0
itr = self.head
while itr:
if count == index - 1:
node = Node(data, itr.next)
itr.next = node
break
itr = itr.next
count += 1
def remove_at(self, index):
if index < 0 or index >= self.length():
raise Exception("Invalid Index")
if index == 0:
self.head = self.head.next
return
count = 0
itr = self.head
while itr:
if count == index - 1:
itr.next = itr.next.next # to delete the specified node
break
itr = itr.next
count += 1
def insert_values(self, data_list):
self.head = None
for data in data_list:
self.insert_at_end(data)
# removing element at linkedlist with Value
def removeval(self, value):
count = 0
temp = self.head
while temp:
if value != temp.data:
count += 1
temp = temp.next
if count == self.length():
print("Value is not present")
else:
if value == self.head.data:
self.head = self.head.next
return
temp = self.head
while temp:
if value == temp.next.data:
temp.next = temp.next.next
break
temp = temp.next
if __name__ == '__main__':
node1 = LinkedList()
ins = list(input("Enter the values to be inserted, separated by spaces [eg: python c++ java] : ").rstrip().split())
node1.insert_values(ins)
node1.print()
ind = int(input("Enter the index to be added: "))
val = input('Enter the value: ')
node1.insert_at(ind, val)
node1.print()
remm = int(input('Enter the index to be removed: '))
node1.remove_at(remm)
node1.print()
remval = input('Enter the value to be removed: ')
node1.removeval(remval)
node1.print()
inss = list(input("Enter the values to be inserted, separated by spaces [eg: 45 30 22] : ").rstrip().split())
node1.insert_values(inss)
node1.print()
inend = int(input('Enter the number to be inserted at the end: '))
node1.insert_at_end(inend)
node1.print()
remval1 = input('Enter the value to be removed: ')
node1.removeval(remval1)
node1.print()
| 4.125
| 4
|
corrige_media/corrige_media.py
|
Arthurnevs/E1
| 0
|
12782677
|
<filename>corrige_media/corrige_media.py
'''
UFCG
PROGRAMAÇÃO 1
<NAME> DE BRITO - 119210204
CORRIGE MEDIA
'''
nota1 = 10
nota2 = 5
media = (nota1 + nota2) / 2
print("Nota 1: {:4.1f}".format(nota1))
print("Nota 2: {:4.1f}".format(nota2))
print("Média : {:4.1f}".format(media))
| 2.40625
| 2
|
Exercicios Python/ex0018.py
|
AlanOliveira1998/ExerciciosPython01-100
| 0
|
12782678
|
<gh_stars>0
# Write a program that reads the lengths of the opposite leg and the adjacent leg, then computes and displays the hypotenuse
import math
co = float(input('Digite o valor do cateto oposto:'))
ca = float(input('Digite o valor do cateto adjacente:'))
h = math.hypot(co, ca)
print(f'O valor da hipotenusa corresponde a {h}')
# h = (co ** 2 + ca ** 2) ** (1/2)
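# Worked example (illustrative inputs): for legs 3 and 4, math.hypot(3, 4)
# returns 5.0, matching (3 ** 2 + 4 ** 2) ** 0.5.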
| 3.828125
| 4
|
pytest_tornado/test/test_server.py
|
FRI-DAY/pytest-tornado
| 0
|
12782679
|
import functools
import pytest
import tornado.httpclient
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write('Hello, world')
application = tornado.web.Application([
(r'/', MainHandler),
(r'/f00', MainHandler),
])
@pytest.fixture(scope='module')
def app():
return application
def _fetch(http_client, url):
return http_client.io_loop.run_sync(
functools.partial(http_client.fetch, url))
def test_http_server(http_server):
status = {'done': False}
def _done():
status['done'] = True
http_server.io_loop.stop()
http_server.io_loop.add_callback(_done)
http_server.io_loop.start()
assert status['done']
def test_http_client(http_client, base_url):
request = http_client.fetch(base_url)
request.add_done_callback(lambda future: http_client.io_loop.stop())
http_client.io_loop.start()
response = request.result()
assert response.code == 200
def test_http_client_with_fetch_helper(http_client, base_url):
response = _fetch(http_client, base_url)
assert response.code == 200
@pytest.mark.gen_test
def test_http_client_with_gen_test(http_client, base_url):
response = yield http_client.fetch(base_url)
assert response.code == 200
@pytest.mark.gen_test
def test_get_url_with_path(http_client, base_url):
response = yield http_client.fetch('%s/f00' % base_url)
assert response.code == 200
@pytest.mark.gen_test
def test_http_client_raises_on_404(http_client, base_url):
with pytest.raises(tornado.httpclient.HTTPError):
yield http_client.fetch('%s/bar' % base_url)
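# Note: the http_server, http_client and base_url fixtures, as well as the gen_test
# marker used above, are assumed to be provided by the pytest-tornado plugin; the
# module-scoped app fixture supplies the application they serve.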
| 2.1875
| 2
|
geonode/geonode/api/resourcebase_api.py
|
ttungbmt/BecaGIS_GeoPortal
| 0
|
12782680
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import re
import json
import logging
from django.urls import resolve
from django.db.models import Q
from django.http import HttpResponse
from django.conf import settings
from django.contrib.staticfiles.templatetags import staticfiles
from tastypie.authentication import MultiAuthentication, SessionAuthentication
from django.template.response import TemplateResponse
from tastypie import http
from tastypie.bundle import Bundle
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.utils import trailing_slash
from guardian.shortcuts import get_objects_for_user
from django.conf.urls import url
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404
from django.core.exceptions import ObjectDoesNotExist
from django.forms.models import model_to_dict
from tastypie.utils.mime import build_content_type
from geonode import get_version, qgis_server, geoserver
from geonode.layers.models import Layer
from geonode.maps.models import Map
from geonode.documents.models import Document
from geonode.base.models import ResourceBase
from geonode.base.models import HierarchicalKeyword
from geonode.groups.models import GroupProfile
from geonode.utils import check_ogc_backend
from geonode.security.utils import get_visible_resources
from .authentication import OAuthAuthentication
from .authorization import GeoNodeAuthorization, GeonodeApiKeyAuthentication
from .api import (
TagResource,
RegionResource,
OwnersResource,
ThesaurusKeywordResource,
TopicCategoryResource,
GroupResource,
FILTER_TYPES)
from .paginator import CrossSiteXHRPaginator
from django.utils.translation import gettext as _
if settings.HAYSTACK_SEARCH:
from haystack.query import SearchQuerySet # noqa
logger = logging.getLogger(__name__)
LAYER_SUBTYPES = {
'vector': 'dataStore',
'raster': 'coverageStore',
'remote': 'remoteStore',
'vector_time': 'vectorTimeSeries',
}
FILTER_TYPES.update(LAYER_SUBTYPES)
class CommonMetaApi:
authorization = GeoNodeAuthorization()
allowed_methods = ['get']
filtering = {
'title': ALL,
'keywords': ALL_WITH_RELATIONS,
'tkeywords': ALL_WITH_RELATIONS,
'regions': ALL_WITH_RELATIONS,
'category': ALL_WITH_RELATIONS,
'group': ALL_WITH_RELATIONS,
'owner': ALL_WITH_RELATIONS,
'date': ALL,
'purpose': ALL,
'abstract': ALL
}
ordering = ['date', 'title', 'popular_count']
max_limit = None
class CommonModelApi(ModelResource):
keywords = fields.ToManyField(TagResource, 'keywords', null=True)
regions = fields.ToManyField(RegionResource, 'regions', null=True)
category = fields.ToOneField(
TopicCategoryResource,
'category',
null=True,
full=True)
group = fields.ToOneField(
GroupResource,
'group',
null=True,
full=True)
owner = fields.ToOneField(OwnersResource, 'owner', full=True)
tkeywords = fields.ToManyField(
ThesaurusKeywordResource, 'tkeywords', null=True)
VALUES = [
# fields in the db
'id',
'uuid',
'title',
'date',
'date_type',
'edition',
'purpose',
'maintenance_frequency',
'restriction_code_type',
'constraints_other',
'license',
'language',
'spatial_representation_type',
'temporal_extent_start',
'temporal_extent_end',
'data_quality_statement',
'abstract',
'csw_wkt_geometry',
'csw_type',
'owner__username',
'share_count',
'popular_count',
'srid',
'bbox_x0',
'bbox_x1',
'bbox_y0',
'bbox_y1',
'category__gn_description',
'supplemental_information',
'site_url',
'thumbnail_url',
'detail_url',
'rating',
'group__name',
'has_time',
'is_approved',
'is_published',
'dirty_state',
]
def build_filters(self, filters=None, ignore_bad_filters=False, **kwargs):
if filters is None:
filters = {}
orm_filters = super(CommonModelApi, self).build_filters(
filters=filters, ignore_bad_filters=ignore_bad_filters, **kwargs)
if 'type__in' in filters and filters['type__in'] in FILTER_TYPES.keys():
orm_filters.update({'type': filters.getlist('type__in')})
if 'app_type__in' in filters:
orm_filters.update({'polymorphic_ctype__model': filters['app_type__in'].lower()})
if 'extent' in filters:
orm_filters.update({'extent': filters['extent']})
orm_filters['f_method'] = filters['f_method'] if 'f_method' in filters else 'and'
if not settings.SEARCH_RESOURCES_EXTENDED:
return self._remove_additional_filters(orm_filters)
return orm_filters
def _remove_additional_filters(self, orm_filters):
orm_filters.pop('abstract__icontains', None)
orm_filters.pop('purpose__icontains', None)
orm_filters.pop('f_method', None)
return orm_filters
def apply_filters(self, request, applicable_filters):
types = applicable_filters.pop('type', None)
extent = applicable_filters.pop('extent', None)
keywords = applicable_filters.pop('keywords__slug__in', None)
filtering_method = applicable_filters.pop('f_method', 'and')
if filtering_method == 'or':
filters = Q()
for f in applicable_filters.items():
filters |= Q(f)
semi_filtered = self.get_object_list(request).filter(filters)
else:
semi_filtered = super(
CommonModelApi,
self).apply_filters(
request,
applicable_filters)
filtered = None
if types:
for the_type in types:
if the_type in LAYER_SUBTYPES.keys():
super_type = the_type
if 'vector_time' == the_type:
super_type = 'vector'
if filtered:
if 'time' in the_type:
filtered = filtered | semi_filtered.filter(
Layer___storeType=LAYER_SUBTYPES[super_type]).exclude(Layer___has_time=False)
else:
filtered = filtered | semi_filtered.filter(
Layer___storeType=LAYER_SUBTYPES[super_type])
else:
if 'time' in the_type:
filtered = semi_filtered.filter(
Layer___storeType=LAYER_SUBTYPES[super_type]).exclude(Layer___has_time=False)
else:
filtered = semi_filtered.filter(
Layer___storeType=LAYER_SUBTYPES[super_type])
else:
_type_filter = FILTER_TYPES[the_type].__name__.lower()
if filtered:
filtered = filtered | semi_filtered.filter(polymorphic_ctype__model=_type_filter)
else:
filtered = semi_filtered.filter(polymorphic_ctype__model=_type_filter)
else:
filtered = semi_filtered
if settings.RESOURCE_PUBLISHING or settings.ADMIN_MODERATE_UPLOADS:
filtered = self.filter_published(filtered, request)
if settings.GROUP_PRIVATE_RESOURCES:
filtered = self.filter_group(filtered, request)
if extent:
filtered = self.filter_bbox(filtered, extent)
if keywords:
filtered = self.filter_h_keywords(filtered, keywords)
# Hide Dirty State Resources
user = request.user if request else None
if not user or not user.is_superuser:
if user:
filtered = filtered.exclude(Q(dirty_state=True) & ~(
Q(owner__username__iexact=str(user))))
else:
filtered = filtered.exclude(Q(dirty_state=True))
return filtered
def filter_published(self, queryset, request):
filter_set = get_visible_resources(
queryset,
request.user if request else None,
request=request,
admin_approval_required=settings.ADMIN_MODERATE_UPLOADS,
unpublished_not_visible=settings.RESOURCE_PUBLISHING)
return filter_set
def filter_group(self, queryset, request):
filter_set = get_visible_resources(
queryset,
request.user if request else None,
request=request,
private_groups_not_visibile=settings.GROUP_PRIVATE_RESOURCES)
return filter_set
def filter_h_keywords(self, queryset, keywords):
filtered = queryset
treeqs = HierarchicalKeyword.objects.none()
if keywords and len(keywords) > 0:
for keyword in keywords:
try:
kws = HierarchicalKeyword.objects.filter(
Q(name__iexact=keyword) | Q(slug__iexact=keyword))
for kw in kws:
treeqs = treeqs | HierarchicalKeyword.get_tree(kw)
except ObjectDoesNotExist:
# Ignore keywords not actually used?
pass
filtered = queryset.filter(Q(keywords__in=treeqs))
return filtered
def filter_bbox(self, queryset, extent_filter):
from geonode.utils import bbox_to_projection
bbox = extent_filter.split(',')
bbox = list(map(str, bbox))
intersects = (Q(bbox_x0__gte=bbox[0]) & Q(bbox_x1__lte=bbox[2]) &
Q(bbox_y0__gte=bbox[1]) & Q(bbox_y1__lte=bbox[3]))
for proj in Layer.objects.order_by('srid').values('srid').distinct():
if proj['srid'] != 'EPSG:4326':
proj_bbox = bbox_to_projection(bbox + ['4326', ],
target_srid=int(proj['srid'][5:]))
if proj_bbox[-1] != 4326:
intersects = intersects | (Q(bbox_x0__gte=proj_bbox[0]) & Q(bbox_x1__lte=proj_bbox[2]) & Q(
bbox_y0__gte=proj_bbox[1]) & Q(bbox_y1__lte=proj_bbox[3]))
return queryset.filter(intersects)
def build_haystack_filters(self, parameters):
from haystack.inputs import Raw
from haystack.query import SearchQuerySet, SQ # noqa
sqs = None
# Retrieve Query Params
# Text search
query = parameters.get('q', None)
# Types and subtypes to filter (map, layer, vector, etc)
type_facets = parameters.getlist("type__in", [])
# If coming from explore page, add type filter from resource_name
resource_filter = self._meta.resource_name.rstrip("s")
if resource_filter != "base" and resource_filter not in type_facets:
type_facets.append(resource_filter)
# Publication date range (start,end)
date_end = parameters.get("date__lte", None)
date_start = parameters.get("date__gte", None)
# Topic category filter
category = parameters.getlist("category__identifier__in")
# Keyword filter
keywords = parameters.getlist("keywords__slug__in")
# Region filter
regions = parameters.getlist("regions__name__in")
# Owner filters
owner = parameters.getlist("owner__username__in")
# Sort order
sort = parameters.get("order_by", "relevance")
# Geospatial Elements
bbox = parameters.get("extent", None)
# Filter by Type and subtype
if type_facets is not None:
types = []
subtypes = []
for type in type_facets:
if type in {"map", "layer", "document", "user"}:
# Type is one of our Major Types (not a sub type)
types.append(type)
elif type in LAYER_SUBTYPES.keys():
subtypes.append(type)
if 'vector' in subtypes and 'vector_time' not in subtypes:
subtypes.append('vector_time')
if len(subtypes) > 0:
types.append("layer")
sqs = SearchQuerySet().narrow(f"subtype:{','.join(map(str, subtypes))}")
if len(types) > 0:
sqs = (SearchQuerySet() if sqs is None else sqs).narrow(
f"type:{','.join(map(str, types))}")
# Filter by Query Params
# haystack bug? if boosted fields aren't included in the
# query, then the score won't be affected by the boost
if query:
if query.startswith('"') or query.startswith('\''):
# Match exact phrase
phrase = query.replace('"', '')
sqs = (SearchQuerySet() if sqs is None else sqs).filter(
SQ(title__exact=phrase) |
SQ(description__exact=phrase) |
SQ(content__exact=phrase)
)
else:
words = [
w for w in re.split(
r'\W',
query,
flags=re.UNICODE) if w]
for i, search_word in enumerate(words):
if i == 0:
sqs = (SearchQuerySet() if sqs is None else sqs) \
.filter(
SQ(title=Raw(search_word)) |
SQ(description=Raw(search_word)) |
SQ(content=Raw(search_word))
)
elif search_word in {"AND", "OR"}:
pass
elif words[i - 1] == "OR": # previous word OR this word
sqs = sqs.filter_or(
SQ(title=Raw(search_word)) |
SQ(description=Raw(search_word)) |
SQ(content=Raw(search_word))
)
else: # previous word AND this word
sqs = sqs.filter(
SQ(title=Raw(search_word)) |
SQ(description=Raw(search_word)) |
SQ(content=Raw(search_word))
)
# filter by category
if category:
sqs = (SearchQuerySet() if sqs is None else sqs).narrow(
f"category:{','.join(map(str, category))}")
# filter by keyword: use filter_or with keywords_exact
# not using exact leads to fuzzy matching and too many results
# using narrow with exact leads to zero results if multiple keywords
# selected
if keywords:
for keyword in keywords:
sqs = (
SearchQuerySet() if sqs is None else sqs).filter_or(
keywords_exact=keyword)
# filter by regions: use filter_or with regions_exact
# not using exact leads to fuzzy matching and too many results
# using narrow with exact leads to zero results if multiple keywords
# selected
if regions:
for region in regions:
sqs = (
SearchQuerySet() if sqs is None else sqs).filter_or(
regions_exact__exact=region)
# filter by owner
if owner:
sqs = (
SearchQuerySet() if sqs is None else sqs).narrow(
f"owner__username:{','.join(map(str, owner))}")
# filter by date
if date_start:
sqs = (SearchQuerySet() if sqs is None else sqs).filter(
SQ(date__gte=date_start)
)
if date_end:
sqs = (SearchQuerySet() if sqs is None else sqs).filter(
SQ(date__lte=date_end)
)
# Filter by geographic bounding box
if bbox:
left, bottom, right, top = bbox.split(',')
sqs = (
SearchQuerySet() if sqs is None else sqs).exclude(
SQ(
bbox_top__lte=bottom) | SQ(
bbox_bottom__gte=top) | SQ(
bbox_left__gte=right) | SQ(
bbox_right__lte=left))
# Apply sort
if sort.lower() == "-date":
sqs = (
SearchQuerySet() if sqs is None else sqs).order_by("-date")
elif sort.lower() == "date":
sqs = (
SearchQuerySet() if sqs is None else sqs).order_by("date")
elif sort.lower() == "title":
sqs = (SearchQuerySet() if sqs is None else sqs).order_by(
"title_sortable")
elif sort.lower() == "-title":
sqs = (SearchQuerySet() if sqs is None else sqs).order_by(
"-title_sortable")
elif sort.lower() == "-popular_count":
sqs = (SearchQuerySet() if sqs is None else sqs).order_by(
"-popular_count")
else:
sqs = (
SearchQuerySet() if sqs is None else sqs).order_by("-date")
return sqs
def get_search(self, request, **kwargs):
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
# Get the list of objects that matches the filter
sqs = self.build_haystack_filters(request.GET)
if not settings.SKIP_PERMS_FILTER:
filter_set = get_objects_for_user(
request.user, 'base.view_resourcebase')
filter_set = get_visible_resources(
filter_set,
request.user if request else None,
admin_approval_required=settings.ADMIN_MODERATE_UPLOADS,
unpublished_not_visible=settings.RESOURCE_PUBLISHING,
private_groups_not_visibile=settings.GROUP_PRIVATE_RESOURCES)
filter_set_ids = filter_set.values_list('id')
# Do the query using the filterset and the query term. Facet the
# results
if len(filter_set) > 0:
                sqs = sqs.filter(id__in=filter_set_ids).facet('type').facet('subtype').facet(
                    'owner').facet('keywords').facet('regions').facet('category')
else:
sqs = None
else:
sqs = sqs.facet('type').facet('subtype').facet(
'owner').facet('keywords').facet('regions').facet('category')
if sqs:
# Build the Facet dict
facets = {}
for facet in sqs.facet_counts()['fields']:
facets[facet] = {}
for item in sqs.facet_counts()['fields'][facet]:
facets[facet][item[0]] = item[1]
# Paginate the results
paginator = Paginator(sqs, request.GET.get('limit'))
try:
page = paginator.page(
int(request.GET.get('offset') or 0) /
int(request.GET.get('limit') or 0 + 1))
except InvalidPage:
raise Http404("Sorry, no results on that page.")
if page.has_previous():
previous_page = page.previous_page_number()
else:
previous_page = 1
if page.has_next():
next_page = page.next_page_number()
else:
next_page = 1
total_count = sqs.count()
objects = page.object_list
else:
next_page = 0
previous_page = 0
total_count = 0
facets = {}
objects = []
object_list = {
"meta": {
"limit": settings.CLIENT_RESULTS_LIMIT,
"next": next_page,
"offset": int(getattr(request.GET, 'offset', 0)),
"previous": previous_page,
"total_count": total_count,
"facets": facets,
},
"objects": [self.get_haystack_api_fields(x) for x in objects],
}
self.log_throttled_access(request)
return self.create_response(request, object_list)
def get_haystack_api_fields(self, haystack_object):
return {k: v for k, v in haystack_object.get_stored_fields().items()
if not re.search('_exact$|_sortable$', k)}
def get_list(self, request, **kwargs):
"""
Returns a serialized list of resources.
Calls ``obj_get_list`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
# TODO: Uncached for now. Invalidation that works for everyone may be
# impossible.
base_bundle = self.build_bundle(request=request)
objects = self.obj_get_list(
bundle=base_bundle,
**self.remove_api_resource_names(kwargs))
sorted_objects = self.apply_sorting(objects, options=request.GET)
paginator = self._meta.paginator_class(
request.GET,
sorted_objects,
resource_uri=self.get_resource_uri(),
limit=self._meta.limit,
max_limit=self._meta.max_limit,
collection_name=self._meta.collection_name)
to_be_serialized = paginator.page()
to_be_serialized = self.alter_list_data_to_serialize(
request,
to_be_serialized)
return self.create_response(
request, to_be_serialized, response_objects=objects)
def format_objects(self, objects):
"""
Format the objects for output in a response.
"""
for key in ('site_url', 'has_time'):
if key in self.VALUES:
idx = self.VALUES.index(key)
del self.VALUES[idx]
# hack needed because dehydrate does not seem to work in CommonModelApi
formatted_objects = []
for obj in objects:
formatted_obj = model_to_dict(obj, fields=self.VALUES)
if 'site_url' not in formatted_obj or len(formatted_obj['site_url']) == 0:
formatted_obj['site_url'] = settings.SITEURL
            # fall back to the placeholder thumbnail when none has been set
            if not formatted_obj['thumbnail_url']:
                formatted_obj['thumbnail_url'] = staticfiles.static(settings.MISSING_THUMBNAIL)
formatted_obj['owner__username'] = obj.owner.username
formatted_obj['owner_name'] = obj.owner.get_full_name() or obj.owner.username
# replace thumbnail_url with curated_thumbs
if hasattr(obj, 'curatedthumbnail'):
if hasattr(obj.curatedthumbnail.img_thumbnail, 'url'):
formatted_obj['thumbnail_url'] = obj.curatedthumbnail.thumbnail_url
else:
formatted_obj['thumbnail_url'] = ''
formatted_objects.append(formatted_obj)
return formatted_objects
def create_response(
self,
request,
data,
response_class=HttpResponse,
response_objects=None,
**response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
        # If a user does not have at least view permissions, they won't be able
        # to see the resource at all.
filtered_objects_ids = None
try:
if data['objects']:
filtered_objects_ids = [
item.id for item in data['objects'] if request.user.has_perm(
'view_resourcebase', item.get_self_resource())]
except Exception:
pass
if isinstance(
data,
dict) and 'objects' in data and not isinstance(
data['objects'],
list):
if filtered_objects_ids:
data['objects'] = [
x for x in list(
self.format_objects(
data['objects'])) if x['id'] in filtered_objects_ids]
else:
data['objects'] = list(self.format_objects(data['objects']))
# give geonode version
data['geonode_version'] = get_version()
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(
content=serialized,
content_type=build_content_type(desired_format),
**response_kwargs)
def prepend_urls(self):
if settings.HAYSTACK_SEARCH:
return [
url(r"^(?P<resource_name>%s)/search%s$" % (
self._meta.resource_name, trailing_slash()
),
self.wrap_view('get_search'), name="api_get_search"),
]
else:
return []
class ResourceBaseResource(CommonModelApi):
"""ResourceBase api"""
class Meta(CommonMetaApi):
paginator_class = CrossSiteXHRPaginator
queryset = ResourceBase.objects.polymorphic_queryset() \
.distinct().order_by('-date')
resource_name = 'base'
excludes = ['csw_anytext', 'metadata_xml']
authentication = MultiAuthentication(SessionAuthentication(),
OAuthAuthentication(),
GeonodeApiKeyAuthentication())
class FeaturedResourceBaseResource(CommonModelApi):
"""Only the featured resourcebases"""
class Meta(CommonMetaApi):
paginator_class = CrossSiteXHRPaginator
queryset = ResourceBase.objects.filter(featured=True).order_by('-date')
resource_name = 'featured'
authentication = MultiAuthentication(SessionAuthentication(),
OAuthAuthentication(),
GeonodeApiKeyAuthentication())
class LayerResource(CommonModelApi):
"""Layer API"""
links = fields.ListField(
attribute='links',
null=True,
use_in='all',
default=[])
if check_ogc_backend(qgis_server.BACKEND_PACKAGE):
default_style = fields.ForeignKey(
'geonode.api.api.StyleResource',
attribute='qgis_default_style',
null=True)
styles = fields.ManyToManyField(
'geonode.api.api.StyleResource',
attribute='qgis_styles',
null=True,
use_in='detail')
elif check_ogc_backend(geoserver.BACKEND_PACKAGE):
default_style = fields.ForeignKey(
'geonode.api.api.StyleResource',
attribute='default_style',
null=True)
styles = fields.ManyToManyField(
'geonode.api.api.StyleResource',
attribute='styles',
null=True,
use_in='detail')
def format_objects(self, objects):
"""
Formats the object.
"""
formatted_objects = []
for obj in objects:
# convert the object to a dict using the standard values.
# includes other values
values = self.VALUES + [
'alternate',
'name'
]
formatted_obj = model_to_dict(obj, fields=values)
username = obj.owner.get_username()
full_name = (obj.owner.get_full_name() or username)
formatted_obj['owner__username'] = username
formatted_obj['owner_name'] = full_name
if obj.category:
formatted_obj['category__gn_description'] = _(obj.category.gn_description)
if obj.group:
formatted_obj['group'] = obj.group
try:
formatted_obj['group_name'] = GroupProfile.objects.get(slug=obj.group.name)
except GroupProfile.DoesNotExist:
formatted_obj['group_name'] = obj.group
formatted_obj['keywords'] = [k.name for k in obj.keywords.all()] if obj.keywords else []
formatted_obj['regions'] = [r.name for r in obj.regions.all()] if obj.regions else []
# provide style information
bundle = self.build_bundle(obj=obj)
formatted_obj['default_style'] = self.default_style.dehydrate(
bundle, for_list=True)
# Add resource uri
formatted_obj['resource_uri'] = self.get_resource_uri(bundle)
formatted_obj['links'] = self.dehydrate_ogc_links(bundle)
if 'site_url' not in formatted_obj or len(formatted_obj['site_url']) == 0:
formatted_obj['site_url'] = settings.SITEURL
# Probe Remote Services
formatted_obj['store_type'] = 'dataset'
formatted_obj['online'] = True
if hasattr(obj, 'storeType'):
formatted_obj['store_type'] = obj.storeType
if obj.storeType == 'remoteStore' and hasattr(obj, 'remote_service'):
if obj.remote_service:
formatted_obj['online'] = (obj.remote_service.probe == 200)
else:
formatted_obj['online'] = False
formatted_obj['gtype'] = self.dehydrate_gtype(bundle)
# replace thumbnail_url with curated_thumbs
if hasattr(obj, 'curatedthumbnail'):
formatted_obj['thumbnail_url'] = obj.curatedthumbnail.thumbnail_url
formatted_obj['processed'] = obj.instance_is_processed
# put the object on the response stack
formatted_objects.append(formatted_obj)
return formatted_objects
def _dehydrate_links(self, bundle, link_types=None):
"""Dehydrate links field."""
dehydrated = []
obj = bundle.obj
link_fields = [
'extension',
'link_type',
'name',
'mime',
'url'
]
links = obj.link_set.all()
if link_types:
links = links.filter(link_type__in=link_types)
for lnk in links:
formatted_link = model_to_dict(lnk, fields=link_fields)
dehydrated.append(formatted_link)
return dehydrated
def dehydrate_links(self, bundle):
return self._dehydrate_links(bundle)
def dehydrate_ogc_links(self, bundle):
return self._dehydrate_links(bundle, ['OGC:WMS', 'OGC:WFS', 'OGC:WCS'])
def dehydrate_gtype(self, bundle):
return bundle.obj.gtype
def populate_object(self, obj):
"""Populate results with necessary fields
:param obj: Layer obj
:type obj: Layer
:return:
"""
if check_ogc_backend(qgis_server.BACKEND_PACKAGE):
# Provides custom links for QGIS Server styles info
# Default style
try:
obj.qgis_default_style = obj.qgis_layer.default_style
except Exception:
obj.qgis_default_style = None
# Styles
try:
obj.qgis_styles = obj.qgis_layer.styles
except Exception:
obj.qgis_styles = []
return obj
def build_bundle(
self, obj=None, data=None, request=None, **kwargs):
"""Override build_bundle method to add additional info."""
if obj is None and self._meta.object_class:
obj = self._meta.object_class()
elif obj:
obj = self.populate_object(obj)
return Bundle(
obj=obj,
data=data,
request=request, **kwargs)
def patch_detail(self, request, **kwargs):
"""Allow patch request to update default_style.
Request body must match this:
{
'default_style': <resource_uri_to_style>
}
"""
reason = 'Can only patch "default_style" field.'
try:
body = json.loads(request.body)
if 'default_style' not in body:
return http.HttpBadRequest(reason=reason)
match = resolve(body['default_style'])
style_id = match.kwargs['id']
api_name = match.kwargs['api_name']
resource_name = match.kwargs['resource_name']
if not (resource_name == 'styles' and api_name == 'api'):
raise Exception()
from geonode.qgis_server.models import QGISServerStyle
style = QGISServerStyle.objects.get(id=style_id)
layer_id = kwargs['id']
layer = Layer.objects.get(id=layer_id)
except Exception:
return http.HttpBadRequest(reason=reason)
from geonode.qgis_server.views import default_qml_style
request.method = 'POST'
response = default_qml_style(
request,
layername=layer.name,
style_name=style.name)
if isinstance(response, TemplateResponse):
if response.status_code == 200:
return HttpResponse(status=200)
return self.error_response(request, response.content)
# copy parent attribute before modifying
VALUES = CommonModelApi.VALUES[:]
VALUES.append('typename')
class Meta(CommonMetaApi):
paginator_class = CrossSiteXHRPaginator
queryset = Layer.objects.distinct().order_by('-date')
resource_name = 'layers'
detail_uri_name = 'id'
include_resource_uri = True
allowed_methods = ['get', 'patch']
excludes = ['csw_anytext', 'metadata_xml']
authentication = MultiAuthentication(SessionAuthentication(),
OAuthAuthentication(),
GeonodeApiKeyAuthentication())
filtering = CommonMetaApi.filtering
# Allow filtering using ID
filtering.update({
'id': ALL,
'name': ALL,
'alternate': ALL,
})
class MapResource(CommonModelApi):
"""Maps API"""
def format_objects(self, objects):
"""
Formats the objects and provides reference to list of layers in map
resources.
:param objects: Map objects
"""
formatted_objects = []
for obj in objects:
# convert the object to a dict using the standard values.
formatted_obj = model_to_dict(obj, fields=self.VALUES)
username = obj.owner.get_username()
full_name = (obj.owner.get_full_name() or username)
formatted_obj['owner__username'] = username
formatted_obj['owner_name'] = full_name
if obj.category:
formatted_obj['category__gn_description'] = _(obj.category.gn_description)
if obj.group:
formatted_obj['group'] = obj.group
try:
formatted_obj['group_name'] = GroupProfile.objects.get(slug=obj.group.name)
except GroupProfile.DoesNotExist:
formatted_obj['group_name'] = obj.group
formatted_obj['keywords'] = [k.name for k in obj.keywords.all()] if obj.keywords else []
formatted_obj['regions'] = [r.name for r in obj.regions.all()] if obj.regions else []
if 'site_url' not in formatted_obj or len(formatted_obj['site_url']) == 0:
formatted_obj['site_url'] = settings.SITEURL
# Probe Remote Services
formatted_obj['store_type'] = 'map'
formatted_obj['online'] = True
# get map layers
map_layers = obj.layers
formatted_layers = []
map_layer_fields = [
'id',
'stack_order',
'format',
'name',
'opacity',
'group',
'visibility',
'transparent',
'ows_url',
'layer_params',
'source_params',
'local'
]
for layer in map_layers:
formatted_map_layer = model_to_dict(
layer, fields=map_layer_fields)
formatted_layers.append(formatted_map_layer)
formatted_obj['layers'] = formatted_layers
# replace thumbnail_url with curated_thumbs
try:
if hasattr(obj, 'curatedthumbnail'):
if hasattr(obj.curatedthumbnail.img_thumbnail, 'url'):
formatted_obj['thumbnail_url'] = obj.curatedthumbnail.thumbnail_url
else:
formatted_obj['thumbnail_url'] = ''
except Exception as e:
formatted_obj['thumbnail_url'] = ''
logger.exception(e)
formatted_objects.append(formatted_obj)
return formatted_objects
class Meta(CommonMetaApi):
paginator_class = CrossSiteXHRPaginator
queryset = Map.objects.distinct().order_by('-date')
resource_name = 'maps'
authentication = MultiAuthentication(SessionAuthentication(),
OAuthAuthentication(),
GeonodeApiKeyAuthentication())
class DocumentResource(CommonModelApi):
"""Documents API"""
def format_objects(self, objects):
"""
Formats the objects and provides reference to list of layers in map
resources.
:param objects: Map objects
"""
formatted_objects = []
for obj in objects:
# convert the object to a dict using the standard values.
formatted_obj = model_to_dict(obj, fields=self.VALUES)
username = obj.owner.get_username()
full_name = (obj.owner.get_full_name() or username)
formatted_obj['owner__username'] = username
formatted_obj['owner_name'] = full_name
if obj.category:
formatted_obj['category__gn_description'] = _(obj.category.gn_description)
if obj.group:
formatted_obj['group'] = obj.group
try:
formatted_obj['group_name'] = GroupProfile.objects.get(slug=obj.group.name)
except GroupProfile.DoesNotExist:
formatted_obj['group_name'] = obj.group
formatted_obj['keywords'] = [k.name for k in obj.keywords.all()] if obj.keywords else []
formatted_obj['regions'] = [r.name for r in obj.regions.all()] if obj.regions else []
if 'site_url' not in formatted_obj or len(formatted_obj['site_url']) == 0:
formatted_obj['site_url'] = settings.SITEURL
# Probe Remote Services
formatted_obj['store_type'] = 'dataset'
formatted_obj['online'] = True
# replace thumbnail_url with curated_thumbs
if hasattr(obj, 'curatedthumbnail'):
try:
if hasattr(obj.curatedthumbnail.img_thumbnail, 'url'):
formatted_obj['thumbnail_url'] = obj.curatedthumbnail.thumbnail_url
else:
formatted_obj['thumbnail_url'] = ''
except Exception:
formatted_obj['thumbnail_url'] = ''
formatted_objects.append(formatted_obj)
return formatted_objects
class Meta(CommonMetaApi):
paginator_class = CrossSiteXHRPaginator
filtering = CommonMetaApi.filtering
filtering.update({'doc_type': ALL})
queryset = Document.objects.distinct().order_by('-date')
resource_name = 'documents'
authentication = MultiAuthentication(SessionAuthentication(),
OAuthAuthentication(),
GeonodeApiKeyAuthentication())
| 1.648438
| 2
|
PythonBaseDemo/CommonModules/10.8/Counter_test2.py
|
CypHelp/TestNewWorldDemo
| 0
|
12782681
|
<reponame>CypHelp/TestNewWorldDemo
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟 (Crazy Java League)</a>  #
# author yeeku.H.lee <EMAIL> #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
from collections import Counter
# create a Counter object
cnt = Counter()
# accessing a key that does not exist reports its count as 0
print(cnt['Python']) # 0
for word in ['Swift', 'Python', 'Kotlin', 'Kotlin', 'Swift', 'Go']:
cnt[word] += 1
print(cnt)
# list only the elements of the Counter (each element repeated by its count)
print(list(cnt.elements()))
# convert a string (an iterable) into a Counter
chr_cnt = Counter('abracadabra')
# get the 3 most common letters
print(chr_cnt.most_common(3)) # [('a', 5), ('b', 2), ('r', 2)]
c = Counter(a=4, b=2, c=0, d=-2)
d = Counter(a=1, b=2, c=3, d=4)
# subtracting one Counter from another just decreases each element's count
c.subtract(d)
print(c) # Counter({'a': 3, 'b': 0, 'c': -3, 'd': -6})
e = Counter({'x': 2, 'y': 3, 'z': -4})
# del really removes the key-value pair
del e['y']
print(e)
# access the value for 'w'; 'w' has never appeared, so 0 is returned
print(e['w']) # 0
# delete the 'w' key-value pair with del
del e['w']
# access the value for 'w' again; 'w' is still absent, so 0 is returned
print(e['w']) # 0
| 3.171875
| 3
|
run.py
|
wellcomecollection/file_auditor
| 0
|
12782682
|
#!/usr/bin/env python2
from __future__ import print_function
import csv
import datetime
import errno
import hashlib
import os
import sys
import traceback
import zipfile
AUDIT_CSV_PATH = "audit.csv"
AUDIT_ZIPFILES_CSV_PATH = "audit_with_zipfile_entries.csv"
AUDIT_CSV_FIELDNAMES = ["path", "size", "last_modified_time", "sha256"]
AUDIT_ZIPFILES_CSV_FIELDNAMES = ["path", "entry_filename", "size", "sha256"]
def get_size_and_sha256(infile):
"""
Returns the size and SHA256 checksum (as hex) of the given file.
"""
h = hashlib.sha256()
size = 0
while True:
chunk = infile.read(8192)
if not chunk:
break
h.update(chunk)
size += len(chunk)
return (size, h.hexdigest())
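# Usage sketch (illustrative, mirrors the calls further below): the helper above is
# passed an open binary file object or a zipfile entry, e.g.
#   with open(some_path, "rb") as infile:          # some_path is a placeholder
#       size, digest = get_size_and_sha256(infile)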
def get_file_paths_under(root):
"""Generates the paths to every file under ``root``."""
if not os.path.isdir(root):
raise ValueError("Cannot find files under non-existent directory: %r" % root)
for dirpath, _, filenames in os.walk(root):
for f in filenames:
if os.path.isfile(os.path.join(dirpath, f)):
yield os.path.join(dirpath, f)
def get_existing_audit_entries():
"""
Returns a list of all the entries already saved in ``AUDIT_CSV_PATH``.
"""
try:
with open(AUDIT_CSV_PATH) as infile:
return list(csv.DictReader(infile))
except IOError as err:
if err.errno == errno.ENOENT:
with open(AUDIT_CSV_PATH, "w") as outfile:
writer = csv.DictWriter(outfile, fieldnames=AUDIT_CSV_FIELDNAMES)
writer.writeheader()
return []
else:
raise
def get_existing_audit_zip_entries(path):
"""
Returns a list of all the entries already saved in ``AUDIT_ZIPFILES_CSV_PATH``
that match ``path``.
"""
try:
with open(AUDIT_ZIPFILES_CSV_PATH) as infile:
return [entry for entry in csv.DictReader(infile) if entry["path"] == path]
except IOError as err:
if err.errno == errno.ENOENT:
with open(AUDIT_ZIPFILES_CSV_PATH, "w") as outfile:
writer = csv.DictWriter(
outfile, fieldnames=AUDIT_ZIPFILES_CSV_FIELDNAMES
)
writer.writeheader()
return []
else:
raise
def get_paths_to_audit(root):
"""
Generates a list of paths that should be audited.
"""
existing_audit_paths = {e["path"] for e in get_existing_audit_entries()}
for path in get_file_paths_under(root):
# These files are of no consequence. We can ignore them.
if os.path.basename(path) in {".DS_Store", "Thumbs.db"}:
continue
if path in existing_audit_paths:
continue
yield path
def record_audit_for_zipfile_entries(path):
"""
Record audit information for all the entries in a zipfile.
"""
assert path.endswith(".zip")
    existing_zip_entry_names = {e["entry_filename"] for e in get_existing_audit_zip_entries(path)}
with open(AUDIT_ZIPFILES_CSV_PATH, "a") as outfile:
writer = csv.DictWriter(outfile, fieldnames=AUDIT_ZIPFILES_CSV_FIELDNAMES)
with zipfile.ZipFile(path) as zf:
for info in zf.infolist():
if info.filename in existing_zip_entry_names:
continue
with zf.open(info) as entry:
size, sha256 = get_size_and_sha256(entry)
writer.writerow(
{
"path": path,
"entry_filename": info.filename,
"size": size,
"sha256": sha256,
}
)
def record_audit_for_path(path):
"""
Record audit information for a single file.
"""
with open(AUDIT_CSV_PATH, "a") as outfile:
writer = csv.DictWriter(outfile, fieldnames=AUDIT_CSV_FIELDNAMES)
stat = os.stat(path)
with open(path, "rb") as infile:
size, sha256 = get_size_and_sha256(infile)
        mtime = stat.st_mtime  # reuse the stat result captured above
last_modified_time = datetime.datetime.fromtimestamp(mtime).isoformat()
writer.writerow(
{
"path": path,
"size": size,
"last_modified_time": last_modified_time,
"sha256": sha256,
}
)
if __name__ == "__main__":
try:
root = sys.argv[1]
except IndexError:
sys.exit("Usage: %s <ROOT>" % __file__)
for path in get_paths_to_audit(root=root):
print(path)
try:
if path.endswith(".zip"):
record_audit_for_zipfile_entries(path)
record_audit_for_path(path)
except Exception as exc:
with open("exceptions.log", "a") as outfile:
outfile.write("Exception while trying to audit %r:\n\n" % path)
traceback.print_exc(file=outfile)
outfile.write("\n---\n\n")
| 2.921875
| 3
|
userlib/analysislib/andika/python BEC analysis/QgasUtils.py
|
specialforcea/labscript_suite
| 0
|
12782683
|
"""
Created on Mon Sep 9 15:51:35 2013
QgasUtils: Basic Quantum Gas Utilities functions
@author: ispielman
Modified on Wed Dec 10 11:26: 2014
@author: aputra
"""
import numpy
import scipy.ndimage
def ImageSlice(xVals, yVals, Image, r0, Width, Scaled = False):
"""
    Produces a pair of slices through 'Image', each averaged over a band of width 'Width' centered at r0 = [x y].
    Scaled : 'False' uses pixel indices directly; 'True' computes the scaling from xVals and yVals,
    assuming they are linearly spaced.
    Currently Width and x,y are in scaled units, not pixel units.
    The return value is ((xvals, xslice), (yvals, yslice)), where each entry is a numpy array;
    these are copies, not views.
"""
if (Scaled):
(xMin, yMin) = numpy.floor(GetPixelCoordsFromImage(r0, - Width/2, xVals, yVals));
(xMax, yMax) = numpy.ceil(GetPixelCoordsFromImage(r0, Width/2, xVals, yVals));
else:
(xMin, yMin) = r0 - numpy.round(Width/2);
(xMax, yMax) = r0 + numpy.round(Width/2);
# Extract bands of desired width
# These are slices, so views of the initial data
if xMin<0: xMin =0
if yMin<0: yMin =0
if xMax>xVals.shape[1]: xMax = xVals.shape[1]
if yMax>yVals.shape[0]: yMax = yVals.shape[0]
# Compute averages
ySlice = Image[:,xMin:xMax].mean(1); # along y, so use x center
xSlice = Image[yMin:yMax,:].mean(0); # along x, so use y center
yValsSlice = yVals[:,0].copy();
xValsSlice = xVals[0,:].copy();
return ((xValsSlice, xSlice), (yValsSlice, ySlice));
def ImageCrop(xVals, yVals, Image, r0, Width, Scaled = False, Center = True):
"""
    Crops an image, along with the associated matrices of x and y values,
    to a specified area and returns the cropped image.
    The result is a copy, not a view.
Image, xVals, yVals : (2D image, xvals, yvals)
r0 : center of ROI in physical units (two element list or array)
Width : length of box-sides in physical units (two element list or array)
Scaled : If true, will attempt to use the x and y waves, to generate pixel values
Center : Recenter on cropped region
"""
error = False;
Cropped_Image={'OptDepth':0,'xVals':0,'yVals':0,'Error':error}
if (Scaled):
if(ScaleTest(xVals, yVals)):
rMinPixel = numpy.floor(GetPixelCoordsFromImage(r0, -Width/2, xVals, yVals));
rMaxPixel = numpy.ceil(GetPixelCoordsFromImage(r0, Width/2, xVals, yVals));
else:
rMinPixel = numpy.floor(r0)-numpy.floor(Width/2);
rMaxPixel = numpy.ceil(r0)+numpy.ceil(Width/2);
error = True;
else:
rMinPixel = numpy.floor(r0)-numpy.floor(Width/2);
rMaxPixel = numpy.ceil(r0)+numpy.ceil(Width/2);
if rMinPixel[0]<0: rMinPixel[0]=0
if rMinPixel[1]<0: rMinPixel[1]=0
if rMaxPixel[0]>xVals.shape[1]: rMaxPixel[0] = xVals.shape[1]
if rMaxPixel[1]>yVals.shape[0]: rMaxPixel[1] = yVals.shape[0]
Cropped_Image['OptDepth'] = Image[rMinPixel[1]:rMaxPixel[1],rMinPixel[0]:rMaxPixel[0]].copy();
Cropped_Image['xVals'] = xVals[rMinPixel[1]:rMaxPixel[1],rMinPixel[0]:rMaxPixel[0]].copy();
Cropped_Image['yVals'] = yVals[rMinPixel[1]:rMaxPixel[1],rMinPixel[0]:rMaxPixel[0]].copy();
if (Center):
Cropped_Image['xVals'] -= r0[0];
Cropped_Image['yVals'] -= r0[1];
return Cropped_Image;
def ImageSliceFromMax(Image, width, pScale = True):
"""
Produces a pair of slices from image of a band with 'Width' centered at the maximum val of Image
Scaled : 'False' use pixels directly, and 'True' compute scaling from (xvals and yvals) assuming
they are linearly spaced
Currently Width and x,y are in scaled units, not pixel units.
the return will be ((xvals xslice) (yvals yslice)), where each entry is a numpy array.
these are copies, not views.
"""
Z = scipy.ndimage.gaussian_filter(Image['OptDepth'], sigma=3);
id = Z.argmax()
r0max = (numpy.ravel(Image['xVals'])[id], numpy.ravel(Image['yVals'])[id])
imgSlice = ImageSlice(Image['xVals'], Image['yVals'], Image['OptDepth'], r0max, width, Scaled = pScale)
imgSlicefromMax={'xVals':0,'yVals':0,'xSlice':0, 'ySlice':0, 'xMax':r0max[0], 'yMax':r0max[1]}
imgSlicefromMax['yVals'] = imgSlice[1][0]
imgSlicefromMax['xVals'] = imgSlice[0][0]
imgSlicefromMax['ySlice'] = imgSlice[1][1]
imgSlicefromMax['xSlice'] = imgSlice[0][1]
return imgSlicefromMax
def GetPixelCoordsFromImage(r0, Offset, xVals, yVals):
"""
Returns the pixel coordinates associated with the scaled values in the 2D arrays xVals and yVals
remember in r0 the ordering is r0 = (x0, y0)
"""
# Assume that the correct arrays were passed
dy = yVals[1][0] - yVals[0][0];
dx = xVals[0][1] - xVals[0][0];
y0 = yVals[0][0];
x0 = xVals[0][0];
#want offset to be an integer number of pixels
Offset = numpy.round(Offset/numpy.array([dx,dy]));
return (r0 - numpy.array([x0, y0])) /numpy.array([dx, dy])+Offset;
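# Worked example (illustrative numbers): if xVals starts at x0 = 0 with spacing dx = 0.5,
# a point at x = 1.0 with Offset = 0 maps to pixel (1.0 - 0) / 0.5 + 0 = 2.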
def ScaleTest(xVals, yVals):
"""
Returns the pixel coordinates associated with the scaled values in the 2D arrays xVals and yVals
remember in r0 the ordering is r0 = (x0, y0)
"""
# Assume that the correct arrays were passed
dy = yVals[1][0] - yVals[0][0];
dx = xVals[0][1] - xVals[0][0];
if ((dx == 0) or (dy == 0)):
print("ImageSlice: generating scaled axes failed");
print(dx,dy,xVals[0][1],xVals[0][0],yVals[1][0],yVals[0][0],xVals,yVals)
return False;
else:
return True;
| 2.84375
| 3
|
adls_management/utils/settings.py
|
jacbeekers/metalake-file-management
| 0
|
12782684
|
<reponame>jacbeekers/metalake-file-management
import json
from adls_management.utils import messages
class GenericSettings:
"""
Some generic utilities, e.g. reading the config.json
"""
code_version = "0.1.0"
def __init__(self, configuration_file="resources/connection_config.json"):
# config.json settings
self.main_config_file = configuration_file
self.meta_version = None
self.output_directory = None
self.log_config = None
self.suppress_azure_call = False
self.azure_http_proxy = None
self.azure_https_proxy = None
self.storage_account_name = None
self.storage_account_key = None
self.storage_container = None
self.azure_secrets = None
self.file_locations = None
self.incoming = None
self.todo = None
self.busy = None
self.done = None
self.redo = None
self.hist = None
self.process_locations = False
self.download_location = "jsons"
def get_config(self):
"""
        get the main configuration settings. default file is resources/connection_config.json
"""
module = __name__ + ".get_config"
result = messages.message["undetermined"]
try:
with open(self.main_config_file) as config:
data = json.load(config)
# self.schema_directory = self.base_schema_folder + self.meta_version + "/"
if "azure_secrets" in data:
self.azure_secrets = data["azure_secrets"]
if "file_locations" in data:
self.file_locations = data["file_locations"]
if "suppress_azure_call" in data:
if data["suppress_azure_call"] == "True":
self.suppress_azure_call = True
elif data["suppress_azure_call"] == "False":
self.suppress_azure_call = False
else:
print("Incorrect config value >" + data["suppress_azure_call"]
+ "< for suppress_azure_call. Must be True or False. Will default to False")
self.suppress_azure_call = False
if "download_location" in data:
self.download_location = data["download_location"]
result = messages.message["ok"]
except FileNotFoundError:
print("FATAL:", module, "could find not main configuration file >" + self.main_config_file + "<.")
return messages.message["main_config_not_found"]
if self.azure_secrets is None:
print("azure secrets are unknown. Please set azure_secrets in the main config file.")
else:
azure_secrets_result = self.get_azure_secrets(self.azure_secrets)
if azure_secrets_result["code"] != "OK":
# print("get_azure_secrets returned: " + azure_secrets_result["code"], module)
return azure_secrets_result
if self.file_locations is None:
print("file locations are unknown. Please set file_locations in the main config file.")
else:
file_locations_result = self.get_file_locations(self.file_locations)
if file_locations_result["code"] != "OK":
# print("get_file_locations returned: " + file_locations_result["code"], module)
return file_locations_result
return result
def get_azure_proxy(self):
if self.azure_http_proxy == "None":
self.azure_http_proxy = None
if self.azure_https_proxy == "None":
self.azure_https_proxy = None
proxies = {
"http": self.azure_http_proxy,
"https": self.azure_https_proxy
}
return proxies
def get_file_locations(self, config_file):
if config_file is None:
return messages.message["not_provided"]
try:
with open(config_file) as locations:
data = json.load(locations)
self.incoming = data["incoming"]
self.todo = data["todo"]
self.busy = data["busy"]
self.done = data["done"]
self.redo = data["redo"]
self.hist = data["hist"]
self.process_locations = True
except FileNotFoundError as e:
print("File with file_locations >" + self.file_locations + "< could not be found.")
return messages.message["file_locations_not_found"]
return messages.message["ok"]
def get_azure_secrets(self, azure_secrets="resources/azure.secrets"):
module = __name__ + ".get_azure_secrets"
try:
with open(azure_secrets) as azure:
data = json.load(azure)
result = self.determine_azure_secrets(data)
if result["code"] != "OK":
print("ERROR: Determine azure secrets returned: " + result["code"])
return result
except FileNotFoundError:
print("ERROR: Cannot find provided azure_secrets file >" + self.azure_secrets + "<."
, module)
return messages.message["azure_secrets_not_found"]
return messages.message["ok"]
def determine_azure_secrets(self, data):
module = __name__ + ".determine_azure_secrets"
if "meta_version" in data:
main_meta_version = data["meta_version"][:3]
if main_meta_version != "0.1":
print("Unsupported meta_version >" + data["meta_version"] + "<."
, module)
return messages.message["unsupported_meta_version_azure_secrets"]
else:
print("Backward compatible azure secrets file detected. Please update to a later version."
, module)
self.storage_account_name = data["storage_account_name"]
self.storage_account_key = data["storage_account_key"]
self.storage_container = data["container"]
# print("container: ", self.storage_container)
if "azure_http_proxy" in data:
self.azure_http_proxy = data["azure_http_proxy"]
# print("HTTP Proxy for Azure taken from azure secrets file: "
# + self.azure_http_proxy, module)
# else:
# print("No HTTP Proxy for Azure found in azure secrets file. "
# + "This is OK if no proxy is needed or has been set through the environment variable HTTP_PROXY"
# , module)
if "azure_https_proxy" in data:
self.azure_https_proxy = data["azure_https_proxy"]
# print("HTTPS Proxy for Azure taken from azure secrets file: "
# + self.azure_https_proxy, module)
# else:
# print("No HTTPS Proxy for Azure found in azure secrets file. "
# + "This is OK if no proxy is needed or has been set through the environment variable HTTPS_PROXY"
# , module)
return messages.message["ok"]
| 2.265625
| 2
|
src/strategy/Hand.py
|
fuqinshen/Python--
| 31
|
12782685
|
HANDVALUE_GUU = 0
HANDVALUE_CHO = 1
HANDVALUE_PAA = 2
name = ["石头", "剪刀", "布"]
class Hand:
def __init__(self, handvalue):
self.handvalue = handvalue
@staticmethod
def get_hand(handvalue):
return hand[handvalue]
def is_stronger_than(self, h):
return self.fight(h) == 1
def is_weaker_than(self, h):
return self.fight(h) == -1
def fight(self, h):
if self == h:
return 0
elif (self.handvalue + 1) % 3 == h.handvalue:
return 1
else:
return -1
def __str__(self):
return name[self.handvalue]
hand = [Hand(HANDVALUE_GUU), Hand(HANDVALUE_CHO), Hand(HANDVALUE_PAA)]
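# Minimal usage sketch (illustrative, not part of the original module): the cyclic rule
# in fight() means rock beats scissors, scissors beats paper, and paper beats rock.
if __name__ == "__main__":
    rock = Hand.get_hand(HANDVALUE_GUU)
    scissors = Hand.get_hand(HANDVALUE_CHO)
    paper = Hand.get_hand(HANDVALUE_PAA)
    print(rock, "beats", scissors, ":", rock.is_stronger_than(scissors))  # True
    print(scissors, "beats", paper, ":", scissors.is_stronger_than(paper))  # True
    print(paper, "beats", rock, ":", paper.is_stronger_than(rock))  # True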
| 3.515625
| 4
|
data/train/python/6b731386581fcd9f07149ee9c71d2e795e33dc18__init__.py
|
harshp8l/deep-learning-lang-detection
| 84
|
12782686
|
# -*- coding: utf-8 -*-
# vim: sw=4 ts=4 fenc=utf-8 et
# ==============================================================================
# Copyright © 2010 UfSoft.org - <NAME> <<EMAIL>>
#
# License: BSD - Please view the LICENSE file for additional information.
# ==============================================================================
from ilog.views import account, admin, base, networks
from ilog.views.admin import options
from ilog.views.admin.manage import (users, groups, networks as admin_networks,
channels, bots)
all_views = {
# Main Handler
'index' : base.index,
# Account Handlers
'account.rpx' : account.rpx_post,
'account.login' : account.login,
'account.logout' : account.logout,
'account.delete' : account.delete,
'account.profile' : account.profile,
'account.register' : account.register,
'account.activate' : account.activate_account,
'account.dashboard' : account.dashboard,
'account.rpx_providers' : account.rpx_providers_post,
# Network Handlers
'network.index' : networks.index,
'network.channels' : networks.channels,
# Channel Handlers
'channel.index' : '',
'channel.browse' : '',
# Administration
'admin.index' : admin.index,
'admin.manage.groups' : groups.list,
'admin.manage.groups.new' : groups.edit,
'admin.manage.groups.edit' : groups.edit,
'admin.manage.groups.delete' : groups.delete,
'admin.manage.users' : users.list,
'admin.manage.users.new' : users.edit,
'admin.manage.users.edit' : users.edit,
'admin.manage.users.delete' : users.delete,
'admin.manage.networks' : admin_networks.list,
'admin.manage.networks.new' : admin_networks.edit,
'admin.manage.networks.edit' : admin_networks.edit,
'admin.manage.networks.delete' : admin_networks.delete,
'admin.manage.bots' : users.list,
'admin.manage.bots.new' : users.edit,
'admin.manage.bots.edit' : users.edit,
'admin.manage.bots.delete' : users.delete,
'admin.options.basic' : options.basic_options,
'admin.options.advanced' : options.advanced_options,
'admin.options.rpxnow' : options.rpxnow_options,
'admin.options.gravatar' : options.gravatar_options,
'admin.options.email' : options.email_options,
'admin.options.cache' : options.cache_options,
}
| 1.710938
| 2
|
utils/aneurysm_utils/evaluation.py
|
leoseg/AneurysmSegmentation
| 1
|
12782687
|
import os
import json
import multiprocessing
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import matplotlib.animation
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from scipy.spatial import distance_matrix
import nibabel as nib
from scipy.ndimage.interpolation import zoom
from scipy import ndimage
from sklearn.metrics import jaccard_score
from skimage.metrics import hausdorff_distance
from scipy.stats import pearsonr
from aneurysm_utils.preprocessing import resize_mri
from aneurysm_utils.environment import Environment
from collections import defaultdict
from sklearn import metrics as sk_metrics
from sklearn.preprocessing import MinMaxScaler
#import open3d
def evaluate_model(
y_true: list, y_pred: list, segmentation: bool = None, prefix: str = None
) -> dict:
metrics = {}
if segmentation:
y_true = np.concatenate(y_true).ravel()
y_pred = np.concatenate(y_pred).ravel()
if not prefix:
prefix = ""
else:
prefix = prefix + "_"
metrics[prefix + "accuracy"] = sk_metrics.accuracy_score(y_true, y_pred)
metrics[prefix + "bal_acc"] = sk_metrics.balanced_accuracy_score(y_true, y_pred)
try:
metrics[prefix + "precision"] = sk_metrics.precision_score(y_true, y_pred)
metrics[prefix + "recall"] = sk_metrics.recall_score(y_true, y_pred)
metrics[prefix + "spec"] = sk_metrics.recall_score(y_true, y_pred, pos_label=0)
metrics[prefix + "sen"] = sk_metrics.recall_score(y_true, y_pred, pos_label=1)
metrics[prefix + "f1"] = sk_metrics.f1_score(y_true, y_pred)
except Exception:
print(
"precision/recall/spec/sen/f1 are not supported for non-binary classification."
)
print("Accuracy (" + prefix + "): " + str(metrics[prefix + "accuracy"]))
print("Balanced Accuracy (" + prefix + "): " + str(metrics[prefix + "bal_acc"]))
print(sk_metrics.classification_report(y_true, y_pred))
return metrics
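# Illustrative usage sketch (hypothetical inputs, not from the original module):
#   metrics = evaluate_model([0, 1, 1, 0], [0, 1, 0, 0], prefix="val")
#   metrics["val_accuracy"]  # -> 0.75
# For segmentation=True, y_true and y_pred are lists of per-volume voxel arrays,
# which are flattened and concatenated before scoring.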
# Transparent colormap (alpha to red), that is used for plotting an overlay.
# See https://stackoverflow.com/questions/37327308/add-alpha-to-an-existing-matplotlib-colormap
alpha_to_red_cmap = np.zeros((256, 4))
alpha_to_red_cmap[:, 0] = 0.8
alpha_to_red_cmap[:, -1] = np.linspace(0, 1, 256) # cmap.N-20) # alpha values
alpha_to_red_cmap = mpl.colors.ListedColormap(alpha_to_red_cmap)
red_to_alpha_cmap = np.zeros((256, 4))
red_to_alpha_cmap[:, 0] = 0.8
red_to_alpha_cmap[:, -1] = np.linspace(1, 0, 256) # cmap.N-20) # alpha values
red_to_alpha_cmap = mpl.colors.ListedColormap(red_to_alpha_cmap)
def animate_slices(
struct_arr,
overlay=None,
axis=0,
reverse_direction=False,
interval=40,
vmin=None,
vmax=None,
overlay_vmin=None,
overlay_vmax=None,
):
"""
Create a matplotlib animation that moves through a 3D image along a specified axis.
"""
if vmin is None:
vmin = struct_arr.min()
if vmax is None:
vmax = struct_arr.max()
if overlay_vmin is None and overlay is not None:
overlay_vmin = overlay.min()
if overlay_vmax is None and overlay is not None:
overlay_vmax = overlay.max()
fig, ax = plt.subplots()
axis_label = ["x", "y", "z"][axis]
# TODO: If I select slice 50 here at the beginning, the plots look different.
im = ax.imshow(
np.take(struct_arr, 0, axis=axis),
vmin=vmin,
vmax=vmax,
cmap="gray",
interpolation=None,
animated=True,
)
if overlay is not None:
im_overlay = ax.imshow(
np.take(overlay, 0, axis=axis),
vmin=overlay_vmin,
vmax=overlay_vmax,
cmap=alpha_to_red_cmap,
interpolation=None,
animated=True,
)
text = ax.text(
0.03,
0.97,
"{}={}".format(axis_label, 0),
color="white",
horizontalalignment="left",
verticalalignment="top",
transform=ax.transAxes,
)
ax.axis("off")
def update(i):
im.set_array(np.take(struct_arr, i, axis=axis))
if overlay is not None:
im_overlay.set_array(np.take(overlay, i, axis=axis))
text.set_text("{}={}".format(axis_label, i))
return im, text
num_frames = struct_arr.shape[axis]
if reverse_direction:
frames = np.arange(num_frames - 1, 0, -1)
else:
frames = np.arange(0, num_frames)
return mpl.animation.FuncAnimation(
fig, update, frames=frames, interval=interval, blit=True
)
def plot_slices(
struct_arr,
num_slices=7,
cmap="gray",
vmin=None,
vmax=None,
overlay=None,
overlay_cmap=alpha_to_red_cmap,
overlay_vmin=None,
overlay_vmax=None,
):
"""
Plot equally spaced slices of a 3D image (and an overlay) along every axis
Args:
struct_arr (3D array or tensor): The 3D array to plot (usually from a nifti file).
num_slices (int): The number of slices to plot for each dimension.
cmap: The colormap for the image (default: `'gray'`).
vmin (float): Same as in matplotlib.imshow. If `None`, take the global minimum of `struct_arr`.
vmax (float): Same as in matplotlib.imshow. If `None`, take the global maximum of `struct_arr`.
overlay (3D array or tensor): The 3D array to plot as an overlay on top of the image. Same size as `struct_arr`.
        overlay_cmap: The colormap for the overlay (default: `alpha_to_red_cmap`).
overlay_vmin (float): Same as in matplotlib.imshow. If `None`, take the global minimum of `overlay`.
overlay_vmax (float): Same as in matplotlib.imshow. If `None`, take the global maximum of `overlay`.
"""
if vmin is None:
vmin = struct_arr.min()
if vmax is None:
vmax = struct_arr.max()
if overlay_vmin is None and overlay is not None:
overlay_vmin = overlay.min()
if overlay_vmax is None and overlay is not None:
overlay_vmax = overlay.max()
print(vmin, vmax, overlay_vmin, overlay_vmax)
fig, axes = plt.subplots(3, num_slices, figsize=(15, 6))
intervals = np.asarray(struct_arr.shape) / num_slices
for axis, axis_label in zip([0, 1, 2], ["x", "y", "z"]):
for i, ax in enumerate(axes[axis]):
i_slice = int(np.round(intervals[axis] / 2 + i * intervals[axis]))
# print(axis_label, 'plotting slice', i_slice)
plt.sca(ax)
plt.axis("off")
plt.imshow(
sp.ndimage.rotate(np.take(struct_arr, i_slice, axis=axis), 90),
vmin=vmin,
vmax=vmax,
cmap=cmap,
interpolation=None,
)
plt.text(
0.03,
0.97,
"{}={}".format(axis_label, i_slice),
color="white",
horizontalalignment="left",
verticalalignment="top",
transform=ax.transAxes,
)
if overlay is not None:
plt.imshow(
sp.ndimage.rotate(np.take(overlay, i_slice, axis=axis), 90),
cmap=overlay_cmap,
vmin=overlay_vmin,
vmax=overlay_vmax,
interpolation=None,
)
def draw_mask_3d(image:np.array,ax:Axes3D=None,zorder:int=0,markersize:float=0.8,alpha:float=1,c=None):
"""
Draws all points which are not zero of given image in scatterplot
Parameters
----------
image: where to get mask from
ax: if given uses this axis object
zorder: order of points drawn
markersize: size of points
alpha: transparency of points
    c: if given (any non-None value), points are drawn in black
"""
fig = plt.figure()
if ax==None:
ax = Axes3D(fig)
else:
ax=ax
for cluster in range(1,int(np.unique(image)[-1]+1)):
if len(np.argwhere(image==cluster))==0:
print("no aneurysm found")
continue
if c==None:
ax.scatter(np.argwhere(image==cluster).T[0],np.argwhere(image==cluster).T[1],np.argwhere(image==cluster).T[2],s=markersize,alpha=alpha,zorder=zorder)
else:
ax.scatter(np.argwhere(image==cluster).T[0],np.argwhere(image==cluster).T[1],np.argwhere(image==cluster).T[2],s=3,alpha=alpha,zorder=zorder,c="black")
def draw_image(image:np.array,ax:Axes3D=None,zorder:int=0,markersize:float=0.8,transparency:bool=True):
"""
Draws all points which are not zero of given image in scatterplot in colors according to their intensity
Parameters
----------
image: where to get mask from
ax: if given uses this axis object
zorder: order of points drawn
markersize: size of points
transparency: if true scales transparency with intensity values
"""
    if ax is None:
        fig = plt.figure()
        ax = Axes3D(fig)
    if transparency:
        alpha = image[image > 0]
        alpha = np.where(alpha > 0.15, alpha, 0.01)
    else:
        alpha = 1
    cmap = plt.get_cmap('YlOrRd')
    points = np.argwhere(image > 0)
    ax.scatter(points.T[0], points.T[1], points.T[2], s=markersize, alpha=alpha, zorder=zorder, c=cmap(image[image > 0]))
def draw_bounding_box(candidates,ax:Axes3D=None):
"""
Draws bounding box of given bounding box dictionary -> see postprocessing function
Parameters
----------
    candidates: list of dictionaries whose 'vertices' entry contains the corner points of the bounding box
ax: if given uses this axis object
"""
    if ax is None:
        fig = plt.figure()
        ax = Axes3D(fig)
    else:
        fig = ax.get_figure()
for candidate in candidates:
Z= candidate["vertices"]
Z=np.array(Z)
verts= [(Z[0],Z[1]),(Z[0],Z[2]),(Z[0],Z[3]),(Z[6],Z[1]),(Z[7],Z[1]),(Z[2],Z[5]),
(Z[2],Z[7]),(Z[3],Z[5]),(Z[3],Z[6]),(Z[4],Z[7]),(Z[4],Z[6]),(Z[4],Z[5])]
for element in verts:
x=[element[0][0],element[1][0]]
y=[element[0][1],element[1][1]]
z=[element[0][2],element[1][2]]
ax.plot(x,y,z,c='r',linewidth=2,alpha=1)
fig.show()
| 1.828125
| 2
|
power_generalise_gs.py
|
syzhang/adec_power
| 0
|
12782688
|
<gh_stars>0
"""
simulated power calculation for the generalisation instrumental avoidance task (with generalisation in the model)
"""
import sys, os
import pickle
import numpy as np
import pandas as pd
import pystan
# from hbayesdm.models import generalise_gs
# from hbayesdm import rhat, print_fit, plot_hdi, hdi
def sigmoid(p):
return 1./(1.+np.exp(-p))
def softmax_perception(s_cue, Q, beta, bias):
"""probability of avoidance given stim and params"""
avoid_cost = 0.2 # fixed
if s_cue == 1:
pred_V = 0.75*Q[s_cue-1] + 0.25*Q[2-1]
elif s_cue == 3:
pred_V = 0.75*Q[s_cue-1] + 0.25*Q[2-1]
elif s_cue == 5:
pred_V = 0.75*Q[s_cue-1] + 0.25*Q[6-1]
elif s_cue == 7:
pred_V = 0.75*Q[s_cue-1] + 0.25*Q[6-1]
else:
pred_V = Q[s_cue-1]
# avoidance probability
gx = 1./(1. + np.exp(-beta*(0. - pred_V - avoid_cost - bias)))
return gx
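# Illustrative check of softmax_perception (an editorial sketch with assumed parameter
# values, not fitted estimates): with all Q values at zero and no bias, the avoidance
# probability reduces to sigmoid(-beta * avoid_cost).
#
#     Q_demo = np.zeros(7)
#     p = softmax_perception(s_cue=2, Q=Q_demo, beta=9.5, bias=0.0)
#     # expected: 1 / (1 + exp(9.5 * 0.2)), about 0.13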
def draw_cue(num_trial):
"""drawing cues based on given probability"""
trial_type = []
p_cues = [0.052, 0.264, 0.052, 0.264,
0.052, 0.264, 0.052]
for i in range(num_trial):
smp = np.random.choice(7, p=p_cues)
trial_type.append(smp+1) # match matlab number
return trial_type
def model_generalise_gs(param_dict, subjID, num_trial=190):
"""simulate shapes, avoid actions, and shock outcomes"""
# load predefined image sequences (38 trials x 5 blocks)
# trial_type = np.squeeze(pd.read_csv('./probs/generalise_stim.csv').values) # 1:7
trial_type = draw_cue(num_trial)
num_state = len(np.unique(trial_type))
# initialise values (values from AN's VBA code)
# Q = sigmoid(-0.2) * np.ones(num_state) # 7 possible cues
Q = np.zeros(num_state) # 7 possible cues
alpha = sigmoid(0.95) * np.ones(1) # initial learning rate
# initialise output
data_out = []
# simulate trials
for t in range(num_trial):
# a cue is shown (not sure how it's generated)
s_cue = int(trial_type[t])
# avoid or not
p_avoid = softmax_perception(s_cue, Q, param_dict['beta'], param_dict['bias'])
a = int(np.random.binomial(size=1, n=1, p=p_avoid))
# a = int(np.random.choice([0,1], size=1,
# p=[1-p_avoid, p_avoid], replace=True))
# print(p_avoid, a)
# deliver shock or not
if (s_cue == 2 or s_cue == 4) and a == 0: # if CS+1 or 2 shock trials and no avoidance made
r = -1
else:
r = 0
# define sensory params
# mean_theta = 0.25
# rhos = [0.25-mean_theta, 0.25, 0.25+mean_theta, 0.75-2*mean_theta, 0.75-mean_theta, 0.75, 0.75+mean_theta]
rhos = np.array([0.0, 0.25, 0.5, 0.25, 0.5, 0.75, 1.0])
# compute PE and update values
if a == 0: # did not avoid
# PE update
PE = r - Q[s_cue-1]
# define sigma
if r == 0: # no shock
sigma_t = param_dict['sigma_n']
else: # shock
sigma_t = param_dict['sigma_a']
# update Q
# for s in range(num_state):
# rho = rhos[s]
# G = 1./np.exp((rho-cue_rho)**2. / (2.*sigma_t**2.))
# Q[s] += param_dict['kappa'] * alpha * PE * G
# current cue rho value
cue_rho = rhos[s_cue-1] * np.ones(num_state)
diff2 = (rhos-cue_rho)**2. / (2.*(sigma_t**2.))
G = 1./np.exp(diff2)
Q += param_dict['kappa'] * alpha * PE * G
else: # avoided
PE = 0.
Q = Q
# update alpha
alpha = param_dict['eta']*np.abs(PE) + (1-param_dict['eta'])*alpha
# output
data_out.append([subjID, t, s_cue, a, r])
df_out = pd.DataFrame(data_out)
df_out.columns = ['subjID', 'trial', 'cue', 'choice', 'outcome']
# print(df_out)
return df_out
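# Hedged example of simulating a single synthetic subject (the parameter values below are
# illustrative assumptions only):
#
#     demo_params = {'sigma_a': 0.5, 'sigma_n': 0.05, 'eta': 0.2,
#                    'kappa': 0.7, 'beta': 8.0, 'bias': 0.3}
#     df_demo = model_generalise_gs(demo_params, subjID=0, num_trial=190)
#     # df_demo has columns ['subjID', 'trial', 'cue', 'choice', 'outcome']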
def sim_generalise_gs(param_dict, sd_dict, group_name, seed,
num_sj=50, num_trial=190, model_name='generalise_gs'):
"""simulate generalise instrumental avoidance task for multiple subjects"""
multi_subject = []
# generate new params
np.random.seed(seed)
sample_params = dict()
for key in param_dict:
sample_params[key] = np.random.normal(param_dict[key], sd_dict[key], size=1)[0]
for sj in range(num_sj):
df_sj = model_generalise_gs(sample_params, sj, num_trial)
multi_subject.append(df_sj)
df_out = pd.concat(multi_subject)
# saving output
output_dir = './tmp_output/generalise_sim/'
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
f_name = model_name+'_'+group_name+'_'+str(seed)
df_out.to_csv(output_dir+f_name+'.txt', sep='\t', index=False)
print(df_out)
def generalise_gs_preprocess_func(txt_path):
"""parse simulated data for pystan"""
# Iterate through grouped_data
subj_group = pd.read_csv(txt_path, sep='\t')
# Use general_info(s) about raw_data
subj_ls = np.unique(subj_group['subjID'])
n_subj = len(subj_ls)
t_subjs = np.array([subj_group[subj_group['subjID']==x].shape[0] for x in subj_ls])
t_max = max(t_subjs)
# Initialize (model-specific) data arrays
cue = np.full((n_subj, t_max), 0, dtype=int)
choice = np.full((n_subj, t_max), -1, dtype=int)
outcome = np.full((n_subj, t_max), -1, dtype=float)
# Write from subj_data to the data arrays
for s in range(n_subj):
subj_data = subj_group[subj_group['subjID']==s]
t = t_subjs[s]
cue[s][:t] = subj_data['cue']
choice[s][:t] = subj_data['choice']
outcome[s][:t] = -1 * np.abs(subj_data['outcome']) # Use abs
# Wrap into a dict for pystan
data_dict = {
'N': n_subj,
'T': t_max,
'Tsubj': t_subjs,
'cue': cue,
'choice': choice,
'outcome': outcome,
}
# print(data_dict)
# Returned data_dict will directly be passed to pystan
return data_dict
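# Sketch of the resulting Stan data structure (shapes assume the defaults used in this
# script; the txt file is the one written by sim_generalise_gs):
#
#     data_dict = generalise_gs_preprocess_func('./tmp_output/generalise_sim/generalise_gs_hc_1.txt')
#     # data_dict['cue'].shape     == (N, T), e.g. (50, 190)
#     # data_dict['choice'].shape  == (N, T), padded (unused) trials stay at -1
#     # data_dict['outcome'].shape == (N, T), values 0 or -1 (shock coded as negative)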
if __name__ == "__main__":
# parameters AMT dataset (highest 10% approx patient params, low 40% approx as control params)
# params made up
param_dict_hc = {
'sigma_a': 0.45, # generalisation param for shock
'sigma_n': 0.06, # generalisation param for no shock
'eta': 0.17, # p_h dynamic learning rate
'kappa': 0.75, # p_h dynamic learning rate
'beta': 9.5, # softmax beta
'bias': 0.3 # softmax bias
}
# hc sd
sd_dict_hc = {
'sigma_a': 0.05, # generalisation param for shock
'sigma_n': 0.01, # generalisation param for no shock
'eta': 0.1, # p_h dynamic learning rate
'kappa': 0.2, # p_h dynamic learning rate
'beta': 2, # softmax beta
'bias': 0.1 # softmax bias
}
# patient params
param_dict_pt = {
'sigma_a': 0.85, # generalisation param for shock
'sigma_n': 0.03, # generalisation param for no shock
'eta': 0.18, # p_h dynamic learning rate
'kappa': 0.76, # p_h dynamic learning rate
'beta': 4.3, # softmax beta
'bias': 0.3 # softmax bias
}
# patient sd
sd_dict_pt = {
'sigma_a': 0.05, # generalisation param for shock
'sigma_n': 0.01, # generalisation param for no shock
'eta': 0.10, # p_h dynamic learning rate
'kappa': 0.2, # p_h dynamic learning rate
'beta': 2, # softmax beta
'bias': 0.1 # softmax bias
}
# parsing cl arguments
group_name = sys.argv[1] # pt=patient, hc=control
seed_num = int(sys.argv[2]) # seed number
subj_num = int(sys.argv[3]) # subject number to simulate
trial_num = int(sys.argv[4]) # trial number to simulate
model_name = 'generalise_gs'
    if group_name == 'hc':
        # simulate hc subjects with given params
        sim_generalise_gs(param_dict_hc, sd_dict_hc, group_name, seed=seed_num,
                          num_sj=subj_num, num_trial=trial_num, model_name=model_name)
    elif group_name == 'pt':
        # simulate pt subjects with given params
        sim_generalise_gs(param_dict_pt, sd_dict_pt, group_name, seed=seed_num,
                          num_sj=subj_num, num_trial=trial_num, model_name=model_name)
else:
print('check group name (hc or pt)')
# parse simulated data
txt_path = f'./tmp_output/generalise_sim/generalise_gs_{group_name}_{seed_num}.txt'
data_dict = generalise_gs_preprocess_func(txt_path)
# fit stan model
sm = pystan.StanModel(file='generalise_gs.stan')
fit = sm.sampling(data=data_dict, iter=3000, chains=2)
print(fit)
# saving
pars = ['mu_sigma_a', 'mu_sigma_n', 'mu_eta', 'mu_kappa', 'mu_beta', 'mu_bias']
extracted = fit.extract(pars=pars, permuted=True)
# print(extracted)
sfile = f'./tmp_output/generalise_sim/{group_name}_sim_{seed_num}.pkl'
with open(sfile, 'wb') as op:
tmp = { k: v for k, v in extracted.items() if k in pars } # dict comprehension
pickle.dump(tmp, op)
# hbayesdm method
# # fit
# # Run the model and store results in "output"
# output = generalise_gs('./tmp_output/generalise_sim/'+model_name+'_'+group_name+'_'+str(seed_num)+'.txt', niter=3000, nwarmup=1500, nchain=4, ncore=16)
# # debug
# print(output.fit)
# # saving
# sfile = './tmp_output/generalise_sim/'+group_name+'_sim_'+str(seed_num)+'.pkl'
# with open(sfile, 'wb') as op:
# tmp = { k: v for k, v in output.par_vals.items() if k in ['mu_sigma_a', 'mu_sigma_n', 'mu_eta', 'mu_kappa', 'mu_beta', 'mu_bias'] } # dict comprehension
# pickle.dump(tmp, op)
| 2.203125
| 2
|
pure_fa_exporter.py
|
zleinweber/pure-exporter
| 22
|
12782689
|
#!/usr/bin/env python
from flask import Flask, request, abort, make_response
from flask_httpauth import HTTPTokenAuth
from urllib.parse import parse_qs
import re
from prometheus_client import generate_latest, CollectorRegistry, CONTENT_TYPE_LATEST
from flasharray_collector import FlasharrayCollector
import logging
class InterceptRequestMiddleware:
def __init__(self, wsgi_app):
self.wsgi_app = wsgi_app
def __call__(self, environ, start_response):
d = parse_qs(environ['QUERY_STRING'])
api_token = d.get('apitoken', [''])[0] # Returns the first api-token value
if 'HTTP_AUTHORIZATION' not in environ:
environ['HTTP_AUTHORIZATION'] = 'Bearer ' + api_token
return self.wsgi_app(environ, start_response)
app = Flask(__name__)
app.logger.setLevel(logging.INFO)
app.wsgi_app = InterceptRequestMiddleware(app.wsgi_app)
auth = HTTPTokenAuth(scheme='Bearer')
@auth.verify_token
def verify_token(token):
pattern_str = r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
regx = re.compile(pattern_str)
match = regx.search(token)
return token if match is not None else False
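# Editorial note: the pattern above accepts lowercase UUID-formatted API tokens only,
# e.g. "01234567-89ab-cdef-0123-456789abcdef"; anything else fails authentication.
# A quick hedged self-check (the token value is illustrative):
#
#     assert verify_token("01234567-89ab-cdef-0123-456789abcdef")
#     assert not verify_token("not-a-token")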
@app.route('/')
def route_index():
"""Display an overview of the exporters capabilities."""
return '''
    <h1>Pure Storage Prometheus Exporter</h1>
<table>
<thead>
<tr>
<td>Type</td>
<td>Endpoint</td>
            <td>GET parameters</td>
            <td>Description</td>
</tr>
</thead>
<tbody>
<tr>
<td>Full metrics</td>
<td><a href="/metrics?endpoint=host&apitoken=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx">/metrics</a></td>
            <td>endpoint, apitoken (optional, required only if authentication token is not provided)</td>
            <td>Retrieves all metrics</td>
</tr>
<tr>
<td>Volume metrics</td>
<td><a href="/metrics/volumes?endpoint=host&apitoken=<KEY>">/metrics/volumes</a></td>
            <td>endpoint, apitoken (optional, required only if authentication token is not provided)</td>
<td>Retrieves only volume related metrics</td>
</tr>
<tr>
<td>Host metrics</td>
<td><a href="/metrics/hosts?endpoint=host&apitoken=<KEY>">/metrics/hosts</a></td>
            <td>endpoint, apitoken (optional, required only if authentication token is not provided)</td>
<td>Retrieves only host related metrics</td>
</tr>
<tr>
<td>Pod metrics</td>
<td><a href="/metrics/pods?endpoint=host&apitoken=<KEY>">/metrics/pods</a></td>
            <td>endpoint, apitoken (optional, required only if authentication token is not provided)</td>
<td>Retrieves only pod related metrics</td>
</tr>
</tbody>
</table>
'''
@app.route('/metrics/<m_type>', methods=['GET'])
@auth.login_required
def route_flasharray(m_type: str):
"""Produce FlashArray metrics."""
if not m_type in ['array', 'volumes', 'hosts', 'pods']:
m_type = 'all'
collector = FlasharrayCollector
registry = CollectorRegistry()
try:
endpoint = request.args.get('endpoint', None)
token = auth.current_user()
registry.register(collector(endpoint, token, m_type))
except Exception as e:
        app.logger.warning('%s: %s', collector.__name__, str(e))
abort(500)
resp = make_response(generate_latest(registry), 200)
resp.headers['Content-type'] = CONTENT_TYPE_LATEST
return resp
@app.route('/metrics', methods=['GET'])
def route_flasharray_all():
return route_flasharray('all')
@app.errorhandler(400)
def route_error_400(error):
"""Handle invalid request errors."""
return 'Invalid request parameters', 400
@app.errorhandler(404)
def route_error_404(error):
""" Handle 404 (HTTP Not Found) errors."""
return 'Not found', 404
@app.errorhandler(500)
def route_error_500(error):
"""Handle server-side errors."""
return 'Internal server error', 500
# Run in debug mode when not called by WSGI
if __name__ == "__main__":
app.logger.setLevel(logging.DEBUG)
app.logger.debug('running in debug mode...')
app.run(host="0.0.0.0", port=8080, debug=True)
| 2.203125
| 2
|
LumiGAN/matplotlibstyle.py
|
acdc-pv-unsw/LumiGAN
| 0
|
12782690
|
"Defines matplotlib stylesheet"
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
# %%-- Matplotlib style sheet
mpl.style.use('seaborn-paper')
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] ='STIXGeneral'
mpl.rcParams['font.size'] = 14
mpl.rcParams['mathtext.default'] = 'rm'
mpl.rcParams['mathtext.fallback'] = 'cm'
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['axes.labelweight'] = 'normal'
mpl.rcParams['axes.grid.which']='both'
mpl.rcParams['axes.xmargin']=0.05
mpl.rcParams['axes.ymargin']=0.05
mpl.rcParams['grid.linewidth']= 0
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['xtick.bottom'] = True
mpl.rcParams['xtick.top'] = True
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.left'] = True
mpl.rcParams['ytick.right'] = True
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['ytick.labelsize'] = 14
mpl.rcParams['legend.fontsize'] = 14
mpl.rcParams['figure.titlesize'] = 18
mpl.rcParams['figure.figsize'] = (8.09,5)
mpl.rcParams['figure.autolayout'] = False
mpl.rcParams['figure.dpi'] = 75
mpl.rcParams['image.cmap'] = "viridis"
mpl.rcParams['savefig.dpi'] = 150
mpl.rcParams['errorbar.capsize'] = 3
mpl.rcParams['axes.prop_cycle'] = plt.cycler(color = plt.cm.viridis([0.8,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]))
mpl.rcParams['axes.titlesize'] = 16
# %%-
| 2.515625
| 3
|
web-server/model/__init__.py
|
sanfengliao/DeepNavi
| 0
|
12782691
|
<gh_stars>0
from .map import *
from .edge import *
from .point import *
from .basic_pb2 import *
from .loc import *
| 1.070313
| 1
|
solutions/2021/prob_07.py
|
PolPtoAmo/HPCodeWarsBCN
| 1
|
12782692
|
chars = list()
found_repeat = False
text = input().lower()
for punct in ".,#'\":;-":
    text = text.replace(punct, "")
for char in text:
    if char in chars:
        print("Not an isogram")
        found_repeat = True
        break
    chars.append(char)
if not found_repeat:
    print("Isogram detected")
| 4.03125
| 4
|
relevanceai/operations_new/cluster/ops.py
|
RelevanceAI/RelevanceAI
| 21
|
12782693
|
<reponame>RelevanceAI/RelevanceAI
import warnings
from copy import deepcopy
from typing import Optional, Union, Callable, Dict, Any, Set, List
from relevanceai.utils.decorators.analytics import track
from relevanceai.operations_new.apibase import OperationAPIBase
from relevanceai.operations_new.cluster.alias import ClusterAlias
from relevanceai.operations_new.cluster.base import ClusterBase
from relevanceai.constants import Warning
from relevanceai.constants.errors import MissingClusterError
class ClusterOps(ClusterBase, OperationAPIBase, ClusterAlias):
"""
Cluster-related functionalities
"""
# These need to be instantiated on __init__
model_name: str
def __init__(
self,
dataset_id: str,
vector_fields: list,
alias: str,
cluster_field: str = "_cluster_",
verbose: bool = False,
model=None,
model_kwargs=None,
*args,
**kwargs,
):
"""
ClusterOps objects
"""
self.dataset_id = dataset_id
self.vector_fields = vector_fields
self.cluster_field = cluster_field
self.verbose = verbose
self.model = model
if isinstance(self.model, str):
self.model_name = self.model
else:
self.model_name = str(self.model)
if model_kwargs is None:
model_kwargs = {}
self.model_kwargs = model_kwargs
for k, v in kwargs.items():
setattr(self, k, v)
super().__init__(
dataset_id=dataset_id,
vector_fields=vector_fields,
alias=alias,
cluster_field=cluster_field,
verbose=verbose,
model=model,
model_kwargs=model_kwargs,
**kwargs,
)
# alias is set after model so that we can get the number of clusters
        # if the model needs to be instantiated
self.alias = self._get_alias(alias)
def _operate(self, cluster_id: str, field: str, output: dict, func: Callable):
"""
Internal function for operations
It takes a cluster_id, a field, an output dictionary, and a function, and then it gets all the
documents in the cluster, gets the field across all the documents, and then applies the function
to the field
Parameters
----------
        cluster_id : str
            The id of the cluster whose documents are fetched.
        field : str
            The field you want to get the value for.
        output : dict
            Dictionary collecting the results, keyed by cluster id.
        func : Callable
            Function applied to the array of field values for the cluster.
"""
cluster_field = self._get_cluster_field_name()
# TODO; change this to fetch all documents
documents = self._get_all_documents(
self.dataset_id,
filters=[
{
"field": cluster_field,
"filter_type": "exact_match",
"condition": "==",
"condition_value": cluster_id,
},
{
"field": field,
"filter_type": "exists",
"condition": ">=",
"condition_value": " ",
},
],
select_fields=[field, cluster_field],
show_progress_bar=False,
)
# get the field across each
arr = self.get_field_across_documents(field, documents)
output[cluster_id] = func(arr)
def _operate_across_clusters(self, field: str, func: Callable):
output: Dict[str, Any] = dict()
for cluster_id in self.list_cluster_ids():
self._operate(cluster_id=cluster_id, field=field, output=output, func=func)
return output
def list_cluster_ids(
self,
alias: str = None,
minimum_cluster_size: int = 0,
num_clusters: int = 1000,
):
"""
List unique cluster IDS
Example
---------
.. code-block::
from relevanceai import Client
client = Client()
cluster_ops = client.ClusterOps(
alias="kmeans_8", vector_fields=["sample_vector_]
)
cluster_ops.list_cluster_ids()
Parameters
-------------
alias: str
The alias to use for clustering
minimum_cluster_size: int
The minimum size of the clusters
num_clusters: int
The number of clusters
"""
# Mainly to be used for subclustering
# Get the cluster alias
cluster_field = self._get_cluster_field_name()
# currently the logic for facets is that when it runs out of pages
# it just loops - therefore we need to store it in a simple hash
# and then add them to a list
all_cluster_ids: Set = set()
while len(all_cluster_ids) < num_clusters:
facet_results = self.datasets.facets(
dataset_id=self.dataset_id,
fields=[cluster_field],
page_size=int(self.config["data.max_clusters"]),
page=1,
asc=True,
)
if "results" in facet_results:
facet_results = facet_results["results"]
if cluster_field not in facet_results:
raise MissingClusterError(alias=alias)
for facet in facet_results[cluster_field]:
if facet["frequency"] > minimum_cluster_size:
curr_len = len(all_cluster_ids)
all_cluster_ids.add(facet[cluster_field])
new_len = len(all_cluster_ids)
if new_len == curr_len:
return list(all_cluster_ids)
return list(all_cluster_ids)
def insert_centroids(
self,
centroid_documents,
) -> None:
"""
Insert centroids
Centroids look below
.. code-block::
cluster_ops = client.ClusterOps(
vector_field=["sample_1_vector_"],
alias="sample"
)
cluster_ops.insert_centroids(
                centroid_documents={
"cluster-0": [1, 1, 1],
"cluster-2": [2, 1, 1]
}
)
"""
# Centroid documents are in the format {"cluster-0": [1, 1, 1]}
return self.datasets.cluster.centroids.insert(
dataset_id=self.dataset_id,
cluster_centers=self.json_encoder(centroid_documents),
vector_fields=self.vector_fields,
alias=self.alias,
)
def create_centroids(self):
"""
Calculate centroids from your vectors
Example
--------
.. code-block::
from relevanceai import Client
client = Client()
ds = client.Dataset("sample")
cluster_ops = ds.ClusterOps(
alias="kmeans-25",
vector_fields=['sample_vector_']
)
centroids = cluster_ops.create_centroids()
"""
# Get an array of the different vectors
if len(self.vector_fields) > 1:
raise NotImplementedError(
"Do not currently support multiple vector fields for centroid creation."
)
# calculate the centroids
centroid_vectors = self.calculate_centroids()
self.insert_centroids(
centroid_documents=centroid_vectors,
)
return centroid_vectors
def calculate_centroids(self):
import numpy as np
# calculate the centroids
centroid_vectors = {}
def calculate_centroid(vectors):
X = np.array(vectors)
return X.mean(axis=0)
centroid_vectors = self._operate_across_clusters(
field=self.vector_fields[0], func=calculate_centroid
)
# Does this insert properly?
if isinstance(centroid_vectors, dict):
centroid_vectors = [
{"_id": k, self.vector_fields[0]: v}
for k, v in centroid_vectors.items()
]
return centroid_vectors
def get_centroid_documents(self):
centroid_vectors = {}
if self.model._centroids is not None:
centroid_vectors = self.model._centroids
# get the cluster label function
labels = range(len(centroid_vectors))
cluster_ids = self.format_cluster_labels(labels)
if len(self.vector_fields) > 1:
warnings.warn(
"Currently do not support inserting centroids with multiple vector fields"
)
centroids = [
{"_id": k, self.vector_fields[0]: v}
for k, v in zip(cluster_ids, centroid_vectors)
]
else:
centroids = self.create_centroids()
return centroids
def list_closest(
self,
cluster_ids: Optional[list] = None,
select_fields: Optional[List] = None,
approx: int = 0,
page_size: int = 1,
page: int = 1,
similarity_metric: str = "cosine",
filters: Optional[list] = None,
facets: Optional[list] = None,
include_vector: bool = False,
cluster_properties_filters: Optional[Dict] = None,
include_count: bool = False,
include_facets: bool = False,
verbose: bool = False,
):
"""
        List the documents closest to the center.
Parameters
----------
dataset_id: string
Unique name of dataset
vector_fields: list
The vector fields where a clustering task runs
cluster_ids: list
Any of the cluster ids
alias: string
Alias is used to name a cluster
centroid_vector_fields: list
Vector fields stored
select_fields: list
Fields to include in the search results, empty array/list means all fields
approx: int
Used for approximate search to speed up search. The higher the number, faster the search but potentially less accurate
sum_fields: bool
            Whether to sum the multiple vectors similarity search score as 1 or separate
page_size: int
Size of each page of results
page: int
Page of the results
similarity_metric: string
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
filters: list
Query for filtering the search results
facets: list
Fields to include in the facets, if [] then all
min_score: int
Minimum score for similarity metric
include_vectors: bool
Include vectors in the search results
include_count: bool
Include the total count of results in the search results
include_facets: bool
Include facets in the search results
cluster_properties_filter: dict
Filter if clusters with certain characteristics should be hidden in results
"""
if cluster_properties_filters is None:
cluster_properties_filters = {}
return self.datasets.cluster.centroids.list_closest_to_center(
dataset_id=self.dataset_id,
vector_fields=self.vector_fields,
alias=self.alias,
cluster_ids=cluster_ids,
select_fields=select_fields,
approx=approx,
page_size=page_size,
page=page,
similarity_metric=similarity_metric,
filters=filters,
facets=facets,
include_vector=include_vector,
include_count=include_count,
include_facets=include_facets,
cluster_properties_filter=cluster_properties_filters,
verbose=verbose,
)
@track
def list_furthest(
self,
cluster_ids: Optional[List] = None,
centroid_vector_fields: Optional[List] = None,
select_fields: Optional[List] = None,
approx: int = 0,
sum_fields: bool = True,
page_size: int = 3,
page: int = 1,
similarity_metric: str = "cosine",
filters: Optional[List] = None,
# facets: List = [],
min_score: int = 0,
include_vector: bool = False,
include_count: bool = True,
cluster_properties_filter: Optional[Dict] = {},
):
"""
List documents furthest from the center.
Parameters
----------
dataset_id: string
Unique name of dataset
vector_fields: list
The vector field where a clustering task was run.
cluster_ids: list
Any of the cluster ids
alias: string
Alias is used to name a cluster
select_fields: list
Fields to include in the search results, empty array/list means all fields
approx: int
Used for approximate search to speed up search. The higher the number, faster the search but potentially less accurate
sum_fields: bool
            Whether to sum the multiple vectors similarity search score as 1 or separate
page_size: int
Size of each page of results
page: int
Page of the results
similarity_metric: string
Similarity Metric, choose from ['cosine', 'l1', 'l2', 'dp']
filters: list
Query for filtering the search results
facets: list
Fields to include in the facets, if [] then all
min_score: int
Minimum score for similarity metric
include_vectors: bool
Include vectors in the search results
include_count: bool
Include the total count of results in the search results
include_facets: bool
Include facets in the search results
"""
return self.datasets.cluster.centroids.list_furthest_from_center(
dataset_id=self.dataset_id,
vector_fields=self.vector_fields,
alias=self.alias,
cluster_ids=cluster_ids,
centroid_vector_fields=centroid_vector_fields,
select_fields=select_fields,
approx=approx,
sum_fields=sum_fields,
page_size=page_size,
page=page,
similarity_metric=similarity_metric,
filters=filters,
min_score=min_score,
include_vector=include_vector,
include_count=include_count,
cluster_properties_filter=cluster_properties_filter,
)
def explain_text_clusters(
self,
text_field,
encode_fn_or_model,
n_closest: int = 5,
highlight_output_field="_explain_",
algorithm: str = "relational",
model_kwargs: Optional[dict] = None,
):
"""
It takes a text field and a function that encodes the text field into a vector.
It then returns the top n closest vectors to each cluster centroid.
.. code-block::
def encode(X):
return [1, 2, 1]
cluster_ops.explain_text_clusters(text_field="hey", encode_fn_or_model=encode)
Parameters
----------
text_field
The field in the dataset that contains the text to be explained.
encode_fn
This is the function that will be used to encode the text.
n_closest : int, optional
The number of closest documents to each cluster to return.
highlight_output_field, optional
The name of the field that will be added to the output dataset.
algorithm: str
Algorithm is either "centroid" or "relational"
Returns
-------
A new dataset with the same data as the original dataset, but with a new field called _explain_
"""
if isinstance(encode_fn_or_model, str):
# Get the model
from relevanceai.operations_new.vectorize.text.base import VectorizeTextBase
self.model = VectorizeTextBase._get_model(encode_fn_or_model)
encode_fn = self.model.encode
else:
encode_fn = encode_fn_or_model
from relevanceai.operations_new.cluster.text.explainer.ops import (
TextClusterExplainerOps,
)
ops = TextClusterExplainerOps(credentials=self.credentials)
if algorithm == "centroid":
return ops.explain_clusters(
dataset_id=self.dataset_id,
alias=self.alias,
vector_fields=self.vector_fields,
text_field=text_field,
encode_fn=encode_fn,
n_closest=n_closest,
highlight_output_field=highlight_output_field,
)
elif algorithm == "relational":
return ops.explain_clusters_relational(
dataset_id=self.dataset_id,
alias=self.alias,
vector_fields=self.vector_fields,
text_field=text_field,
encode_fn=encode_fn,
n_closest=n_closest,
highlight_output_field=highlight_output_field,
)
raise ValueError("Algorithm needs to be either `relational` or `centroid`.")
def store_operation_metadatas(self):
self.store_operation_metadata(
operation="cluster",
values=str(
{
"model": self.model,
"vector_fields": self.vector_fields,
"alias": self.alias,
"model_kwargs": self.model_kwargs,
}
),
)
@property
def centroids(self):
"""
Access the centroids of your dataset easily
.. code-block::
ds = client.Dataset("sample")
cluster_ops = ds.ClusterOps(
vector_fields=["sample_vector_"],
alias="simple"
)
cluster_ops.centroids
"""
if not hasattr(self, "_centroids"):
self._centroids = self.datasets.cluster.centroids.documents(
dataset_id=self.dataset_id,
vector_fields=self.vector_fields,
alias=self.alias,
page_size=9999,
include_vector=True,
)["results"]
return self._centroids
def get_centroid_from_id(
self,
cluster_id: str,
) -> Dict[str, Any]:
"""> It takes a cluster id and returns the centroid with that id
Parameters
----------
cluster_id : str
The id of the cluster to get the centroid for.
Returns
-------
The centroid with the given id.
"""
for centroid in self.centroids:
if centroid["_id"] == cluster_id:
return centroid
raise ValueError(f"Missing the centorid with id {cluster_id}")
@staticmethod
def _get_filters(
filters: List[Dict[str, Union[str, int]]],
vector_fields: List[str],
) -> List[Dict[str, Union[str, int]]]:
"""It takes a list of filters and a list of vector fields and returns a list of filters that
includes the original filters and a filter for each vector field that checks if the vector field
exists
Parameters
----------
        filters : List[Dict[str, Union[str, int]]]
            The existing filters to extend; may be None.
        vector_fields : List[str]
            The vector fields that must exist, e.g. ["vector_field_1", "vector_field_2"]
Returns
-------
A list of dictionaries.
"""
vector_field_filters = [
{
"field": vector_field,
"filter_type": "exists",
"condition": ">=",
"condition_value": " ",
}
for vector_field in vector_fields
]
filters = deepcopy(filters)
if filters is None:
filters = vector_field_filters
else:
filters += vector_field_filters # type: ignore
return filters
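    # Hedged illustration of _get_filters (the field name is an example taken from the
    # docstrings in this module): given filters=None and vector_fields=["sample_vector_"],
    # the returned list is
    #
    #     [{"field": "sample_vector_", "filter_type": "exists",
    #       "condition": ">=", "condition_value": " "}]
    #
    # and any caller-supplied filters are kept in front, with the exists-filters appended.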
def merge(self, cluster_ids: list):
"""
Merge clusters into the first one.
The centroids are re-calculated and become a new middle.
"""
return self.datasets.cluster.merge(
dataset_id=self.dataset_id,
vector_fields=self.vector_fields,
alias=self.alias,
cluster_ids=cluster_ids,
)
| 2.078125
| 2
|
subtask1/torch_ans_sel/run_model.py
|
DeepInEvil/NOESIS-II_deep
| 0
|
12782694
|
from data_utils import UDC
from transformer_rnn import TransformerRNN
from args import get_args
from eval import eval_model
import torch
import numpy as np
from tqdm import tqdm
import torch.optim as optim
import torch.nn.functional as F
from sklearn.metrics import f1_score
import torch.nn as nn
args = get_args()
if args.gpu:
torch.cuda.manual_seed(args.randseed)
data = UDC(train_inp=args.train_inp,
val_inp=args.val_inp)
model = TransformerRNN(emb_dim=args.input_size, n_vocab=data.bpe.vocab_size(), rnn_h_dim=256, gpu = args.gpu)
criteria = nn.NLLLoss()
solver = optim.Adam(model.parameters(), lr=args.lr)
def train():
for epoch in range(args.epochs):
model.train()
print('\n\n-------------------------------------------')
print('Epoch-{}'.format(epoch))
print('-------------------------------------------')
train_iter = enumerate(data.get_batches('train'))
        if not args.no_tqdm:
            train_iter = tqdm(train_iter)
            train_iter.set_description_str('Training')
            train_iter.total = len(data.train)
for it, mb in train_iter:
c, c_u_m, c_m, r, r_u_m, r_m, y = mb
# print (c, c_u_m, c_m, r, y)
# getting predictions
pred = model(c, c_u_m, c_m, r, r_u_m, r_m)
#train_iter.set_description(model.print_loss())
#loss = F.nll_loss(pred, r)
#loss = criteria(pred, y)
#y = torch.argmax(y)
#print (y.size())
loss = criteria(pred, y)
loss.backward()
#print (model.conv3.grad)
#clip_gradient_threshold(model, -10, 10)
solver.step()
solver.zero_grad()
val_mrr = eval_model(model, data, 'valid')
print ('Validation MRR for this epoch:'+str(val_mrr))
if __name__ == '__main__':
train()
| 2.171875
| 2
|
lib/TaxaTransfer/InsertExternalDatabase.py
|
ZFMK/gbif2tnt
| 0
|
12782695
|
import logging
import logging.config
logger = logging.getLogger('sync_gbif2tnt')
import re
import pudb
from configparser import ConfigParser
config = ConfigParser()
config.read('config.ini')
from .InsertIntoTablesBase import InsertIntoTablesBase
class InsertExternalDatabaseBase(InsertIntoTablesBase):
def __init__(self, tntdbcon, temptablename):
InsertIntoTablesBase.__init__(self, tntdbcon)
self.temptable = temptablename
self.externaldatabasetable = 'TaxonNameExternalDatabase'
self.idcolumn = 'ExternalDatabaseID'
self.externalidstable = 'TaxonNameExternalID'
self.gbif_source = dict(config['gbif_source_details'])
self.edb_uri = self.gbif_source['uri']
self.edb_name = self.gbif_source['name']
self.edb_accession_date = self.gbif_source['accessiondate']
self.edb_version = self.gbif_source['version']
self.edb_license = self.gbif_source['license']
self.insertExternalDatabase()
self.insertExternalIDs()
def getExternalDatabaseID(self):
query = """
SELECT [ExternalDatabaseID]
FROM
[{0}]
WHERE
[ExternalDatabaseURI] = ?
AND [ExternalDatabaseName] = ?
AND [ExternalDatabaseVersion] = ?
""".format(self.externaldatabasetable)
self.cur.execute(query, [
self.edb_uri,
self.edb_name,
self.edb_version
])
row = self.cur.fetchone()
if row is None:
return None
else:
return row[0]
def insertExternalDatabase(self):
self.databaseid = self.getExternalDatabaseID()
if self.databaseid is not None:
return
else:
maxid = self.getMaxID(self.externaldatabasetable, self.idcolumn)
query = """
INSERT INTO [{0}] (
[ExternalDatabaseID]
, [ExternalDatabaseURI]
, [ExternalDatabaseName]
, [InternalNotes]
, [ExternalDatabaseVersion]
, [Rights]
)
VALUES (?, ?, ?, ?, ?, ?)
;""".format(self.externaldatabasetable)
self.cur.execute(query, [
maxid + 1,
self.edb_uri,
self.edb_name,
self.edb_accession_date,
self.edb_version,
self.edb_license
])
self.con.commit()
self.databaseid = maxid + 1
return
def insertExternalIDs(self):
query = """
INSERT INTO [{0}]
(
[NameID],
[ExternalDatabaseID],
[ExternalNameURI]
)
SELECT [NameID]
, {1} AS [ExternalDatabaseID]
, [GBIFTaxonURL]
FROM
[{2}]
;""".format(self.externalidstable, self.databaseid, self.temptable)
self.cur.execute(query)
self.con.commit()
| 2.34375
| 2
|
lightkurve/correctors/corrector.py
|
burke86/lightkurve
| 0
|
12782696
|
<filename>lightkurve/correctors/corrector.py<gh_stars>0
"""Implements the abstract `Corrector` base class.
"""
from abc import ABC, abstractmethod
import matplotlib
import numpy as np
from .. import LightCurve
from .metrics import overfit_metric_lombscargle, underfit_metric_neighbors
class Corrector(ABC):
"""Abstract base class documenting the required structure of classes
designed to remove systematic noise from light curves.
Attributes
----------
original_lc : LightCurve
The uncorrected light curve. Must be passed into (or computed by) the
constructor method.
corrected_lc : LightCurve
Corrected light curve. Must be updated upon each call to the `correct()` method.
cadence_mask : np.array of dtype=bool
Boolean array with the same length as `original_lc`.
True indicates that a cadence should be used to fit the noise model.
By setting certain cadences to False, users can exclude those cadences
from informing the noise model, which will help prevent the overfitting
of those signals (e.g. exoplanet transits).
By default, the cadence mask is True across all cadences.
Methods
-------
__init__()
Accepts all the data required to execute the correction.
The constructor must set the `original_lc` attribute.
correct() -> LightCurve
Executes the correction, optionally accepting meaningful parameters that
can be used to modify the way the correction is applied.
This method must set or update the `corrected_lc` attribute on each run.
diagnose() -> matplotlib.axes.Axes
Creates plots to elucidate the user's most recent call to `correct()`.
"""
@property
def original_lc(self) -> LightCurve:
if hasattr(self, "_original_lc"):
return self._original_lc
else:
raise AttributeError("`original_lc` has not been instantiated yet.")
@original_lc.setter
def original_lc(self, original_lc):
self._original_lc = original_lc
@property
def corrected_lc(self) -> LightCurve:
if hasattr(self, "_corrected_lc"):
return self._corrected_lc
else:
raise AttributeError(
"You need to call the `correct()` method "
"before you can access `corrected_lc`."
)
@corrected_lc.setter
def corrected_lc(self, corrected_lc):
self._corrected_lc = corrected_lc
@property
def cadence_mask(self) -> np.array:
if not hasattr(self, "_cadence_mask"):
self._cadence_mask = np.ones(len(self.original_lc), dtype=bool)
return self._cadence_mask
@cadence_mask.setter
def cadence_mask(self, cadence_mask):
self._cadence_mask = cadence_mask
def __init__(self, original_lc: LightCurve) -> None:
"""Constructor method.
The constructor shall:
* accept all data required to run the correction (e.g. light curves,
target pixel files, engineering data).
* instantiate the `original_lc` property.
"""
self.original_lc = original_lc
@abstractmethod
def correct(
self, cadence_mask: np.array = None, optimize: bool = False
) -> LightCurve:
"""Returns a `LightCurve` from which systematic noise has been removed.
This method shall:
* accept meaningful parameters that can be used to tune the correction,
including:
- `optimize`: should an optimizer be used to tune the parameters?
- `cadence_mask`: flags cadences to be used to fit the noise model.
* store all parameters as object attributes (e.g. `self.optimize`, `self.cadence_mask`);
* store helpful diagnostic information as object attributes;
* store the result in the `self.corrected_lc` attribute;
* return `self.corrected_lc`.
"""
        if cadence_mask is not None:
self.cadence_mask = cadence_mask
# ... perform correction ...
# self.corrected_lc = corrected_lc
# return corrected_lc
@abstractmethod
def diagnose(self) -> matplotlib.axes.Axes:
"""Returns plots which elucidate the most recent call to `correct()`.
This method shall plot useful diagnostic information which have been
stored as object attributes during the most recent call to `correct()`.
"""
pass
def compute_overfit_metric(self, **kwargs) -> float:
""" Measures the degree of over-fitting in the correction.
See the docstring of `lightkurve.correctors.metrics.overfit_metric_lombscargle`
for details.
Returns
-------
overfit_metric : float
A float in the range [0,1] where 0 => Bad, 1 => Good
"""
return overfit_metric_lombscargle(
# Ignore masked cadences in the computation
self.original_lc[self.cadence_mask],
self.corrected_lc[self.cadence_mask],
**kwargs
)
def compute_underfit_metric(self, **kwargs) -> float:
"""Measures the degree of under-fitting the correction.
See the docstring of `lightkurve.correctors.metrics.underfit_metric_neighbors`
for details.
Returns
-------
underfit_metric : float
A float in the range [0,1] where 0 => Bad, 1 => Good
"""
return underfit_metric_neighbors(self.corrected_lc[self.cadence_mask], **kwargs)
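# Editorial sketch (not part of lightkurve): a minimal concrete Corrector might look like
# the commented outline below, which "corrects" by removing a constant median offset.
# The subtraction step is an assumption about LightCurve arithmetic and is meant only to
# illustrate the required correct()/diagnose() structure.
#
#     class ConstantOffsetCorrector(Corrector):
#         def correct(self, cadence_mask=None, optimize=False):
#             if cadence_mask is not None:
#                 self.cadence_mask = cadence_mask
#             offset = np.median(self.original_lc.flux[self.cadence_mask])
#             self.corrected_lc = self.original_lc - offset
#             return self.corrected_lc
#
#         def diagnose(self):
#             return self.corrected_lc.plot()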
| 2.578125
| 3
|
mmdet/datasets/coco_with_sub_image.py
|
ArthurWish/mmdetection
| 0
|
12782697
|
<reponame>ArthurWish/mmdetection<filename>mmdet/datasets/coco_with_sub_image.py
import os.path
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class CocoDatasetWithSubImage(CocoDataset):
def __init__(self, sub_images=(), **kwargs):
self.sub_images = sub_images
super(CocoDatasetWithSubImage, self).__init__(**kwargs)
def load_annotations(self, ann_file):
data_infos = super().load_annotations(ann_file)
for index,info in enumerate(data_infos):
data_infos[index]['filename'] = [os.path.join(info['filename'], sub_image_name) for sub_image_name in self.sub_images]
return data_infos
def _parse_ann_info(self, img_info, ann_info):
filename_list = img_info['filename']
img_info['filename'] = filename_list[0]
ann = super()._parse_ann_info(img_info, ann_info)
img_info['filename'] = filename_list
return ann
| 2.359375
| 2
|
Beakjoon_Online_Judge/ready_winter.py
|
pkch93/Algorithm
| 2
|
12782698
|
<gh_stars>1-10
def solution(acorns):
n = len(acorns)
dp = [0]*n
for i in range(n):
temp = acorns[i]
if i == 0:
dp[i] = temp
else:
dp[i] = max(dp[i-1]+temp, temp)
acorns.sort(reverse=True)
smart = 0
for i in range(n):
if i != 0 and acorns[i] <= 0:
break
smart += acorns[i]
return max(dp), smart
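# Worked example (editorial): for acorns = [10, -4, 3, 1, 5, 6, -35, 12, 21, -1] the
# Kadane-style dp gives max(dp) = 33 (best contiguous run 12 + 21), while the greedy
# "smart" pass sums all positive acorns in descending order: 21+12+10+6+5+3+1 = 58.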
def main():
n = int(input())
acorns = list(map(int, input().split()))
print(*solution(acorns))
if __name__ == '__main__':
main()
| 3.0625
| 3
|
Financely/basic_app/get_stock_info.py
|
Frostday/Financely
| 8
|
12782699
|
import urllib.request, json
def getStockInfo(var):
var = var.replace(' ','')
url = "https://finance.yahoo.com/_finance_doubledown/api/resource/searchassist;searchTerm={}?device=console&returnMeta=true".format(var)
response = urllib.request.urlopen(url)
data = json.loads(response.read())
return data['data']['items']
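# Hedged usage sketch (requires network access; the query string is illustrative, and the
# item keys are an assumption since the Yahoo searchassist endpoint is unofficial):
#
#     items = getStockInfo("apple")
#     for item in items:
#         print(item.get("symbol"), item.get("name"))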
| 3.09375
| 3
|
src/models/spikingpool2D.py
|
k-timy/snn_pytorch
| 0
|
12782700
|
<filename>src/models/spikingpool2D.py
"""
Author: <NAME>
April 2021
"""
import types
import torch
import torch.nn as nn
import torch.nn.functional as F
GLOBAL_DEBUG = False
class SpikingAveragePool2D(nn.Module):
"""
    Applies a 2D average pooling transformation on the input spikes and holds the
    states of the pooled neurons (membrane potential, refractory cooldown, spike counts).
    batch_size : can be given in the constructor, or is inferred once the layer is
    applied on some data.
    Inputs to this layer are one single time step of spikes, and the outputs are
    also one single time step.
"""
def __init__(self,kernel_size : int = 2, stride : int = 2, padding : int = 0, count_include_pad : bool = False,device=None, **kwargs ):
super(SpikingAveragePool2D,self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.count_include_pad = count_include_pad
self._sim_params_dict = {
"batch_size" : None,
"dt" : torch.as_tensor(0.001,dtype=torch.float32),
"threshold" : torch.as_tensor(1,dtype=torch.float32),
"ref_period" : torch.as_tensor(0,dtype=torch.float32),
"dtype" : torch.float32, # TODO get this from input layer
}
self.sim_params = types.SimpleNamespace()
self.device = device
self.spike_params_shape = None
# set whatever parameters are available in the constructor
self._init_sim_params(**kwargs)
self.zero = torch.as_tensor(0,dtype=self._sim_params_dict['dtype'])
if device is not None:
self.zero = self.zero.to(device)
def init_spiking_params(self):
self.neuron_ref_cooldown = torch.zeros(self.spike_params_shape,requires_grad=False)
self.sum_spikes = torch.zeros(self.spike_params_shape,requires_grad=False)
self.membrane_potential = torch.zeros(self.spike_params_shape,requires_grad=False)
self.zeros = torch.zeros(self.spike_params_shape,requires_grad=False)
if self.device is not None:
self.neuron_ref_cooldown = self.neuron_ref_cooldown.to(self.device)
self.sum_spikes = self.sum_spikes.to(self.device)
self.membrane_potential = self.membrane_potential.to(self.device)
self.zeros = self.zeros.to(self.device)
def set_threshold(self,new_threshold):
self.sim_params.threshold = torch.as_tensor(new_threshold,dtype=self.sim_params.dtype,device=self.device)
def reset_layer(self):
self.init_spiking_params()
def _init_sim_params(self,**kwargs):
for k,v in self._sim_params_dict.items():
varg = kwargs.get(k)
# if varg is not set
if varg is not None:
if k in ["batch_size","dtype"]:
setattr(self.sim_params, k, varg)
else:
if self.device is not None:
setattr(self.sim_params, k, torch.as_tensor(varg,dtype=self._sim_params_dict["dtype"]).to(self.device))
else:
setattr(self.sim_params, k, torch.as_tensor(varg,dtype=self._sim_params_dict["dtype"]))
# if varg == None
else:
if k in ["batch_size","dtype"]:
setattr(self.sim_params, k, v)
else:
v_ = torch.as_tensor(v,dtype=self._sim_params_dict["dtype"])
if self.device is not None:
setattr(self.sim_params, k, v_.to(self.device))
else:
setattr(self.sim_params, k, v_)
def clone(self):
raise Exception("TODO: Not implemented yet!")
def _get_shape_of_output(self, spikes) -> torch.Tensor.shape:
"""
Regardless of the Batchsize, Channels, Height and Width of the input data,
The pooling layer works. However, the spike neurons should be able to keep
        track of the membrane potentials, refractory cooldown, etc. So this
function gets the shape of the output based on a batch of input spikes and
lazily initializes the SNN parameters.
"""
sample_out = None
with torch.no_grad():
sample_ones = torch.ones_like(spikes)
sample_out = self._pool_spikes(sample_ones)
return sample_out.shape
def _pool_spikes(self, spikes):
return F.avg_pool2d(spikes, kernel_size = self.kernel_size,stride = self.stride,padding = self.padding,count_include_pad=self.count_include_pad)
def forward(self, spikes):
if self.spike_params_shape is None:
self.sim_params.batch_size = spikes.shape[0]
self.spike_params_shape = self._get_shape_of_output(spikes)
self.init_spiking_params()
device = self.device
with torch.no_grad():
# Get input impulse from incoming spikes
impulse = self._pool_spikes(spikes)
            # Do not add impulse if the neuron is in its refractory period
in_ref_per = self.neuron_ref_cooldown > self.zero
if device is not None:
in_ref_per = in_ref_per.to(device)
impulse = torch.where(in_ref_per, self.zero, impulse)
# Add input impulse to membrane potentials
self.membrane_potential = self.membrane_potential + impulse
# Check for spiking neurons
spikings_bool = self.membrane_potential >= self.sim_params.threshold
if device is not None:
spikings_bool = spikings_bool.to(device)
# Reset the potential of the membrane if it has exceeded the threshold
self.membrane_potential = torch.where(spikings_bool, self.zero, self.membrane_potential)
# Excited neurons should go to cooldown state
self.neuron_ref_cooldown = torch.where(spikings_bool, self.sim_params.ref_period, self.neuron_ref_cooldown)
# Cooldown timer count-down
self.neuron_ref_cooldown = self.neuron_ref_cooldown - self.sim_params.dt
# Prevent from getting negative values
self.neuron_ref_cooldown = torch.max(self.neuron_ref_cooldown,self.zero)
# calculate the output of this layer
out_spikes = spikings_bool.type(self.sim_params.dtype)
self.sum_spikes = self.sum_spikes + out_spikes
return out_spikes
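# Editorial usage sketch (shapes and the spike threshold below are illustrative assumptions):
#
#     pool = SpikingAveragePool2D(kernel_size=2, stride=2, threshold=1.0)
#     spikes_t = (torch.rand(4, 1, 28, 28) > 0.8).float()   # one time step of input spikes
#     out_t = pool(spikes_t)       # shape (4, 1, 14, 14), values in {0., 1.}
#     pool.reset_layer()           # clear membrane state between samples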
| 3.375
| 3
|
tests/base_tests/linear_tests/test_translate.py
|
lycantropos/gon
| 10
|
12782701
|
<filename>tests/base_tests/linear_tests/test_translate.py
from typing import Tuple
from hypothesis import given
from gon.base import Linear
from gon.hints import Scalar
from . import strategies
@given(strategies.linear_geometries_with_coordinates_pairs)
def test_isometry(linear_with_steps: Tuple[Linear, Scalar, Scalar]) -> None:
linear, step_x, step_y = linear_with_steps
result = linear.translate(step_x, step_y)
assert result.length == linear.length
| 1.945313
| 2
|
statsig/statsig.py
|
ramikhalaf/python-sdk
| 0
|
12782702
|
<gh_stars>0
from .statsig_server import StatsigServer
__instance = StatsigServer()
def initialize(secretKey, options = None):
__instance.initialize(secretKey, options)
def check_gate(user, gate):
return __instance.check_gate(user, gate)
def get_config(user, config):
return __instance.get_config(user, config)
def get_experiment(user, experiment):
return get_config(user, experiment)
def log_event(event):
__instance.log_event(event)
def shutdown():
__instance.shutdown()
| 1.898438
| 2
|
speedysvc/client_server/network/NetworkClient.py
|
mcyph/shmrpc
| 4
|
12782703
|
import time
import warnings
import socket
from _thread import allocate_lock
from os import getpid
from speedysvc.toolkit.documentation.copydoc import copydoc
from speedysvc.client_server.base_classes.ClientProviderBase import ClientProviderBase
from speedysvc.client_server.network.consts import len_packer, response_packer
from speedysvc.compression.compression_types import zlib_compression
class NetworkClient(ClientProviderBase):
def __init__(self,
server_methods,
host='127.0.0.1', port=None,
compression_inst=zlib_compression):
"""
:param server_methods:
:param host:
"""
self.host = host
self.port = port
self.lock = allocate_lock()
ClientProviderBase.__init__(self, server_methods)
self.compression_inst = compression_inst
self.__connect()
def __connect(self):
self.conn_to_server = conn_to_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn_to_server.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
conn_to_server.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 65536)
conn_to_server.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 65536)
conn_to_server.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
port = (
self.port
if self.port is not None
else self.server_methods.port
)
conn_to_server.connect((self.host, port))
conn_to_server.send(
self.compression_inst.typecode
)
def __del__(self):
self.conn_to_server.close()
@copydoc(ClientProviderBase.send)
def send(self, fn, data):
with self.lock:
return self._send(fn, data)
def _send(self, fn, data):
actually_compressed, data = \
self.compression_inst.compress(fn.serialiser.dumps(data))
cmd = fn.__name__.encode('ascii')
prefix = len_packer.pack(int(actually_compressed), len(data), len(cmd))
displayed_reconnect_msg = False
while True:
# Try to keep reconnecting if
# connection no longer functioning
try:
self.conn_to_server.send(prefix + cmd + data)
def recv(amount):
# Note string concatenation is slower in earlier versions
# of python, but should be faster than list concat in later
# versions after 3.
r = b''
while len(r) != amount:
add_me = self.conn_to_server.recv(amount)
if not add_me:
raise ConnectionResetError()
r += add_me
return r
actually_compressed, data_len, status = \
response_packer.unpack(recv(response_packer.size))
data = recv(data_len)
break
except (socket.error, ConnectionResetError):
if not displayed_reconnect_msg:
displayed_reconnect_msg = True
warnings.warn(
f"Client [pid {getpid()}]: "
f"TCP connection to service "
f"{self.server_methods.name} reset - "
f"the service may need to be checked/restarted!"
)
while True:
try:
import time
time.sleep(1)
self.__connect()
except (ConnectionRefusedError, ConnectionError):
continue
break
if actually_compressed:
data = self.compression_inst.decompress(data)
if status == b'+':
return fn.serialiser.loads(data)
else:
self._handle_exception(data)
raise Exception(data.decode('utf-8'))
if __name__ == '__main__':
inst = NetworkClient(5555)
t = time.time()
for x in range(500000):
i = b"my vfdsfdsfsdfsdfsdfdsfsdaluetasdsadasdsadsadsaest"# bytes([randint(0, 255)])*500
#print('SEND:', i)
assert inst.send('echo', i) == i
print(time.time()-t)
| 2.328125
| 2
|
pyjdbc/java.py
|
manifoldai/pyjdbc
| 2
|
12782704
|
<reponame>manifoldai/pyjdbc<filename>pyjdbc/java.py
import jpype
from jpype import JClass
# Enable Java imports
from jpype import JArray
def get_url(path: str):
"""
Formats a given path into a url object.
Converts `/home/myfile.txt` into `file:/home/myfile.txt`
:param path:
:return:
"""
Paths = JClass('java.nio.file.Paths')
    url = Paths.get(path).toUri().toURL()  # todo add exception handling if the path isn't valid
return url
class System:
"""
Wrapper around java.lang.System
"""
@staticmethod
def _system():
return JClass('java.lang.System')
@classmethod
def get_property(cls, name):
return cls._system().getProperty(name)
@classmethod
def set_property(cls, name, value):
cls._system().setProperty(name, value)
@classmethod
def clear_property(cls, name):
return cls._system().clearProperty(name)
@classmethod
def get_env(cls, name):
return cls._system().getenv(name)
class Properties:
PROPERTIES_CLASS = 'java.util.Properties'
@classmethod
def from_dict(cls, dictionary):
JProperties = JClass(cls.PROPERTIES_CLASS)
jprops = JProperties()
for k, v in dictionary.items():
if not isinstance(k, str):
raise ValueError('[{}] keys must be strings, got: {}'.format(cls.PROPERTIES_CLASS, type(k)))
jprops.setProperty(k, v)
return jprops
@classmethod
def from_tuple(cls, sequence):
as_dict = dict(sequence)
return cls.from_dict(as_dict)
@staticmethod
def to_dict(properties):
keys = properties.stringPropertyNames()
dictionary = {key: properties.getProperty(key) for key in keys}
return dictionary
@staticmethod
def to_tuple(properties):
keys = properties.stringPropertyNames()
sequence = [(key, properties.getProperty(key)) for key in keys]
return sequence
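    # Hedged round-trip example (requires a running JVM; the key/value pairs are illustrative):
    #
    #     props = Properties.from_dict({"user": "alice", "password": "secret"})
    #     assert Properties.to_dict(props) == {"user": "alice", "password": "secret"}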
class Classpath:
"""
utilities for dealing with the java classpath
"""
@staticmethod
def add(*paths):
for path in paths:
jpype.addClassPath(path)
@staticmethod
def get():
return jpype.getClassPath()
@staticmethod
def load_jar_class(jar_path, class_name):
"""
Load a class at runtime directly from a jar file.
Note that with some libraries this can cause problems because the library
will not be *visible* to the default class loader.
:param jar_path: Path to the `.jar` file.
:param class_name: The fully qualified Class Name within the jar to load.
:return:
"""
URL = JClass('java.net.URL')
URLClassLoader = JClass('java.net.URLClassLoader')
Class = JClass('java.lang.Class')
UrlArray = JArray(URL)
urls = UrlArray(1)
urls[0] = get_url(jar_path)
java_class = JClass(Class.forName(class_name, True, URLClassLoader(urls)))
return java_class
class Jvm:
ARGS = {}
@classmethod
def add_argument(cls, identifier, option):
"""
Add an argument to the jvm, (this must be used BEFORE the jvm is started)
If the jvm is already running RuntimeError will be raised
Set a jvm argument, examples include:
-Xmx1099m
-Djava.class.path=PATH_TO_JAR
:param identifier: a string identifier so Jvm options aren't duplicated: ie: ``javax.net.ssl.trustStore``
:type identifier: str
:param option: the full jvm option argument, ie: ``-Djavax.net.ssl.trustStore=trust.jks``
:type option: str
:raises: RuntimeError
:return:
"""
if cls.is_running():
raise RuntimeError('The JVM has been started, any options set after the JVM is started will have no '
'effect.\n'
'Set jvm options before acquiring any connections with `pyjdbc`')
        if not any((option.startswith('-D'), option.startswith('-X'))):
            raise ValueError('invalid argument "option": {}, jvm arguments must start with "-D" or "-X"\n'
                             'Examples:\n'
                             '  -Xmx1099m\n'
                             '  -Djavax.net.ssl.trustStore=trust.jks'.format(option))
cls.ARGS[identifier] = option
@classmethod
def start(cls):
# start the jvm
jpype.startJVM(*cls.ARGS.values(), interrupt=True)
@staticmethod
def stop():
jpype.shutdownJVM()
@staticmethod
def is_running():
return jpype.isJVMStarted()
@classmethod
def check_running(cls):
if cls.is_running():
raise RuntimeError('The jvm is already running')
@staticmethod
def path():
return jpype.getDefaultJVMPath()
@staticmethod
def version():
return jpype.getJVMVersion()
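# Editorial usage sketch (the jar path and truststore file below are hypothetical):
#
#     Classpath.add("/opt/drivers/hypothetical-driver.jar")
#     Jvm.add_argument("javax.net.ssl.trustStore", "-Djavax.net.ssl.trustStore=trust.jks")
#     Jvm.start()
#     print(Jvm.version())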
| 2.625
| 3
|
repokid/utils/logging.py
|
maramSalamouny/repokid
| 0
|
12782705
|
<gh_stars>0
# Copyright 2020 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
import logging
from socket import gethostname
import traceback
LOGGER = logging.getLogger("repokid")
class JSONFormatter(logging.Formatter):
"""Custom formatter to output log records as JSON."""
hostname = gethostname()
def format(self, record):
"""Format the given record into JSON."""
message = {
"time": datetime.utcfromtimestamp(record.created).isoformat(),
"level": record.levelname,
"name": record.name,
"message": record.getMessage(),
"process": record.process,
"thread": record.threadName,
"hostname": self.hostname,
"filename": record.filename,
"function": record.funcName,
"lineNo": record.lineno,
}
if record.exc_info:
message[
"exception"
] = f"{record.exc_info[0].__name__}: {record.exc_info[1]}"
message["traceback"] = traceback.format_exc()
return json.dumps(message, ensure_ascii=False)
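# Hedged wiring example (the handler choice is an assumption, not part of this module):
#
#     handler = logging.StreamHandler()
#     handler.setFormatter(JSONFormatter())
#     LOGGER.addHandler(handler)
#     LOGGER.info("hello")   # emitted as a single JSON object per record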
def log_deleted_and_repoed_policies(
deleted_policy_names, repoed_policies, role_name, account_number
):
"""Logs data on policies that would otherwise be modified or deleted if the commit flag were set.
Args:
deleted_policy_names (list<string>)
repoed_policies (list<dict>)
role_name (string)
account_number (string)
Returns:
None
"""
for name in deleted_policy_names:
LOGGER.info(
"Would delete policy from {} with name {} in account {}".format(
role_name, name, account_number
)
)
if repoed_policies:
LOGGER.info(
"Would replace policies for role {} with: \n{} in account {}".format(
role_name,
json.dumps(repoed_policies, indent=2, sort_keys=True),
account_number,
)
)
| 2
| 2
|
tests/test_0123-atlas-issues.py
|
nikoladze/uproot4
| 0
|
12782706
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/master/LICENSE
from __future__ import absolute_import
import pytest
import skhep_testdata
import uproot4
def test_version():
assert uproot4.classname_decode(
uproot4.classname_encode("xAOD::MissingETAuxAssociationMap_v2")
) == ("xAOD::MissingETAuxAssociationMap_v2", None)
assert uproot4.classname_decode(
uproot4.classname_encode("xAOD::MissingETAuxAssociationMap_v2", 9)
) == ("xAOD::MissingETAuxAssociationMap_v2", 9)
| 1.484375
| 1
|
cr_week6_test/scripts/robot_expression_prediction.py
|
Ani997/Human-Robot-Interaction-
| 0
|
12782707
|
<reponame>Ani997/Human-Robot-Interaction-
#!/usr/bin/env python
import rospy
from cr_week6_test.msg import perceived_info
from cr_week6_test.msg import robot_info
from cr_week6_test.srv import predict_robot_expression
import random
from bayesian_belief_networks.ros_utils import *
def human_expression_prob(human_expression):
return 0.33
def human_action_prob(human_action):
return 0.33
def object_size_prob(object_size):
return 0.50
## Implementation of conditional probability table
def Happy(human_expression, human_action, object_size):
table = dict()
table['HRS'] = 0.8
table['HRB'] = 1.0
table['HOS'] = 0.8
table['HOB'] = 1.0
table['HAS'] = 0.6
table['HAB'] = 0.8
table['SRS'] = 0
table['SRB'] = 0
table['SOS'] = 0
table['SOB'] = 0.1
table['SAS'] = 0
table['SAB'] = 0.2
table['NRS'] = 0.7
table['NRB'] = 0.8
table['NOS'] = 0.8
table['NOB'] = 0.9
table['NAS'] = 0.6
table['NAB'] = 0.7
    # Build the lookup key from single-letter codes so it matches the CPT keys
    # above (e.g. Happy + looking at Robot + Small object -> 'HRS').
    key = ''
    if human_expression == 1:
        key = key + 'H'   # Happy
    elif human_expression == 2:
        key = key + 'S'   # Sad
    else:
        key = key + 'N'   # Neutral
    if human_action == 1:
        key = key + 'R'   # looking at robot
    elif human_action == 2:
        key = key + 'O'   # looking at object
    else:
        key = key + 'A'   # looking away
    if object_size == 1:
        key = key + 'S'   # small object
    else:
        key = key + 'B'   # big object
    return table[key]
def Sad(human_expression, human_action, object_size):
table = dict()
table['HRS'] = 0.2
table['HRB'] = 0
table['HOS'] = 0.2
table['HOB'] = 0
table['HAS'] = 0.2
table['HAB'] = 0.2
table['SRS'] = 0
table['SRB'] = 0
table['SOS'] = 0.1
table['SOB'] = 0.1
table['SAS'] = 0.2
table['SAB'] = 0.2
table['NRS'] = 0.3
table['NRB'] = 0.2
table['NOS'] = 0.2
table['NOB'] = 0.1
table['NAS'] = 0.2
table['NAB'] = 0.2
    key = ''
    if human_expression == 1:
        key = key + 'H'
    elif human_expression == 2:
        key = key + 'S'
    else:
        key = key + 'N'
    if human_action == 1:
        key = key + 'R'
    elif human_action == 2:
        key = key + 'O'
    else:
        key = key + 'A'
    if object_size == 1:
        key = key + 'S'
    else:
        key = key + 'B'
    return table[key]
def Neutral(human_expression, human_action, object_size):
table = dict()
table['HRS'] = 0
table['HRB'] = 0
table['HOS'] = 0
table['HOB'] = 0
table['HAS'] = 0.2
table['HAB'] = 0
table['SRS'] = 1
table['SRB'] = 1
table['SOS'] = 0.9
table['SOB'] = 0.8
table['SAS'] = 0.8
table['SAB'] = 0.6
table['NRS'] = 0
table['NRB'] = 0
table['NOS'] = 0
table['NOB'] = 0
table['NAS'] = 0.2
table['NAB'] = 0.1
    key = ''
    if human_expression == 1:
        key = key + 'H'
    elif human_expression == 2:
        key = key + 'S'
    else:
        key = key + 'N'
    if human_action == 1:
        key = key + 'R'
    elif human_action == 2:
        key = key + 'O'
    else:
        key = key + 'A'
    if object_size == 1:
        key = key + 'S'
    else:
        key = key + 'B'
    return table[key]
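# Illustration (added): with the single-letter keys above, a happy person
# looking at the robot while a small object is present maps to key 'HRS':
#   Happy(1, 1, 1)   -> 0.8
#   Sad(1, 1, 1)     -> 0.2
#   Neutral(1, 1, 1) -> 0.0
# so the three conditional probabilities sum to 1 for that evidence.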
## Building the Bayesian Belief Network(BBN)
def Predictor(BBN):
    BBN = ros_build_bbn(
        human_expression_prob,
        human_action_prob,
        object_size_prob,
        Happy, Sad, Neutral)
return BBN
def expression_prediction_server():
rospy.init_node('robot_expression_prediction') #node initialised
serv = rospy.Service('RobotExpressionPrediction', predict_robot_expression, Predictor) #service started
rospy.spin()
if __name__ == "__main__":
expression_prediction_server()
| 2.4375
| 2
|
cdrouter/users.py
|
qacafe/cdrouter.py
| 4
|
12782708
|
<reponame>qacafe/cdrouter.py
#
# Copyright (c) 2017-2020 by QA Cafe.
# All Rights Reserved.
#
"""Module for accessing CDRouter Users."""
import collections
from marshmallow import Schema, fields, post_load
from .cdr_error import CDRouterError
from .cdr_datetime import DateTime
from .filters import Field as field
class User(object):
"""Model for CDRouter Users.
:param id: (optional) User ID as an int.
:param admin: (optional) Bool `True` if user is an administrator.
:param disabled: (optional) Bool `True` if user is disabled.
:param name: (optional) User name as string.
:param description: (optional) User description as string.
:param created: (optional) User creation time as `DateTime`.
:param updated: (optional) User last-updated time as `DateTime`.
:param token: (optional) User's API token as string.
"""
def __init__(self, **kwargs):
self.id = kwargs.get('id', None)
self.admin = kwargs.get('admin', None)
self.disabled = kwargs.get('disabled', None)
self.name = kwargs.get('name', None)
self.description = kwargs.get('description', None)
self.created = kwargs.get('created', None)
self.updated = kwargs.get('updated', None)
self.token = kwargs.get('token', None)
# only needed for change_password
self.password = kwargs.get('password', None)
self.password_confirm = kwargs.get('password_confirm', None)
class UserSchema(Schema):
id = fields.Int(as_string=True)
admin = fields.Bool()
disabled = fields.Bool()
name = fields.Str()
description = fields.Str()
created = DateTime()
updated = DateTime()
token = fields.Str()
password = fields.Str()
password_confirm = fields.Str()
@post_load
def post_load(self, data):
return User(**data)
class Page(collections.namedtuple('Page', ['data', 'links'])):
"""Named tuple for a page of list response data.
:param data: :class:`users.User <users.User>` list
:param links: :class:`cdrouter.Links <cdrouter.Links>` object
"""
class UsersService(object):
"""Service for accessing CDRouter Users."""
RESOURCE = 'users'
BASE = RESOURCE + '/'
def __init__(self, service):
self.service = service
self.base = self.BASE
def list(self, filter=None, type=None, sort=None, limit=None, page=None, detailed=None): # pylint: disable=redefined-builtin
"""Get a list of users.
:param filter: (optional) Filters to apply as a string list.
:param type: (optional) `union` or `inter` as string.
:param sort: (optional) Sort fields to apply as string list.
:param limit: (optional) Limit returned list length.
:param page: (optional) Page to return.
:param detailed: (optional) Return all fields if Bool `True`.
:return: :class:`users.Page <users.Page>` object
"""
schema = UserSchema()
if not detailed:
schema = UserSchema(exclude=('created', 'updated', 'token', 'password', 'password_confirm'))
resp = self.service.list(self.base, filter, type, sort, limit, page, detailed=detailed)
us, l = self.service.decode(schema, resp, many=True, links=True)
return Page(us, l)
def iter_list(self, *args, **kwargs):
"""Get a list of users. Whereas ``list`` fetches a single page of
users according to its ``limit`` and ``page`` arguments,
``iter_list`` returns all users by internally making
successive calls to ``list``.
:param args: Arguments that ``list`` takes.
:param kwargs: Optional arguments that ``list`` takes.
:return: :class:`users.User <users.User>` list
"""
return self.service.iter_list(self.list, *args, **kwargs)
def get(self, id): # pylint: disable=invalid-name,redefined-builtin
"""Get a user.
:param id: User ID as an int.
:return: :class:`users.User <users.User>` object
:rtype: users.User
"""
schema = UserSchema()
resp = self.service.get_id(self.base, id)
return self.service.decode(schema, resp)
def get_by_name(self, name): # pylint: disable=invalid-name,redefined-builtin
"""Get a user by name.
:param name: User name as string.
:return: :class:`users.User <users.User>` object
:rtype: users.User
"""
rs, _ = self.list(filter=field('name').eq(name), limit=1)
if len(rs) == 0:
raise CDRouterError('no such user')
return rs[0]
def create(self, resource):
"""Create a new user.
:param resource: :class:`users.User <users.User>` object
:return: :class:`users.User <users.User>` object
:rtype: users.User
"""
schema = UserSchema(exclude=('id', 'created', 'updated', 'token'))
json = self.service.encode(schema, resource)
schema = UserSchema(exclude=('password', 'password_confirm'))
resp = self.service.create(self.base, json)
return self.service.decode(schema, resp)
def edit(self, resource):
"""Edit a user.
:param resource: :class:`users.User <users.User>` object
:return: :class:`users.User <users.User>` object
:rtype: users.User
"""
schema = UserSchema(exclude=('id', 'created', 'updated', 'token', 'password', 'password_confirm'))
json = self.service.encode(schema, resource)
schema = UserSchema(exclude=('password', 'password_confirm'))
resp = self.service.edit(self.base, resource.id, json)
return self.service.decode(schema, resp)
def change_password(self, id, new, old=None, change_token=True): # pylint: disable=invalid-name,redefined-builtin
"""Change a user's password.
:param id: User ID as an int.
:param new: New password as string.
:param old: (optional) Old password as string (required if performing action as non-admin).
:param change_token: (optional) If bool `True`, also generate a new API token for user.
:return: :class:`users.User <users.User>` object
:rtype: users.User
"""
schema = UserSchema(exclude=('password', 'password_confirm'))
resp = self.service.post(self.base+str(id)+'/password/',
params={'change_token': change_token},
json={'old': old, 'new': new, 'new_confirm': new})
return self.service.decode(schema, resp)
def change_token(self, id): # pylint: disable=invalid-name,redefined-builtin
"""Change a user's token.
:param id: User ID as an int.
:return: :class:`users.User <users.User>` object
:rtype: users.User
"""
schema = UserSchema(exclude=('password', 'password_confirm'))
resp = self.service.post(self.base+str(id)+'/token/')
return self.service.decode(schema, resp)
def delete(self, id): # pylint: disable=invalid-name,redefined-builtin
"""Delete a user.
:param id: User ID as an int.
"""
return self.service.delete_id(self.base, id)
def bulk_copy(self, ids):
"""Bulk copy a set of users.
:param ids: Int list of user IDs.
:return: :class:`users.User <users.User>` list
"""
schema = UserSchema()
return self.service.bulk_copy(self.base, self.RESOURCE, ids, schema)
def bulk_edit(self, _fields, ids=None, filter=None, type=None, all=False): # pylint: disable=redefined-builtin
"""Bulk edit a set of users.
:param _fields: :class:`users.User <users.User>` object
:param ids: (optional) Int list of user IDs.
:param filter: (optional) String list of filters.
:param type: (optional) `union` or `inter` as string.
:param all: (optional) Apply to all if bool `True`.
"""
schema = UserSchema(exclude=('id', 'created', 'updated', 'token', 'password', 'password_confirm'))
_fields = self.service.encode(schema, _fields, skip_none=True)
return self.service.bulk_edit(self.base, self.RESOURCE, _fields, ids=ids, filter=filter, type=type, all=all)
def bulk_delete(self, ids=None, filter=None, type=None, all=False): # pylint: disable=redefined-builtin
"""Bulk delete a set of users.
:param ids: (optional) Int list of user IDs.
:param filter: (optional) String list of filters.
:param type: (optional) `union` or `inter` as string.
:param all: (optional) Apply to all if bool `True`.
"""
return self.service.bulk_delete(self.base, self.RESOURCE, ids=ids, filter=filter, type=type, all=all)
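# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes a configured CDRouter service object created elsewhere (base URL and
# API token); the user fields below are placeholders.
def _users_service_example(service):
    users = UsersService(service)
    new_user = users.create(User(name='qa-bot', description='example user'))
    fetched = users.get_by_name('qa-bot')
    return new_user.id == fetched.id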
| 2.46875
| 2
|
nidhogg/__init__.py
|
ifxit/nidho
| 11
|
12782709
|
<reponame>ifxit/nidho
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .sevenmode import SevenMode
from .clustermode import ClusterMode
from .core import NidhoggException
__all__ = ["get_netapp", "get_best_volume_by_size", "get_best_volume_by_quota"]
def get_netapp(url, username, password, verify=False):
"""Return the correct connection object to the filer.
You do not have to care if the filer is a cluster-mode or a seven-mode filer.
.. note::
Provided user must be authorized to use the Netapp API of the filer.
:param url: hostname of the netapp filer
:type url: str
:param username: username to connect to the Netapp API.
:type username: str
    :param password: password to connect to the Netapp API.
    :type password: str
:param verify: check SSL certificate
:type verify: bool
:return: Nidhogg instance
:rtype: :class:`~nidhogg.sevenmode.SevenMode` (if the filer is a seven-mode filer)
:rtype: :class:`~nidhogg.clustermode.ClusterMode` (if the filer is a cluster-mode filer)
Example:
.. code-block:: python
import nidhogg
filer = nidhogg.get_netapp("filer99.example.com", "<username>", "<password>")
filer.list_volumes()
"""
# prepend https if not specified
if not url.startswith("https://"):
url = "https://" + url
nidhogg = SevenMode(url, username, password, 1, 15, verify)
if nidhogg.clustered:
return ClusterMode(url, username, password, 1, 21, verify)
return nidhogg
def get_best_volume_by_size(volumes, filter_func=None, **kwargs):
"""Return the best volume from the list of volumes with the biggest free size.
Apply filter function before if specified.
:param volumes: list of volumes
:type volumes: list of :class:`~nidhogg.compatible.Volume`
:param filter_func: filter function applied before
:type filter_func: function
:return: volume with the biggest free size
:rtype: :class:`~nidhogg.compatible.Volume`
"""
if hasattr(filter_func, '__call__'):
volumes = [v for v in volumes if filter_func(v, **kwargs)]
if not volumes:
raise NidhoggException("No volume available.")
# use max() to get the volume with the biggest free size
return max(volumes)
def get_best_volume_by_quota(volumes, filter_func=None, **kwargs):
"""Return the best volume from the list of volumes with the smallest quota ration.
:param volumes: list of volumes
:type volumes: list of :class:`~nidhogg.compatible.VolumeWithQuotaRatio`
:param filter_func: filter function applied before
:type filter_func: function
:return: volume with the smallest quota ratio (allocated quota size / volume size)
:rtype: :class:`~nidhogg.compatible.VolumeWithQuotaRatio`
"""
if hasattr(filter_func, '__call__'):
volumes = [v for v in volumes if filter_func(v, **kwargs)]
if not volumes:
raise NidhoggException("No volume available.")
# use min() to get the volume with the smallest ratio
return min(volumes)
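# --- Usage sketch (added for illustration; not part of the original module) ---
# Combines a filter function with the helpers above. The hostname, credentials
# and the "state" key used in the filter are assumptions; adapt them to the
# fields your filer actually reports.
def _pick_target_volume_example():
    filer = get_netapp("filer99.example.com", "<username>", "<password>")
    volumes = filer.list_volumes()

    def online_only(vol, **kwargs):
        return vol.get("state") == "online"

    return get_best_volume_by_size(volumes, filter_func=online_only)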
| 2.203125
| 2
|
bika/lims/browser/reports/qualitycontrol_referenceanalysisqc.py
|
hocinebendou/bika.gsoc
| 0
|
12782710
|
import json
import tempfile
from AccessControl import getSecurityManager
from DateTime import DateTime
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t, isAttributeHidden
from bika.lims.browser import BrowserView
from bika.lims.browser.reports.selection_macros import SelectionMacrosView
from gpw import plot
from bika.lims.utils import to_utf8
from plone.app.content.browser.interfaces import IFolderContentsView
from plone.app.layout.globals.interfaces import IViewView
from zope.interface import implements
import os
import plone
class Report(BrowserView):
implements(IViewView)
template = ViewPageTemplateFile(
"templates/qualitycontrol_referenceanalysisqc.pt")
# if unsuccessful we return here:
default_template = ViewPageTemplateFile("templates/qualitycontrol.pt")
def __init__(self, context, request, report=None):
super(Report, self).__init__(context, request)
self.report = report
self.selection_macros = SelectionMacrosView(self.context, self.request)
def __call__(self):
header = _("Reference analysis QC")
subheader = _("Reference analysis quality control graphs ")
MinimumResults = self.context.bika_setup.getMinimumResults()
warning_icon = "<img src='" + self.portal_url + "/++resource++bika.lims.images/warning.png' height='9' width='9'/>"
error_icon = "<img src='" + self.portal_url + "/++resource++bika.lims.images/exclamation.png' height='9' width='9'/>"
self.parms = []
titles = []
sample_uid = self.request.form.get('ReferenceSampleUID', '')
sample = self.reference_catalog.lookupObject(sample_uid)
if not sample:
message = _("No reference sample was selected.")
self.context.plone_utils.addPortalMessage(message, 'error')
return self.default_template()
self.parms.append(
{'title': _("Reference Sample"), 'value': sample.Title()})
titles.append(sample.Title())
service_uid = self.request.form.get('ReferenceServiceUID', '')
service = self.reference_catalog.lookupObject(service_uid)
if not service:
message = _("No analysis services were selected.")
self.context.plone_utils.addPortalMessage(message, 'error')
return self.default_template()
self.contentFilter = {'portal_type': 'ReferenceAnalysis',
'review_state': ['verified', 'published'],
'path': {
"query": "/".join(sample.getPhysicalPath()),
"level": 0}}
self.parms.append(
{'title': _("Analysis Service"), 'value': service.Title()})
titles.append(service.Title())
val = self.selection_macros.parse_daterange(self.request,
'getDateVerified',
'DateVerified')
if val:
self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
self.parms.append(val['parms'])
titles.append(val['titles'])
proxies = self.bika_analysis_catalog(self.contentFilter)
if not proxies:
message = _("No analyses matched your query")
self.context.plone_utils.addPortalMessage(message, 'error')
return self.default_template()
# Compile a list with all relevant analysis data
analyses = []
out_of_range_count = 0
results = []
capture_dates = []
plotdata = ""
tabledata = []
for analysis in proxies:
analysis = analysis.getObject()
service = analysis.getService()
resultsrange = \
[x for x in sample.getReferenceResults() if x['uid'] == service_uid][
0]
try:
result = float(analysis.getResult())
results.append(result)
except:
result = analysis.getResult()
capture_dates.append(analysis.getResultCaptureDate())
if result < float(resultsrange['min']) or result > float(
resultsrange['max']):
out_of_range_count += 1
try:
precision = str(analysis.getPrecision())
except:
precision = "2"
try:
formatted_result = str("%." + precision + "f") % result
except:
formatted_result = result
tabledata.append({_("Analysis"): analysis.getId(),
_("Result"): formatted_result,
_("Analyst"): analysis.getAnalyst(),
_(
"Captured"): analysis.getResultCaptureDate().strftime(
self.date_format_long)})
plotdata += "%s\t%s\t%s\t%s\n" % (
analysis.getResultCaptureDate().strftime(self.date_format_long),
result,
resultsrange['min'],
resultsrange['max']
)
        plotdata = plotdata.encode('utf-8')
result_values = [int(r) for r in results]
result_dates = [c for c in capture_dates]
self.parms += [
{"title": _("Total analyses"), "value": len(proxies)},
]
# # This variable is output to the TAL
self.report_data = {
'header': header,
'subheader': subheader,
'parms': self.parms,
'tables': [],
'footnotes': [],
}
if MinimumResults <= len(proxies):
plotscript = """
set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
set title "%(title)s"
set xlabel "%(xlabel)s"
set ylabel "%(ylabel)s"
set key off
#set logscale
set timefmt "%(timefmt)s"
set xdata time
set format x "%(xformat)s"
set xrange ["%(x_start)s":"%(x_end)s"]
set auto fix
set offsets graph 0, 0, 1, 1
set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
set ytics nomirror
f(x) = mean_y
fit f(x) 'gpw_DATAFILE_gpw' u 1:3 via mean_y
stddev_y = sqrt(FIT_WSSR / (FIT_NDF + 1))
plot mean_y-stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
mean_y+stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
mean_y with lines lc rgb '#ffffff' lw 3,\
"gpw_DATAFILE_gpw" using 1:3 title 'data' with points pt 7 ps 1 lc rgb '#0000ee' lw 2,\
'' using 1:3 smooth unique lc rgb '#aaaaaa' lw 2,\
'' using 1:4 with lines lc rgb '#000000' lw 1,\
'' using 1:5 with lines lc rgb '#000000' lw 1""" % \
{
'title': "",
'xlabel': "",
'ylabel': service.getUnit(),
'x_start': "%s" % min(result_dates).strftime(
self.date_format_short),
'x_end': "%s" % max(result_dates).strftime(
self.date_format_short),
'timefmt': r'%Y-%m-%d %H:%M',
'xformat': '%%Y-%%m-%%d\n%%H:%%M',
}
plot_png = plot(str(plotdata), plotscript=str(plotscript),
usefifo=False)
# Temporary PNG data file
fh, data_fn = tempfile.mkstemp(suffix='.png')
os.write(fh, plot_png)
plot_url = data_fn
self.request['to_remove'].append(data_fn)
plot_url = data_fn
else:
plot_url = ""
table = {
'title': "%s: %s (%s)" % (
t(_("Analysis Service")),
service.Title(),
service.getKeyword()
),
'columns': [_('Analysis'),
_('Result'),
_('Analyst'),
_('Captured')],
'parms': [],
'data': tabledata,
'plot_url': plot_url,
}
self.report_data['tables'].append(table)
translate = self.context.translate
## footnotes
if out_of_range_count:
msgid = _("Analyses out of range")
self.report_data['footnotes'].append(
"%s %s" % (error_icon, t(msgid)))
self.report_data['parms'].append(
{"title": _("Analyses out of range"),
"value": out_of_range_count})
title = t(header)
if titles:
title += " (%s)" % " ".join(titles)
return {
'report_title': title,
'report_data': self.template(),
}
def isSamplePointHidden(self):
return isAttributeHidden('AnalysisRequest', 'SamplePoint')
| 1.742188
| 2
|
tests/test_core.py
|
uezo/minette-python
| 31
|
12782711
|
import sys
import os
sys.path.append(os.pardir)
import pytest
from pytz import timezone
from logging import Logger, FileHandler, getLogger
from datetime import datetime
from types import GeneratorType
from minette import (
Minette, DialogService, SQLiteConnectionProvider,
SQLiteContextStore, SQLiteUserStore, SQLiteMessageLogStore,
Tagger, Config, DialogRouter, StoreSet, Message, User, Group,
DependencyContainer, Payload
)
from minette.utils import date_to_unixtime
from minette.tagger.janometagger import JanomeTagger
now = datetime.now()
user_id = "user_id" + str(date_to_unixtime(now))
print("user_id: {}".format(user_id))
class CustomTagger(Tagger):
pass
class CustomConnectionProvider(SQLiteConnectionProvider):
pass
class CustomContextStore(SQLiteContextStore):
pass
class CustomUserStore(SQLiteUserStore):
pass
class CustomMessageLogStore(SQLiteMessageLogStore):
pass
class CustomDataStores(StoreSet):
connection_provider = CustomConnectionProvider
context_store = CustomContextStore
user_store = CustomUserStore
messagelog_store = CustomMessageLogStore
class MyDialog(DialogService):
def compose_response(self, request, context, connection):
return "res:" + request.text
class ErrorDialog(DialogService):
def compose_response(self, request, context, connection):
1 / 0
return "res:" + request.text
class MyDialogRouter(DialogRouter):
def __init__(self, custom_router_arg=None, **kwargs):
super().__init__(**kwargs)
self.custom_attr = custom_router_arg
class TaggerDialog(DialogService):
def compose_response(self, request, context, connection):
return request.to_reply(
text=request.text,
payloads=[Payload(content_type="data", content=request.words)])
class TaggerManuallyParseDialog(DialogService):
def compose_response(self, request, context, connection):
assert request.words == []
request.words = self.dependencies.tagger.parse(request.text, max_length=10)
return request.to_reply(
text=request.text,
payloads=[Payload(content_type="data", content=request.words)])
class TaggerManuallyParseGeneratorDialog(DialogService):
def compose_response(self, request, context, connection):
assert request.words == []
request.words = self.dependencies.tagger.parse_as_generator(request.text, max_length=10)
return request.to_reply(
text=request.text,
payloads=[Payload(content_type="data", content=request.words)])
def test_init():
# without config
bot = Minette()
assert bot.config.get("timezone") == "UTC"
assert bot.timezone == timezone("UTC")
assert isinstance(bot.logger, Logger)
assert bot.logger.name == "minette"
assert isinstance(bot.connection_provider, SQLiteConnectionProvider)
assert isinstance(bot.context_store, SQLiteContextStore)
assert isinstance(bot.user_store, SQLiteUserStore)
assert isinstance(bot.messagelog_store, SQLiteMessageLogStore)
assert bot.default_dialog_service is None
assert isinstance(bot.tagger, Tagger)
def test_init_config():
bot = Minette(config_file="./config/test_config.ini")
assert bot.timezone == timezone("Asia/Tokyo")
for handler in bot.logger.handlers:
if isinstance(handler, FileHandler):
assert handler.baseFilename == \
os.path.join(os.path.dirname(os.path.abspath(__file__)),
bot.config.get("log_file"))
assert bot.connection_provider.connection_str != ""
assert bot.connection_provider.connection_str == \
bot.config.get("connection_str")
assert bot.context_store.timeout == bot.config.get("context_timeout")
assert bot.context_store.table_name == bot.config.get("context_table")
assert bot.user_store.table_name == bot.config.get("user_table")
assert bot.messagelog_store.table_name == \
bot.config.get("messagelog_table")
def test_init_args():
# initialize arguments
config = Config("")
config.confg_parser.add_section("test_section")
config.confg_parser.set("test_section", "test_key", "test_value")
tz = timezone("Asia/Tokyo")
logger = getLogger("test_core_logger")
print(logger.name)
connection_provider = CustomConnectionProvider
context_store = CustomContextStore
user_store = CustomUserStore
messagelog_store = CustomMessageLogStore
data_stores = CustomDataStores
default_dialog_service = MyDialog
dialog_router = MyDialogRouter
tagger = CustomTagger
custom_router_arg = "router_value"
# create bot
bot = Minette(
config=config, timezone=tz, logger=logger,
connection_provider=connection_provider, context_store=context_store,
user_store=user_store, messagelog_store=messagelog_store,
default_dialog_service=default_dialog_service,
dialog_router=dialog_router,
custom_router_arg=custom_router_arg,
tagger=tagger, prepare_table=True
)
assert bot.config.get("test_key", section="test_section") == "test_value"
assert bot.timezone == timezone("Asia/Tokyo")
assert bot.logger.name == "test_core_logger"
assert isinstance(bot.connection_provider, CustomConnectionProvider)
assert isinstance(bot.context_store, CustomContextStore)
assert isinstance(bot.user_store, CustomUserStore)
assert isinstance(bot.messagelog_store, CustomMessageLogStore)
assert bot.default_dialog_service is MyDialog
assert isinstance(bot.dialog_router, MyDialogRouter)
assert bot.dialog_router.custom_attr == "router_value"
assert isinstance(bot.tagger, CustomTagger)
# create bot with data_stores
bot = Minette(
config=config, timezone=tz, logger=logger,
data_stores=data_stores,
default_dialog_service=default_dialog_service,
dialog_router=dialog_router,
custom_router_arg=custom_router_arg,
tagger=tagger, prepare_table=True
)
assert bot.config.get("test_key", section="test_section") == "test_value"
assert bot.timezone == timezone("Asia/Tokyo")
assert bot.logger.name == "test_core_logger"
assert isinstance(bot.connection_provider, CustomConnectionProvider)
assert isinstance(bot.context_store, CustomContextStore)
assert isinstance(bot.user_store, CustomUserStore)
assert isinstance(bot.messagelog_store, CustomMessageLogStore)
assert bot.default_dialog_service is MyDialog
assert isinstance(bot.dialog_router, MyDialogRouter)
assert bot.dialog_router.custom_attr == "router_value"
assert isinstance(bot.tagger, CustomTagger)
def test_get_user():
bot = Minette(prepare_table=True)
with bot.connection_provider.get_connection() as connection:
# register user for test
u = bot.user_store.get(
channel="get_user_test", channel_user_id=user_id,
connection=connection)
u.name = "user channel"
bot.user_store.save(u, connection)
u_detail = bot.user_store.get(
channel="get_user_test_detail", channel_user_id=user_id,
connection=connection)
u_detail.name = "user detail"
bot.user_store.save(u_detail, connection)
# without detail
request = Message(
text="hello", channel="get_user_test", channel_user_id=user_id)
user = bot._get_user(request, connection)
assert user.channel == "get_user_test"
assert user.channel_user_id == user_id
assert user.name == "user channel"
# with detail
bot.config.confg_parser.set("minette", "user_scope", "channel_detail")
request = Message(
text="hello", channel="get_user_test", channel_detail="detail",
channel_user_id=user_id)
user = bot._get_user(request, connection)
assert user.channel == "get_user_test_detail"
assert user.channel_user_id == user_id
assert user.name == "user detail"
def test_save_user():
bot = Minette(prepare_table=True)
with bot.connection_provider.get_connection() as connection:
# register user for test
u = bot.user_store.get(
channel="save_user_test", channel_user_id=user_id,
connection=connection)
u.name = "<NAME>"
# save
bot._save_user(u, connection)
# check
request = Message(
text="hello", channel="save_user_test", channel_user_id=user_id)
user = bot._get_user(request, connection)
assert user.channel == "save_user_test"
assert user.channel_user_id == user_id
assert user.name == "<NAME>"
def test_get_context():
bot = Minette(prepare_table=True)
with bot.connection_provider.get_connection() as connection:
# register context for test
ctx = bot.context_store.get(
channel="get_context_test", channel_user_id=user_id,
connection=connection)
ctx.data["unixtime"] = date_to_unixtime(now)
bot.context_store.save(ctx, connection)
ctx_group = bot.context_store.get(
channel="get_context_test", channel_user_id="group_" + user_id,
connection=connection)
ctx_group.data["unixtime"] = date_to_unixtime(now)
bot.context_store.save(ctx_group, connection)
ctx_detail = bot.context_store.get(
channel="get_context_test_detail", channel_user_id=user_id,
connection=connection)
ctx_detail.data["unixtime"] = date_to_unixtime(now)
bot.context_store.save(ctx_detail, connection)
# without detail
request = Message(
text="hello", channel="get_context_test", channel_user_id=user_id)
context = bot._get_context(request, connection)
assert context.channel == "get_context_test"
assert context.channel_user_id == user_id
assert context.data["unixtime"] == date_to_unixtime(now)
# group without group
request = Message(
text="hello", channel="get_context_test", channel_user_id=user_id)
request.group = Group(id="group_" + user_id)
context = bot._get_context(request, connection)
assert context.channel == "get_context_test"
assert context.channel_user_id == "group_" + user_id
assert context.data["unixtime"] == date_to_unixtime(now)
# with detail
bot.config.confg_parser.set(
"minette", "context_scope", "channel_detail")
request = Message(
text="hello", channel="get_context_test", channel_detail="detail",
channel_user_id=user_id)
context = bot._get_context(request, connection)
assert context.channel == "get_context_test_detail"
assert context.channel_user_id == user_id
assert context.data["unixtime"] == date_to_unixtime(now)
def test_save_context():
bot = Minette(prepare_table=True)
with bot.connection_provider.get_connection() as connection:
# register context for test
ctx = bot.context_store.get(
channel="save_context_test", channel_user_id=user_id,
connection=connection)
ctx.data["unixtime"] = date_to_unixtime(now)
# save
ctx.topic.keep_on = True
bot._save_context(ctx, connection)
# check
request = Message(
text="hello", channel="save_context_test", channel_user_id=user_id)
context = bot._get_context(request, connection)
assert context.channel == "save_context_test"
assert context.channel_user_id == user_id
assert context.data["unixtime"] == date_to_unixtime(now)
def test_chat():
bot = Minette(default_dialog_service=MyDialog)
res = bot.chat("hello")
assert res.messages[0].text == "res:hello"
def test_chat_error():
bot = Minette(default_dialog_service=MyDialog)
bot.connection_provider = None
res = bot.chat("hello")
assert res.messages == []
def test_chat_messagelog_error():
bot = Minette(default_dialog_service=MyDialog)
bot.messagelog_store = None
res = bot.chat("hello")
assert res.messages[0].text == "res:hello"
def test_chat_dialog_error():
bot = Minette(default_dialog_service=ErrorDialog)
res = bot.chat("hello")
assert res.messages[0].text == "?"
def test_chat_timezone():
bot = Minette(default_dialog_service=MyDialog, timezone=timezone("Asia/Tokyo"))
res = bot.chat("hello")
# bot.timezone itself is +9:19
assert res.messages[0].timestamp.tzinfo == datetime.now(tz=bot.timezone).tzinfo
def test_chat_with_tagger():
bot = Minette(
default_dialog_service=TaggerDialog,
tagger=JanomeTagger)
res = bot.chat("今日はいい天気です。")
assert res.messages[0].text == "今日はいい天気です。"
words = res.messages[0].payloads[0].content
assert words[0].surface == "今日"
assert words[1].surface == "は"
assert words[2].surface == "いい"
assert words[3].surface == "天気"
assert words[4].surface == "です"
def test_chat_with_tagger_no_parse():
bot = Minette(
default_dialog_service=TaggerDialog,
tagger=JanomeTagger, tagger_max_length=0)
assert bot.tagger.max_length == 0
res = bot.chat("今日はいい天気です。")
assert res.messages[0].text == "今日はいい天気です。"
words = res.messages[0].payloads[0].content
assert words == []
def test_chat_parse_morph_manually():
bot = Minette(
default_dialog_service=TaggerManuallyParseDialog,
tagger=JanomeTagger, tagger_max_length=0)
bot.dialog_uses(tagger=bot.tagger)
res = bot.chat("今日はいい天気です。")
assert res.messages[0].text == "今日はいい天気です。"
words = res.messages[0].payloads[0].content
assert words[0].surface == "今日"
assert words[1].surface == "は"
assert words[2].surface == "いい"
assert words[3].surface == "天気"
assert words[4].surface == "です"
def test_chat_parse_morph_manually_generator():
bot = Minette(
default_dialog_service=TaggerManuallyParseGeneratorDialog,
tagger=JanomeTagger, tagger_max_length=0)
bot.dialog_uses(tagger=bot.tagger)
res = bot.chat("今日はいい天気です。")
assert res.messages[0].text == "今日はいい天気です。"
assert isinstance(res.messages[0].payloads[0].content, GeneratorType)
words = [w for w in res.messages[0].payloads[0].content]
assert words[0].surface == "今日"
assert words[1].surface == "は"
assert words[2].surface == "いい"
assert words[3].surface == "天気"
assert words[4].surface == "です"
def test_dialog_uses():
class HighCostToCreate:
pass
class OnlyForFooDS:
pass
    class FooDialog(DialogService):
pass
# run once when create bot
hctc = HighCostToCreate()
offds = OnlyForFooDS()
# create bot
bot = Minette()
# set dependencies to dialogs
bot.dialog_uses(
{
            FooDialog: {"api": offds}
},
highcost=hctc
)
assert bot.dialog_router.dependencies.highcost == hctc
assert hasattr(bot.dialog_router.dependencies, "api") is False
    assert bot.dialog_router.dependency_rules[FooDialog]["api"] == offds
# create bot and not set dialog dependencies
bot_no_dd = Minette()
assert bot_no_dd.dialog_router.dependencies is None
bot_no_dd.dialog_uses()
assert isinstance(bot_no_dd.dialog_router.dependencies, DependencyContainer)
| 2.1875
| 2
|
performance_vs_training.py
|
medvidov/IMaSC
| 0
|
12782712
|
<filename>performance_vs_training.py
from __future__ import unicode_literals, print_function
import plac
import random
import warnings
from pathlib import Path
import spacy
from spacy.util import minibatch, compounding
import json
import imblearn
from utils import prodigy_to_spacy
from metrics_clean import Metrics
from tqdm import tqdm
class PerformanceVsTraining:
def __init__(self, n:int = 20, verbose:bool = False, train_path: str = "training_annotations.jsonl", test_path: str = "shaya_validate_test.jsonl", label: list = ['INSTRUMENT', 'SPACECRAFT']) -> None:
#starters from parameters
self.train_path = train_path
self.train_file = None
self.test_path = test_path
self.test_file = None
self.num_data_points = n
self.anns_per_point = None
self.anns_this_round = 0 #changes with each round
self.label = label
self.metrics = Metrics("Baby", "shaya_validate_test.jsonl")
self.t_vs_p = {}
self.nlp = None
self.verbose = verbose
def _reset_data(self) -> None:
#reset all metrics things between rounds
self.tp = 0.0
self.fp = 0.0
self.fn = 0.0
self.truths = set()
self.guesses = set()
self.num_truths = 0
self.accuracy = 0.0
self.recall = 0.0
self.f1 = 0.0
self.precision = 0.0
self.data_annotated = open(self.annotated_path)
self.data_raw = open(self.raw_path)
def _prep_data(self) -> None:
self.train_file = prodigy_to_spacy(self.train_path)
num_anns = sum(1 for item in self.train_file) #total number of annotations
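        # counting above consumed the generator, so re-create it before training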
self.train_file = prodigy_to_spacy(self.train_path)
self.anns_per_point = num_anns / self.num_data_points
self.test_file = prodigy_to_spacy(self.test_path)
def _run_metrics(self) -> int:
return self.metrics.calculate()
def _train_one_round(self, i: int) -> None:
n_iter = 100 #number of iterations. could make this customizable but I feel that it would be too messy
#train model and save to self.nlp
self.anns_this_round = i * self.anns_per_point
if self.verbose:
print("Training on %s annotations" % (self.anns_this_round))
count = 0
train_data = []
for line in self.train_file:
train_data.append(line)
count += 1
if count >= self.anns_this_round:
break
"""Set up the pipeline and entity recognizer, and train the new entity."""
random.seed(0)
self.nlp = spacy.blank("en") # create blank Language class
# Add entity recognizer to model if it's not in the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if "ner" not in self.nlp.pipe_names:
ner = self.nlp.create_pipe("ner")
self.nlp.add_pipe(ner)
# otherwise, get it, so we can add labels to it
else:
ner = self.nlp.get_pipe("ner")
for label in self.label:
ner.add_label(label) # add new entity label to entity recognizer
optimizer = self.nlp.begin_training()
move_names = list(ner.move_names)
# get names of other pipes to disable them during training
pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"]
other_pipes = [pipe for pipe in self.nlp.pipe_names if pipe not in pipe_exceptions]
# only train NER
        with self.nlp.disable_pipes(*other_pipes), warnings.catch_warnings():
# show warnings for misaligned entity spans once
warnings.filterwarnings("once", category=UserWarning, module='spacy')
sizes = compounding(1.0, 4.0, 1.001)
# batch up the examples using spaCy's minibatch
for itn in range(n_iter):
random.shuffle(train_data)
# Need some oversampling somewhere in here
batches = minibatch(train_data, size=sizes)
losses = {}
for batch in batches:
texts, annotations = zip(*batch)
self.nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses)
#print("Losses", losses)
output_dir = Path("Baby")
if not output_dir.exists():
output_dir.mkdir()
self.nlp.meta["name"] = "BabyModel" # rename model
self.nlp.to_disk(output_dir)
def run_test(self):
self._prep_data()
for i in tqdm(range(1, self.num_data_points + 1)):
self._train_one_round(i)
f1 = self._run_metrics()
self.t_vs_p[round(self.anns_this_round,3)] = round(f1, 3)
print(self.t_vs_p)
# @plac.annotations(
# model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
# new_model_name=("New model name for model meta.", "option", "nm", str),
# output_dir=("Optional output directory", "option", "o", Path),
# n_iter=("Number of training iterations", "option", "n", int),
# )
def main():
p = PerformanceVsTraining(100, True)
p.run_test()
if __name__ == "__main__":
plac.call(main)
| 2.359375
| 2
|
pySDC/tests/test_Q_transfer.py
|
janEbert/pySDC
| 0
|
12782713
|
import nose
import numpy as np
from numpy.polynomial.polynomial import polyval
import pySDC.helpers.transfer_helper as th
from pySDC.core.Collocation import CollBase
from pySDC.tests.test_helpers import get_derived_from_in_package
classes = []
def setup():
global classes, t_start, t_end
# generate random boundaries for the time slice with 0.0 <= t_start < 0.2 and 0.8 <= t_end < 1.0
t_start = np.random.rand(1) * 0.2
t_end = 0.8 + np.random.rand(1) * 0.2
classes = get_derived_from_in_package(CollBase, 'pySDC/implementations/collocation_classes')
@nose.tools.with_setup(setup)
def test_Q_transfer():
for collclass in classes:
yield check_Q_transfer, collclass
def check_Q_transfer(collclass):
"""
A simple test program to check the order of the Q interpolation/restriction
"""
for M in range(3, 9):
Mfine = M
Mcoarse = int((Mfine+1)/2.0)
coll_fine = collclass(Mfine, 0, 1)
coll_coarse = collclass(Mcoarse, 0, 1)
assert coll_fine.left_is_node == coll_coarse.left_is_node, 'ERROR: should be using the same class for coarse and fine Q'
fine_grid = coll_fine.nodes
coarse_grid = coll_coarse.nodes
for order in range(2,coll_coarse.num_nodes+1):
Pcoll = th.interpolation_matrix_1d(fine_grid, coarse_grid, k=order, pad=0, equidist_nested=False)
Rcoll = th.restriction_matrix_1d(fine_grid, coarse_grid, k=order, pad=0)
for polyorder in range(1,order+2):
coeff = np.random.rand(polyorder)
ufine = polyval(fine_grid,coeff)
ucoarse = polyval(coarse_grid,coeff)
uinter = Pcoll.dot(ucoarse)
urestr = Rcoll.dot(ufine)
err_inter = np.linalg.norm(uinter-ufine, np.inf)
err_restr = np.linalg.norm(urestr-ucoarse, np.inf)
if polyorder <= order:
assert err_inter < 2E-15, "ERROR: Q-interpolation order is not reached, got %s" %err_inter
assert err_restr < 2E-15, "ERROR: Q-restriction order is not reached, got %s" % err_restr
else:
assert err_inter > 2E-15, "ERROR: Q-interpolation order is higher than expected, got %s" % polyorder
@nose.tools.with_setup(setup)
def test_Q_transfer_minimal():
for collclass in classes:
yield check_Q_transfer_minimal, collclass
def check_Q_transfer_minimal(collclass):
"""
A simple test program to check the order of the Q interpolation/restriction for only 2 coarse nodes
"""
Mcoarse = 2
coll_coarse = collclass(Mcoarse, 0, 1)
for M in range(3, 9):
Mfine = M
coll_fine = collclass(Mfine, 0, 1)
assert coll_fine.left_is_node == coll_coarse.left_is_node, 'ERROR: should be using the same class for coarse and fine Q'
fine_grid = coll_fine.nodes
coarse_grid = coll_coarse.nodes
Pcoll = th.interpolation_matrix_1d(fine_grid, coarse_grid, k=2, pad=0, equidist_nested=False)
Rcoll = th.restriction_matrix_1d(fine_grid, coarse_grid, k=2, pad=0)
for polyorder in range(1,3):
coeff = np.random.rand(polyorder)
ufine = polyval(fine_grid,coeff)
ucoarse = polyval(coarse_grid,coeff)
uinter = Pcoll.dot(ucoarse)
urestr = Rcoll.dot(ufine)
err_inter = np.linalg.norm(uinter-ufine, np.inf)
err_restr = np.linalg.norm(urestr-ucoarse, np.inf)
if polyorder <= 2:
assert err_inter < 2E-15, "ERROR: Q-interpolation order is not reached, got %s" %err_inter
assert err_restr < 2E-15, "ERROR: Q-restriction order is not reached, got %s" % err_restr
else:
assert err_inter > 2E-15, "ERROR: Q-interpolation order is higher than expected, got %s" % polyorder
| 2.03125
| 2
|
examples/basic/hello_world_with_magic.py
|
osomdev/pyshrimp
| 6
|
12782714
|
#!/usr/bin/env pyshrimp
# $opts: magic
from pyshrimp import log, shell_cmd
print('You can run this as any other script')
print('But then what is the point? :)')
log('You can use log with a bit more details!')
log('The log is initialized by run... but with magic it gets magically invoked!')
log('To do that just add magic to opts: # $opts: magic')
log('The downside is: script will run differently when invoked directly using %> python script.py')
log('Also if you forget to turn on magic those logs will not appear...')
shell_cmd('echo You can also run shell scripts easily', capture=False).exec()
| 2.09375
| 2
|
src/modules/agents/__init__.py
|
simsimiSION/pymarl-algorithm-extension
| 10
|
12782715
|
<gh_stars>1-10
REGISTRY = {}
from .rnn_agent import RNNAgent
from .commnet_agent import CommAgent
from .g2a_agent import G2AAgent
from .maven_agent import MAVENAgent
REGISTRY["rnn"] = RNNAgent
REGISTRY['commnet'] = CommAgent
REGISTRY['g2a'] = G2AAgent
REGISTRY['maven'] = MAVENAgent
| 1.117188
| 1
|
tool/klint/fullstack/reg_util.py
|
kylerky/klint
| 2
|
12782716
|
import claripy
from kalm import utils
from . import ast_util
from . import spec_act
from . import spec_reg
# TODO: Akvile had put a cache here, which is a good idea since the read-then-write pattern is common;
# I removed it cause it depended on state.globals, but we should put it back somehow
def __constrain_field(symb, start, end, value):
"""
    Returns a copy of symb with symb[end:start] replaced by the constant value.
"""
if value & (2**(1 + end - start) - 1) != value:
raise Exception(f"The value {value} does not fit in the specified range {symb}[{end}:{start}].")
value = claripy.BVV(value, end - start + 1)
if start == 0:
if end == symb.size() - 1:
return value
return symb[symb.size()-1:end+1].concat(value)
if end == symb.size() - 1:
return value.concat(symb[start-1:0])
return symb[symb.size()-1:end+1].concat(value).concat(symb[start-1:0])
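# Illustration (added): for a 32-bit symbol, __constrain_field(symb, 8, 15, 0x5A)
# returns symb[31:16].concat(BVV(0x5A, 8)).concat(symb[7:0]), i.e. bits 15..8
# are forced to 0x5A while all other bits stay symbolic.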
def __init_reg_val_symb(name, data):
"""
Creates and returns a Symbolic Bit Vector for the identified register based on
initial register field values
:param name: the name of the register
:param data: dictionary associated with the register reg
:return: the symbolic register value
"""
symb = claripy.BVS(name, data['length'])
last = 0 # Last unconstrained bit
for field, info in data['fields'].items():
        if info['init'] == 'X': # Floating value can be modeled as unconstrained
last = info['end'] + 1
continue
if last != info['start']: # There is an implicit Reserved block
symb = __constrain_field(symb, last, info['start'] - 1, 0)
last = info['start']
symb = __constrain_field(symb, info['start'], info['end'], info['init'])
last = info['end'] + 1
if last != data['length']: # There is a reserved field at the end
symb = __constrain_field(symb, last, data['length'] - 1, 0)
return symb
def __init_reg_val_con(data):
"""
Creates and returns a Bit Vector for the indentified register based on
initial register field values. Returns None if the register cannot be
made concrete.
:param data: dictionary associated with the register
:return: BVV or None
"""
value = 0
for field, info in data['fields'].items():
        if info['init'] == 'X': # Floating value can be modeled as unconstrained
return None
value = value | (info['init'] << info['start'])
bvv = claripy.BVV(value, data['length'])
return bvv
def get_pci_reg(base, spec):
for name, info in spec.items():
b, m, _ = info['addr'][0]
assert m == 0, "PCI m must be 0"
if b == base:
return name
raise Exception(f"PCI register with address 0x{base:x} is not in the spec.")
def find_reg_from_addr(state, addr, _cache={}):
    """
    Finds which register the address refers to.
    :return: the name of the register and its index.
    """
    if len(_cache) == 0:
        for reg, data in spec_reg.registers.items():
            idx = 0
            for b, m, l in data['addr']:
                for n in range(0, l+1-idx):
                    _cache[b + n*m] = (reg, n+idx)
                idx += l + 1
# Optimization: if addr isn't symbolic then deal with it quickly
if not isinstance(addr, claripy.ast.Base) or not addr.symbolic:
conc_addr = state.solver.eval(addr)
cached = _cache.get(conc_addr, None)
if cached is not None:
return cached
for reg, data in spec_reg.registers.items():
len_bytes = data['length'] // 8
idx = 0
for b, m, l in data['addr']:
high = b + (l-idx)*m + len_bytes
if b <= conc_addr and conc_addr < high:
reg_index = 0 if m == 0 else ((conc_addr - b) // m + idx)
if int(reg_index) == reg_index:
reg_index = int(reg_index) # they compare as equal but without this reg_index is still a float
#print(f"{reg}[{reg_index}]")
return reg, reg_index
idx += l + 1
raise Exception("Need to double-check logic below for symbolic indices...")
n = claripy.BVS("n", 64)
for reg, data in spec_reg.registers.items():
len_bytes = data['length'] // 8
p = 0
for b, m, l in data['addr']:
low = b + (n-p)*m
high = low + len_bytes
constraint = (n - p >= 0) & (n <= l) & (low <= addr) & (addr < high)
if utils.definitely_false(state.solver, constraint): # Continue the search
p += l + 1
continue
if m != 0:
n_con = state.solver.eval(n, extra_constraints=[constraint])
#if not (n_con in state.globals['indices']):
# state.globals['indices'] += [n_con]
#print(f"{reg}[{n_con}]")
return reg, n_con
#print(f"{reg}")
return reg, None
raise Exception(f"Cannot find register at {addr}.")
def is_reg_indexed(data):
_, m, _ = data['addr'][0]
return (m != 0)
def fetch_reg(reg_dict, reg, index, data, use_init):
"""
Fetches register from state global store. Initialises it if needed.
"""
if reg in reg_dict:
d = reg_dict[reg]
if is_reg_indexed(data):
if index in d.keys():
return d[index]
#else:
# raise "what do I do here?"
else:
return d
if use_init:
reg_bv = __init_reg_val_con(data)
if reg_bv is None:
# If a concrete value cannot be created, try symbolic
reg_bv = __init_reg_val_symb(reg, data)
else:
reg_bv = claripy.BVS(reg, data['length'])
update_reg(reg_dict, reg, index, data, reg_bv)
return reg_bv
def update_reg(reg_dict, reg, index, data, expr):
"""
Update register value in the state.
:param data: dictionary associated with the register reg
"""
if not is_reg_indexed(data):
reg_dict[reg] = expr
elif reg in reg_dict:
reg_dict[reg][index] = expr
else:
reg_dict[reg] = {index: expr}
def find_fields_on_write(state, prev, new, reg, spec):
"""
Finds which named fields of the register have been changed and
returns this information as a list.
"""
data = spec[reg]
fields = []
for field, info in data['fields'].items():
s = info['start']
e = info['end']
if not (prev[e:s] == new[e:s]).is_true(): #ideally, utils.can_be_false(state.solver, prev[e:s] == new[e:s]), but that's slow so let's be conservative
p = prev[e:s]
n = new[e:s]
fields += [(field, p, n)]
return fields
def check_access_write(old_val, new_val, reg, data, fields):
"""
Determines which fields are written and whether it is legal
to do so.
"""
reg_access = data['access']
if len(fields) == 0 and reg_access == spec_reg.Access.RO:
# NOTE: This permits writing to reserved fields
raise Exception(f"Illegal attempt to write to register {reg}")
for i, f_info in enumerate(fields):
(f, p, n) = f_info
field_access = data['fields'][f]['access']
if field_access == spec_reg.Access.IW:
fields[i] = (fields[i][0],fields[i][1],fields[i][1]) # new is prev
return
illegal = (field_access == spec_reg.Access.NA)
illegal |= (field_access == spec_reg.Access.RO)
if illegal:
raise Exception(f"Illegal attempt to write to {reg}.{f}")
def change_reg_field(state, device, name, index, registers, new):
"""
Changes a single field in a register and saves the new value.
:param name: register indentifier of the form REG.FIELD
:param register: register spec
:param new: new field value. If the field is to be made
symbolic, should be 'X'.
"""
reg, field = name.split('.', 1)
data = registers[reg]
prev = -1
dev_regs = device.regs
if registers == spec_reg.pci_regs:
dev_regs = device.pci_regs
reg_old = fetch_reg(dev_regs, reg, index, data, device.use_init[0])
reg_new = None
f_info = data['fields'][field]
if reg_old.op == 'BVV' and new != 'X':
val = 0
if f_info['start'] > 0:
before = state.solver.eval_one(reg_old[f_info['start']-1:0])
val = val | before
val = val | (new << f_info['start'])
if f_info['end'] < data['length'] - 1:
after = state.solver.eval_one(reg_old[data['length']-1:f_info['end']+1])
val = val | (after << f_info['end']+1)
reg_new = claripy.BVV(val, data['length'])
else:
if new == 'X':
raise "oops"
value_len = f_info['end'] - f_info['start'] + 1
if f_info['start'] == 0:
reg_new = claripy.BVV(new, value_len)
else:
reg_new = claripy.BVV(new, value_len).concat(reg_old[f_info['start']-1:0])
if f_info['end'] < data['length'] - 1:
reg_new = reg_old[data['length']-1:f_info['end']+1].concat(reg_new)
update_reg(dev_regs, reg, index, data, reg_new)
def verify_write(state, device, fields, reg, index, reg_dict, _cache={}):
"""
Verifies if the write can be matched to an action.
Raises an exception if it can't be matched.
"""
if len(_cache) == 0:
for action, info in spec_act.actions.items():
for r in info['action'].getRegisters():
if r in _cache:
_cache[r].append((action, info))
else:
_cache[r] = [(action, info)]
counter = device.counter[0]
for f_info in fields:
(f, prev, new) = f_info
        # Actions whose preconditions fail - useful for debugging
rejected = []
# The write to this field is invalid until a matching
# action is found
valid = False
if reg_dict[reg]['fields'][f]['access'] == spec_reg.Access.IW:
# Validating this field is optional
valid = True
for action, info in _cache.get(reg, []):
# Does the action match writing to this field?
action_matches = False
if reg_dict[reg]['fields'][f]['end'] != reg_dict[reg]['fields'][f]['start']:
action_matches = info['action'].isWriteFieldCorrect(state, f"{reg}.{f}", new)
elif info['action'].isFieldSetOrCleared(f"{reg}.{f}", ast_util.AST.Set) and utils.definitely_true(state.solver, new == claripy.BVV(-1, new.size())):
action_matches = True
elif info['action'].isFieldSetOrCleared(f"{reg}.{f}", ast_util.AST.Clear) and utils.definitely_true(state.solver, new == 0):
action_matches = True
if not action_matches:
continue
# If there is no precondition, the action is valid
precond_sat = True
if info['precond'] != None:
con = info['precond'].generateConstraints(device, spec_reg.registers, spec_reg.pci_regs, index)
precond_sat = utils.definitely_true(state.solver, con)
if not precond_sat:
rejected.append(action)
continue
valid = True
print("Action: ", action)
if action == 'Initiate Software Reset':
device.use_init[0] = True
device.latest_action[0] = action
if action in device.actions.keys():
# We have seen this action before
device.actions[action] = device.actions[action] + [counter]
else:
device.actions[action] = [counter]
if valid:
continue
if len(rejected) == 0:
raise Exception(f"Cannot validate writing to {reg}.{f}. There are no actions that match writing to this field.")
if not valid:
raise Exception(f"Cannot validate writing to {reg}.{f}. Matching but rejected actions: {rejected}. Maybe the precondition is not satisfied for one of them?")
# If we did not raise any exception, that means we are able to match
# concurrent writes to actions. Increment counter to establish
# action order.
device.counter[0] = counter + 1
| 2.078125
| 2
|
cohesivenet/macros/admin.py
|
cohesive/python-cohesivenet-sdk
| 0
|
12782717
|
<reponame>cohesive/python-cohesivenet-sdk<gh_stars>0
import time
from typing import Dict, List
from cohesivenet import VNS3Client, data_types
from cohesivenet.macros import api_operations
def roll_api_password(
new_password, clients: List[VNS3Client]
) -> data_types.BulkOperationResult:
"""roll_api_password
Update all passwords for clients
Arguments:
new_password {str}
clients {List[VNS3Client]}
Returns:
BulkOperationResult - tuple containing the clients that
succeeded and the clients that failed with their exceptions
"""
def _update_password(_client):
        resp = _client.config.put_update_api_password(password=new_password)
        _client.configuration.password = new_password
return resp
return api_operations.__bulk_call_client(
clients, _update_password, parallelize=True
)
def disable_uis(clients: List[VNS3Client]):
"""disable_uis
Disable all UIs for clients
Arguments:
clients {List} -- List of VNS3Clients
Returns:
BulkOperationResult
"""
def _disable_ui(_client):
resp = _client.config.put_update_admin_ui(enabled=False)
# required to avoid 502 from api resetting itself
time.sleep(2.0)
return resp
return api_operations.__bulk_call_client(clients, _disable_ui)
def roll_ui_credentials(
new_credentials: Dict, clients: List[VNS3Client], enable_ui=False
):
"""Update UI credentials to common credentials
Arguments:
new_credentials {dict} -- {username: str, password: str}
clients {List} -- List of VNS3 clients
enable_ui {Bool} -- whether to enable UI
Returns:
BulkOperationResult
"""
assert "username" in new_credentials, '"username" required in new_credentials arg'
assert "password" in new_credentials, '"password" required in new_credentials arg'
def _update_ui_credentials(_client):
resp = _client.config.put_update_admin_ui(
**{
"admin_username": new_credentials.get("username"),
"admin_password": <PASSWORD>.get("password"),
"enabled": enable_ui,
}
)
# required to avoid 502 from api resetting itself
time.sleep(2.0)
return resp
return api_operations.__bulk_call_client(
clients, _update_ui_credentials, parallelize=True
)
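# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes a list of already-constructed VNS3Client instances; only the helpers
# defined above are exercised, and the credential values are placeholders.
def _rotate_credentials_example(clients):
    api_result = roll_api_password("<PASSWORD>", clients)
    ui_result = roll_ui_credentials(
        {"username": "<username>", "password": "<PASSWORD>"}, clients, enable_ui=True
    )
    return api_result, ui_result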
| 2.3125
| 2
|
streamlit_server_state/hash.py
|
whitphx/streamlit-server-state
| 20
|
12782718
|
from typing import Any, Optional, Tuple, Union
ReprHash = Union[str, int]
ObjDictHash = Optional[str]
Hash = Tuple[ReprHash, ObjDictHash]
def calc_hash(val: Any) -> Hash:
dict_hash: ObjDictHash = None
if hasattr(val, "__dict__") and isinstance(val.__dict__, dict):
dict_hash = repr(val.__dict__)
repr_hash: ReprHash
try:
repr_hash = repr(val)
except Exception:
repr_hash = id(val)
return (repr_hash, dict_hash)
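# --- Usage sketch (added for illustration; not part of the original module) ---
# calc_hash pairs the repr of a value with the repr of its __dict__, so mutating
# an attribute changes the second component even when no __repr__ is defined.
if __name__ == "__main__":
    class _Counter:
        def __init__(self) -> None:
            self.value = 0

    obj = _Counter()
    before = calc_hash(obj)
    obj.value += 1
    after = calc_hash(obj)
    assert before != after  # the __dict__ repr differs after the mutation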
| 2.890625
| 3
|
codigos-aula/cod3.py
|
maumneto/exercicio-python
| 0
|
12782719
|
<reponame>maumneto/exercicio-python<gh_stars>0
# import math
peso = float(input('What is your weight: '))
altura = float(input('What is your height: '))
imc = peso/(altura*altura)
# imc = peso/(math.pow(altura, 2))
print('The BMI result is ', imc)
| 3.515625
| 4
|
run.py
|
Bidulman/bidoyon
| 0
|
12782720
|
from app import app
HOST = "localhost"
PORT = 5000
if __name__ == '__main__':
app.run(HOST, PORT, debug=True)
| 1.8125
| 2
|
aegea/top.py
|
lvreynoso/aegea
| 57
|
12782721
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys
from datetime import datetime
from typing import List
import boto3
import botocore.exceptions
from . import register_parser
from .util import ThreadPoolExecutor
from .util.printing import format_table, page_output
def get_stats_for_region(region):
try:
session = boto3.Session(region_name=region)
num_instances = len(list(session.resource("ec2").instances.all()))
num_amis = len(list(session.resource("ec2").images.filter(Owners=["self"])))
num_vpcs = len(list(session.resource("ec2").vpcs.all()))
num_enis = len(list(session.resource("ec2").network_interfaces.all()))
num_volumes = len(list(session.resource("ec2").volumes.all()))
except botocore.exceptions.ClientError:
num_instances, num_amis, num_vpcs, num_enis, num_volumes = ["Access denied"] * 5 # type: ignore
return [region, num_instances, num_amis, num_vpcs, num_enis, num_volumes]
def top(args):
table = [] # type: List[List]
columns = ["Region", "Instances", "AMIs", "VPCs", "Network interfaces", "EBS volumes"]
executor = ThreadPoolExecutor()
table = list(executor.map(get_stats_for_region, boto3.Session().get_available_regions("ec2")))
page_output(format_table(table, column_names=columns, max_col_width=args.max_col_width))
parser = register_parser(top, help='Show an overview of AWS resources per region')
| 2.28125
| 2
|
src/domain/cargo_space.py
|
KlemenGrebovsek/Cargo-stowage-optimization
| 2
|
12782722
|
from numpy import ndarray
from src.domain.cs_column import Column
import numpy as np
from src.model.stop_at_station_summary import StopAtStationSummary
class CargoSpace(object):
""" Represents cargo space in transport vehicle/ship ect.
"""
def __init__(self, width: int, height: int):
self._width: int = width
self._height: int = height
self._columns: list = [Column(height) for i in range(width)]
@property
def columns(self) -> list:
return self._columns
def simulate_stop_at_station(self, station_index: int, packages_to_load: list) -> StopAtStationSummary:
""" Simulates stop at station, unloads, loads packages and monitors activities.
Args:
station_index: Current station index.
packages_to_load: List of packages to load at this station.
Returns: Summary of process and current state of cargo space.
"""
movements_sum = 0
wait_que = []
packages_per_col = np.zeros(len(self._columns), dtype=int)
# Unload packages for current station.
movements_sum += self._unload_packages(packages_per_col, wait_que, station_index)
# Load packages for current station.
movements_sum += self._load_packages(packages_to_load, packages_per_col)
# Load packages from waiting que.
movements_sum += self._load_packages(wait_que, packages_per_col)
return StopAtStationSummary(
movements_sum=movements_sum,
layout_dist=packages_per_col.tolist(),
weight_dist=[column.sum_weight for column in self._columns]
)
def _unload_packages(self, packages_per_col: ndarray, wait_que: list, station_index: int) -> int:
movement = 0
for index, column in enumerate(self._columns):
ret_que, ret_movements = column.unload_at_station(station_index)
movement += ret_movements
wait_que += ret_que
packages_per_col[index] = column.count()
return movement
def _load_packages(self, packages_to_load: list, packages_per_col: ndarray) -> int:
movements = 0
for package in packages_to_load:
add_index = package.given_col_index
if packages_per_col[add_index] == self._height:
add_index = np.argmin(packages_per_col)
self._columns[add_index].add(package)
packages_per_col[add_index] += 1
movements += 1
return movements
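# A rough usage sketch (illustrative only; the package objects and their
# given_col_index / unload behaviour are assumptions based on how they are used above):
#
#   cargo = CargoSpace(width=4, height=6)
#   summary = cargo.simulate_stop_at_station(station_index=0, packages_to_load=packages)
#   print(summary)   # StopAtStationSummary with movement count and distributions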
| 2.625
| 3
|
src/pycropml/pparse.py
|
sielenk-yara/PyCrop2ML
| 0
|
12782723
|
""" License, Header
"""
from __future__ import absolute_import
from __future__ import print_function
from copy import copy
import xml.etree.ElementTree as xml
from . import modelunit as munit
from . import description
from . import inout
from . import parameterset as pset
from . import checking
from . import algorithm
from . import function
from . import initialization
import os.path
import os
from path import Path
class Parser(object):
""" Read an XML file and transform it in our object model.
"""
def parse(self, crop2ml_dir):
raise Exception('Not Implemented')
def dispatch(self, elt):
return self.__getattribute__(elt.tag)(elt)
class ModelParser(Parser):
""" Read an XML file and transform it in our object model.
"""
def parse(self, crop2ml_dir):
self.models = []
self.crop2ml_dir = crop2ml_dir
xmlrep = Path(os.path.join(self.crop2ml_dir,'crop2ml'))
self.algorep = Path(os.path.join(self.crop2ml_dir,'crop2ml'))
fn = xmlrep.glob('unit*.xml')+xmlrep.glob('function*.xml')+xmlrep.glob('init*.xml')
try:
for f in fn:
# Current proxy node for managing properties
doc = xml.parse(f)
root = doc.getroot()
self.dispatch(root)
except Exception as e:
print(("%s is NOT in CropML Format ! %s" % (f, e)))
return self.models
def dispatch(self, elt):
#try:
return self.__getattribute__(elt.tag)(elt)
#except Exception, e:
# print e
#raise Exception("Unvalid element %s" % elt.tag)
def ModelUnit(self, elts):
""" ModelUnit (Description,Inputs,Outputs,Algorithm,Parametersets,
Testsets)
"""
#print('ModelUnit')
kwds = elts.attrib
self._model = munit.ModelUnit(kwds)
self._model.path = os.path.abspath(self.crop2ml_dir)
self.models.append(self._model)
for elt in list(elts):
self.dispatch(elt)
def Description(self, elts):
""" Description (Title,Author,Institution,Reference,Abstract)
"""
#print('Description')
desc = description.Description()
for elt in list(elts):
self.name = desc.__setattr__(elt.tag, elt.text)
self._model.add_description(desc)
def Inputs(self, elts):
""" Inputs (Input)
"""
#print('Inputs')
for elt in list(elts):
self.dispatch(elt)
def Input(self, elts):
""" Input
"""
#print('Input: ')
properties = elts.attrib
_input = inout.Input(properties)
self._model.inputs.append(_input)
def Outputs(self, elts):
""" Ouputs (Output)
"""
#print('Outputs')
for elt in list(elts):
self.dispatch(elt)
def Output(self, elts):
""" Output
"""
#print('Output: ')
properties = elts.attrib
_output = inout.Output(properties)
self._model.outputs.append(_output)
def Initialization(self, elt):
language=elt.attrib["language"]
name=elt.attrib["name"]
filename=elt.attrib["filename"]
#description =elt.attrib["description"]
code = initialization.Initialization(name,language, filename)
self._model.initialization.append(code)
def Function(self, elt):
language=elt.attrib["language"]
name=elt.attrib["name"]
filename=elt.attrib["filename"]
type=elt.attrib["type"]
description =elt.attrib["description"]
code = function.Function(name, language, filename, type, description)
self._model.function.append(code)
def Algorithm(self, elt):
""" Algorithm
"""
#print('Algorithm')
language=elt.attrib["language"]
platform=elt.attrib["platform"]
if "filename" in elt.attrib:
filename=elt.attrib["filename"]
#file = self.algorep/ os.path.splitext(filename)[1][1:]/filename
file = Path(os.path.join(self.algorep,filename))
with open(file, 'r') as f:
development = f.read()
algo = algorithm.Algorithm(language, development, platform, filename)
else:
development = elt.text
algo = algorithm.Algorithm(language, development, platform)
self._model.algorithms.append(algo)
def Parametersets(self, elts):
""" Parametersets (Parameterset)
"""
#print('Parametersets')
for elt in list(elts):
self.Parameterset(elt)
def Parameterset(self, elts):
""" Parameterset
"""
#print('Parameterset: ')
properties = elts.attrib
name = properties.pop('name')
_parameterset = pset.parameterset(self._model, name, properties)
for elt in list(elts):
self.param(_parameterset, elt)
name = _parameterset.name
self._model.parametersets[name] = _parameterset
def param(self, pset, elt):
""" Param
"""
#print('Param: ', elt.attrib, elt.text)
properties = elt.attrib
name = properties['name']
pset.params[name] = elt.text
def Testsets(self, elts):
""" Testsets (Testset)
"""
#print('Testsets')
for elt in list(elts):
self.Testset(elt)
self.testsets = self._model.testsets
def Testset(self, elts):
""" Testset(Test)
"""
#print('Testset')
properties = elts.attrib
name = properties.pop('name')
#print name
_testset = checking.testset(self._model, name, properties)
for elt in list(elts):
#print elt
testname = elt.attrib['name'] # name of test
#print(testname)
input_test={}
output_test={}
param_test={}
#_test = checking.Test(name)
for j in elt.findall("InputValue"): # all inputs
name = j.attrib["name"]
input_test[name]=j.text
for j in elt.findall("OutputValue"): # all outputs
name = j.attrib["name"]
if len(j.attrib)==2:
output_test[name]=[j.text,j.attrib["precision"]]
else: output_test[name]=[j.text]
param_test = {"inputs":input_test, "outputs":output_test}
_testset.test.append({testname:param_test})
#self._model.testsets.setdefault(name, []).append(_testset)
self._model.testsets.append(_testset)
def model_parser(crop2ml_dir):
""" Parse a set of models as xml files contained in crop2ml directory
and algorithm in src directory
This function returns models as python object.
Returns ModelUnit object of the Crop2ML Model.
"""
parser = ModelParser()
return parser.parse(crop2ml_dir)
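# A minimal usage sketch (illustrative only; the directory path is hypothetical and
# must contain a crop2ml/ sub-folder with unit*.xml / function*.xml / init*.xml files):
#
#   models = model_parser('path/to/MyModelPackage')
#   print(len(models), 'model unit(s) parsed')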
| 2.375
| 2
|
tests/test_chi_water.py
|
noahkconley/city-scrapers
| 0
|
12782724
|
from datetime import date, time
import pytest
import json
from city_scrapers.spiders.chi_water import Chi_waterSpider
test_response = []
with open('tests/files/chi_water_test.json') as f:
test_response.extend(json.loads(f.read()))
spider = Chi_waterSpider()
# This line throws error
parsed_items = [item for item in spider._parse_events(test_response)]
##### Test Single Instance #####
def test_name():
assert parsed_items[0]['name'] == 'Board of Commissioners'
def test_description():
assert parsed_items[0]['event_description'] == 'no agenda posted'
def test_start_time():
assert parsed_items[0]['start'] == {
'date': date(2018, 12, 20),
'time': time(10, 30, 00),
'note': ''
}
def test_id():
assert parsed_items[0]['id'] == 'chi_water/201812201030/x/board_of_commissioners'
def test_location():
assert parsed_items[0]['location'] == {
'address': '100 East Erie Street Chicago, IL 60611',
'name': 'Board Room',
'neighborhood': 'River North'
}
def test_sources():
assert parsed_items[0]['sources'] == [{'note': '',
'url': 'https://mwrd.legistar.com/DepartmentDetail.aspx?ID=1622&GUID=5E16B4CD-0692-4016-959D-3F080D6CFFB4'}]
def test_documents():
assert parsed_items[0]['documents'] == [
{
'url': 'https://mwrd.legistar.com/MeetingDetail.aspx?ID=570944&GUID=DF1E81E4-2660-42AF-A398-8296420B9341&Options=info&Search=',
'note': 'meeting details'
},
]
def test_documents_with_agenda():
assert parsed_items[-2]['documents'] == [
{'url': 'https://mwrd.legistar.com/MeetingDetail.aspx?ID=437015&GUID=639F6AB7-6E76-4429-B6F5-FCEB3DC609C5&Options=info&Search=',
'note': 'meeting details'},
{'url': 'https://mwrd.legistar.com/View.ashx?M=A&ID=437015&GUID=639F6AB7-6E76-4429-B6F5-FCEB3DC609C5',
'note': 'agenda'}
]
@pytest.mark.parametrize('item', parsed_items)
def test_name_not_study_session(item):
assert item['name'] != 'Study Session'
def test_status():
assert parsed_items[-1]['status'] == 'passed'
##### Parameterized Tests #####
@pytest.mark.parametrize('item', parsed_items)
def test_type(item):
    assert item['_type'] == 'event'
@pytest.mark.parametrize('item', parsed_items)
def test_all_day(item):
assert item['all_day'] is False
@pytest.mark.parametrize('item', parsed_items)
def test_classification(item):
assert item['classification'] == ''
@pytest.mark.parametrize('item', parsed_items)
def test_end_time(item):
assert item['end'] == {
'date': None,
'time': None,
'note': ''
}
| 2.6875
| 3
|
database_service/run.py
|
Fox520/PaymentGateway
| 1
|
12782725
|
<reponame>Fox520/PaymentGateway
from db_api import app
app.run(host="0.0.0.0", port=6001, debug=True)
| 1.101563
| 1
|
src/sensor.py
|
VirtualWolf/esp32-air-quality-reader-mqtt
| 1
|
12782726
|
import gc
import uasyncio as asyncio
import ujson
import utime
from machine import UART, WDT
import ustruct as struct
import logger
from config import read_configuration
c = read_configuration()
wdt = WDT(timeout=600000)
async def start_readings(client):
while True:
logger.log('Initialising UART bus')
uart = UART(1, 9600)
uart.init(9600, bits=8, parity=None, rx=16, timeout=250)
count = 0
while count < 30:
# logger.log('Warming sensor up, reading #%d of 30' % count)
await read_sensor(uart)
count = count + 1
await asyncio.sleep(1)
logger.log('Finished warming up')
data = await read_sensor(uart)
if data is None:
await asyncio.sleep(1)
            data = await read_sensor(uart)
logger.log(data)
logger.log('Turning off UART bus')
uart.deinit()
timestamp = (utime.time() + 946684800) * 1000
data['timestamp'] = timestamp
json = ujson.dumps(data)
await client.publish(c['topic'], json, qos = 1, retain = True)
wdt.feed()
await asyncio.sleep(180)
async def read_sensor(uart):
try:
buffer = []
data = uart.read(32)
if data is None:
logger.log('No data received, re-running')
await asyncio.sleep(1)
return
data = list(data)
gc.collect()
buffer += data
while buffer and buffer[0] != 0x42:
buffer.pop(0)
# Avoid an overrun if all bad data
if len(buffer) > 200:
buffer = []
if len(buffer) < 32:
            logger.log('Buffer length < 32, re-running')
await asyncio.sleep(1)
            return await read_sensor(uart)
if buffer[1] != 0x4d:
logger.log('Second element of buffer was not 0x4d, re-running')
buffer.pop(0)
await asyncio.sleep(1)
            return await read_sensor(uart)
frame_len = struct.unpack(">H", bytes(buffer[2:4]))[0]
gc.collect()
if frame_len != 28:
buffer = []
logger.log('Frame length was not 28, re-running')
await asyncio.sleep(1)
            return await read_sensor(uart)
# In order:
# - PM1.0 standard
# - PM2.5 standard
# - PM10 standard
# - PM1.0 environmental
# - PM2.5 environmental
# - PM10 environmental
        # - Particles > 0.3um / 0.1L air
# - Particles > 0.5um / 0.1L air
# - Particles > 1.0um / 0.1L air
# - Particles > 2.5um / 0.1L air
# - Particles > 5.0um / 0.1L air
# - Particles > 10um / 0.1L air
# - Skip
# - Checksum
frame = struct.unpack(">HHHHHHHHHHHHHH", bytes(buffer[4:]))
check = sum(buffer[0:30])
if check != frame[-1]:
buffer = []
logger.log('Checksums don\'t match, re-running')
await asyncio.sleep(1)
            return await read_sensor(uart)
buffer = buffer[32:]
return {
'pm_1_0': frame[3],
'pm_2_5': frame[4],
'pm_10': frame[5],
'particles_0_3um': frame[6],
'particles_0_5um': frame[7],
'particles_1_0um': frame[8],
'particles_2_5um': frame[9],
'particles_5_0um': frame[10],
'particles_10um': frame[11],
}
except Exception as e:
logger.log(e)
| 2.53125
| 3
|
client/python/ProjectName/Client/__init__.py
|
KaNaDaAT/java-spring-template
| 0
|
12782727
|
# flake8: noqa
"""
OpenAPI definition
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0
Generated by: https://openapi-generator.tech
"""
__version__ = "0.1.0"
# import ApiClient
from ProjectName.Client.api_client import ApiClient
# import Configuration
from ProjectName.Client.configuration import Configuration
# import exceptions
from ProjectName.Client.exceptions import OpenApiException
from ProjectName.Client.exceptions import ApiAttributeError
from ProjectName.Client.exceptions import ApiTypeError
from ProjectName.Client.exceptions import ApiValueError
from ProjectName.Client.exceptions import ApiKeyError
from ProjectName.Client.exceptions import ApiException
| 1.054688
| 1
|
interpreter/lexical_analysis/lexer.py
|
rand0musername/CInterpreter
| 0
|
12782728
|
""" SCI - Simple C Interpreter """
from .token_type import *
from .token import Token
# maps strings that have a special meaning to corresponding tokens
RESERVED_KEYWORDS = {
'char': Token(CHAR, 'char'),
'int': Token(INT, 'int'),
'float': Token(FLOAT, 'float'),
'double': Token(DOUBLE, 'double'),
'long': Token(LONG, 'long'),
'short': Token(SHORT, 'short'),
'signed': Token(SIGNED, 'signed'),
'unsigned': Token(UNSIGNED, 'unsigned'),
'if': Token(IF, 'if'),
'else': Token(ELSE, 'else'),
'for': Token(FOR, 'for'),
'while': Token(WHILE, 'while'),
'do': Token(DO, 'do'),
'return': Token(RETURN, 'return'),
'break': Token(BREAK, 'break'),
'continue': Token(CONTINUE, 'continue'),
'switch': Token(SWITCH, 'switch'),
'case': Token(CASE, 'case'),
'default': Token(DEFAULT, 'default'),
'struct': Token(STRUCT, 'struct'),
}
class LexicalError(Exception):
pass
class Lexer(object):
def __init__(self, text):
"""
Initializes the lexer.
text: the source code to be lexically analyzed
pos: the current lexer position
current_char: the character at the current lexer position
line: the current line number
"""
self.text = text
self.pos = 0
self.current_char = self.text[self.pos]
self.line = 1
def error(self, message):
""" Raises a lexical error. """
raise LexicalError("LexicalError: " + message)
def advance(self, n=1):
""" Advances the `pos` pointer and sets the `current_char` variable. """
self.pos += n
if self.pos >= len(self.text):
self.current_char = None # Indicates end of input
else:
self.current_char = self.text[self.pos]
def peek(self, n):
""" Returns the n-th char from the current positions but don't change state. """
peek_pos = self.pos + n
if peek_pos > len(self.text) - 1:
return None
else:
return self.text[peek_pos]
def skip_whitespace(self):
""" Skips all whitespace starting from the current position. """
while self.current_char is not None and self.current_char.isspace():
if self.current_char == '\n':
self.line += 1
self.advance()
def skip_comment(self):
""" Skips a single line comment starting at the current position. """
self.advance(2)
while self.current_char is not None:
if self.current_char == '\n':
self.line += 1
self.advance()
return
self.advance()
def skip_multiline_comment(self):
""" Skip a multi line comment starting at the current position. """
self.advance(2)
while self.current_char is not None:
if self.current_char == '*' and self.peek(1) == '/':
self.advance(2)
return
if self.current_char == '\n':
self.line += 1
self.advance()
self.error("Unterminated comment at line {}".format(self.line))
def number(self):
"""Handles an integer or a real number."""
result = ''
while self.current_char is not None and self.current_char.isdigit():
result += self.current_char
self.advance()
if self.current_char == '.':
result += self.current_char
self.advance()
while self.current_char is not None and self.current_char.isdigit():
result += self.current_char
self.advance()
token = Token(REAL_CONST, float(result))
else:
token = Token(INTEGER_CONST, int(result))
return token
def string(self):
""" Handles a string literal. """
result = ''
self.advance()
        while self.current_char != '"':
if self.current_char is None:
self.error(
message='Unterminated string literal at line {}'.format(self.line)
)
result += self.current_char
self.advance()
self.advance()
result = result.replace('\\n', '\n')
return Token(STRING, result)
def char(self):
""" Handles a character literal. """
self.advance()
ch = self.current_char
self.advance()
if ch == '\\' and self.current_char == 'n':
ch = '\n'
self.advance()
if self.current_char != '\'':
self.error("Unterminated char literal at line {}".format(self.line))
self.advance()
return Token(CHAR_CONST, ord(ch))
def _id(self):
""" Handles identifiers and reserved keywords. """
result = ''
while self.current_char is not None and (self.current_char.isalnum() or self.current_char == '_'):
result += self.current_char
self.advance()
# Return a reserved keyword token or an id token.
token = RESERVED_KEYWORDS.get(result, Token(ID, result))
return token
@property
def get_next_token(self):
""" The main lexer method that returns the next token in the text. """
while self.current_char is not None:
if self.current_char.isspace():
self.skip_whitespace()
continue
if self.current_char == '/' and self.peek(1) == '/':
self.skip_comment()
continue
if self.current_char == '/' and self.peek(1) == '*':
self.skip_multiline_comment()
continue
if self.current_char.isalpha():
return self._id()
if self.current_char.isdigit():
return self.number()
if self.current_char == '"':
return self.string()
if self.current_char == '\'':
return self.char()
# three-char tokens
if self.current_char == '<' and self.peek(1) == '<' and self.peek(2) == '=':
self.advance(3)
return Token(LEFT_ASSIGN, '<<=')
if self.current_char == '>' and self.peek(1) == '>' and self.peek(2) == '=':
self.advance(3)
return Token(RIGHT_ASSIGN, '>>=')
# two-char tokens
if self.current_char == '+' and self.peek(1) == '=':
self.advance(2)
return Token(ADD_ASSIGN, '+=')
if self.current_char == '-' and self.peek(1) == '=':
self.advance(2)
return Token(SUB_ASSIGN, '-=')
if self.current_char == '*' and self.peek(1) == '=':
self.advance(2)
return Token(MUL_ASSIGN, '*=')
if self.current_char == '/' and self.peek(1) == '=':
self.advance()
self.advance()
return Token(DIV_ASSIGN, '/=')
if self.current_char == '%' and self.peek(1) == '=':
self.advance(2)
return Token(MOD_ASSIGN, '%=')
if self.current_char == '&' and self.peek(1) == '=':
self.advance(2)
return Token(AND_ASSIGN, '&=')
if self.current_char == '^' and self.peek(1) == '=':
self.advance(2)
return Token(XOR_ASSIGN, '^=')
if self.current_char == '|' and self.peek(1) == '=':
self.advance(2)
return Token(OR_ASSIGN, '|=')
if self.current_char == '>' and self.peek(1) == '>':
self.advance(2)
return Token(RIGHT_OP, '>>')
if self.current_char == '<' and self.peek(1) == '<':
self.advance(2)
return Token(LEFT_OP, '<<')
if self.current_char == '+' and self.peek(1) == '+':
self.advance(2)
return Token(INC_OP, '++')
if self.current_char == '-' and self.peek(1) == '-':
self.advance(2)
return Token(DEC_OP, '--')
if self.current_char == '&' and self.peek(1) == '&':
self.advance(2)
return Token(LOG_AND_OP, '&&')
if self.current_char == '|' and self.peek(1) == '|':
self.advance(2)
return Token(LOG_OR_OP, '||')
if self.current_char == '<' and self.peek(1) == '=':
self.advance(2)
return Token(LE_OP, '<=')
if self.current_char == '>' and self.peek(1) == '=':
self.advance(2)
return Token(GE_OP, '>=')
if self.current_char == '=' and self.peek(1) == '=':
self.advance(2)
return Token(EQ_OP, '==')
if self.current_char == '!' and self.peek(1) == '=':
self.advance(2)
return Token(NE_OP, '!=')
if self.current_char == '-' and self.peek(1) == '>':
self.advance(2)
return Token(ARROW, '->')
# one-char tokens
if self.current_char == '<':
self.advance()
return Token(LT_OP, '<')
if self.current_char == '>':
self.advance()
return Token(GT_OP, '>')
if self.current_char == '=':
self.advance()
return Token(ASSIGN, '=')
if self.current_char == '!':
self.advance()
return Token(LOG_NEG, '!')
if self.current_char == '&':
self.advance()
return Token(AMPERSAND, '&')
if self.current_char == '|':
self.advance()
return Token(OR_OP, '|')
if self.current_char == '^':
self.advance()
                return Token(XOR_OP, '^')
if self.current_char == '+':
self.advance()
return Token(PLUS, '+')
if self.current_char == '-':
self.advance()
return Token(MINUS, '-')
if self.current_char == '*':
self.advance()
return Token(ASTERISK, '*')
if self.current_char == '/':
self.advance()
return Token(DIV_OP, '/')
if self.current_char == '%':
self.advance()
return Token(MOD_OP, '%')
if self.current_char == '(':
self.advance()
return Token(LPAREN, '(')
if self.current_char == ')':
self.advance()
return Token(RPAREN, ')')
if self.current_char == '{':
self.advance()
return Token(LBRACKET, '{')
if self.current_char == '}':
self.advance()
return Token(RBRACKET, '}')
if self.current_char == ';':
self.advance()
return Token(SEMICOLON, ';')
if self.current_char == ':':
self.advance()
return Token(COLON, ':')
if self.current_char == ',':
self.advance()
return Token(COMMA, ',')
if self.current_char == '.':
self.advance()
return Token(DOT, '.')
if self.current_char == '#':
self.advance()
return Token(HASH, '#')
if self.current_char == '?':
self.advance()
return Token(QUESTION_MARK, '?')
self.error(
message="Invalid char {} at line {}".format(self.current_char, self.line)
)
return Token(EOF, None)
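# A rough usage sketch (illustrative only; the `type` attribute on Token is an
# assumption about the token class, and `get_next_token` is exposed as a property,
# so it is read without call parentheses):
#
#   lexer = Lexer('int x = 40 + 2;')
#   token = lexer.get_next_token
#   while token.type != EOF:
#       print(token)
#       token = lexer.get_next_token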
| 3.828125
| 4
|
utils/reader.py
|
syth0le/Neural-Networks-Labs
| 0
|
12782729
|
<reponame>syth0le/Neural-Networks-Labs<gh_stars>0
from typing import List
# from keras.datasets import mnist
from openpyxl import load_workbook
class Reader:
def __init__(self):
pass
# (train_X, train_y), (test_X, test_y) = mnist.load_data()
# self.train_X = train_X
# self.train_y = train_y
# self.test_X = test_X
# self.test_y = test_y
@classmethod
def read_train_data(cls):
X = []
table = load_workbook('number_train.xlsx')
table_sheet_names = table.sheetnames
for sheet in range(len(table.sheetnames)):
X.append([])
for i in range(5):
for j in range(7):
X[len(X) - 1].append(
table.get_sheet_by_name(str(table_sheet_names[sheet])).cell(row=j + 1, column=i + 1).value)
return X
@classmethod
def read_test_data(cls):
X = []
table = load_workbook('number_data.xlsx')
table_sheet = table.get_sheet_by_name("0")
for i in range(5):
for j in range(7):
X.append(table_sheet.cell(row=j + 1, column=i + 1).value)
return X
@staticmethod
def get_target_data(number: int) -> List[float]:
table = load_workbook('number_target.xlsx')
target = []
for i in range(10):
target.append(table.get_sheet_by_name(str(1)).cell(row=i + 1, column=number + 1).value)
return target
| 2.765625
| 3
|
openpifpaf/train_instance_scorer.py
|
adujardin/openpifpaf
| 0
|
12782730
|
import json
import random
import pysparkling
import torch
from .decoder.utils.instance_scorer import InstanceScorer
from . import show
DATA_FILE = ('outputs/resnet101block5-pif-paf-edge401-190412-151013.pkl'
'.decodertraindata-edge641-samples0.json')
# pylint: skip-file
def plot_training_data(train_data, val_data, entry=0, entryname=None):
train_x, train_y = train_data
val_x, val_y = val_data
with show.canvas() as ax:
ax.hist([xx[entry] for xx in train_x[train_y[:, 0] == 1]],
bins=50, alpha=0.3, density=True, color='navy', label='train true')
ax.hist([xx[entry] for xx in train_x[train_y[:, 0] == 0]],
bins=50, alpha=0.3, density=True, color='orange', label='train false')
ax.hist([xx[entry] for xx in val_x[val_y[:, 0] == 1]],
histtype='step', bins=50, density=True, color='navy', label='val true')
ax.hist([xx[entry] for xx in val_x[val_y[:, 0] == 0]],
histtype='step', bins=50, density=True, color='orange', label='val false')
if entryname:
ax.set_xlabel(entryname)
ax.legend()
def train_val_split_score(data, train_fraction=0.6, balance=True):
xy_list = data.map(lambda d: ([d['score']], [float(d['target'])])).collect()
if balance:
n_true = sum(1 for x, y in xy_list if y[0] == 1.0)
n_false = sum(1 for x, y in xy_list if y[0] == 0.0)
p_true = min(1.0, n_false / n_true)
p_false = min(1.0, n_true / n_false)
xy_list = [(x, y) for x, y in xy_list
if random.random() < (p_true if y[0] == 1.0 else p_false)]
n_train = int(train_fraction * len(xy_list))
return (
(torch.tensor([x for x, _ in xy_list[:n_train]]),
torch.tensor([y for _, y in xy_list[:n_train]])),
(torch.tensor([x for x, _ in xy_list[n_train:]]),
torch.tensor([y for _, y in xy_list[n_train:]])),
)
def train_val_split_keypointscores(data, train_fraction=0.6):
xy_list = (
data
.map(lambda d: ([d['score']] + [xyv[2] for xyv in d['keypoints']] + d['joint_scales'],
[float(d['target'])]))
.collect()
)
n_train = int(train_fraction * len(xy_list))
return (
(torch.tensor([x for x, _ in xy_list[:n_train]]),
torch.tensor([y for _, y in xy_list[:n_train]])),
(torch.tensor([x for x, _ in xy_list[n_train:]]),
torch.tensor([y for _, y in xy_list[n_train:]])),
)
def train_epoch(model, loader, optimizer):
epoch_loss = 0.0
for x, y in loader:
optimizer.zero_grad()
y_hat = model(x)
loss = torch.nn.functional.binary_cross_entropy(y_hat, y)
epoch_loss += float(loss.item())
loss.backward()
optimizer.step()
return epoch_loss / len(loader)
def val_epoch(model, loader):
epoch_loss = 0.0
with torch.no_grad():
for x, y in loader:
y_hat = model(x)
loss = torch.nn.functional.binary_cross_entropy(y_hat, y)
epoch_loss += float(loss.item())
return epoch_loss / len(loader)
def main():
sc = pysparkling.Context()
data = sc.textFile(DATA_FILE).map(json.loads).cache()
train_data_score, val_data_score = train_val_split_score(data)
plot_training_data(train_data_score, val_data_score, entryname='score')
train_data, val_data = train_val_split_keypointscores(data)
model = InstanceScorer()
train_dataset = torch.utils.data.TensorDataset(*train_data)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=256, shuffle=True)
val_dataset = torch.utils.data.TensorDataset(*val_data)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=256, shuffle=False)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
for epoch_i in range(100):
train_loss = train_epoch(model, train_loader, optimizer)
val_loss = val_epoch(model, val_loader)
print(epoch_i, train_loss, val_loss)
with torch.no_grad():
post_train_data = (model(train_data[0]), train_data[1])
post_val_data = (model(val_data[0]), val_data[1])
plot_training_data(post_train_data, post_val_data, entryname='optimized score')
torch.save(model, 'instance_scorer.pkl')
if __name__ == '__main__':
main()
| 2.296875
| 2
|
inverter.py
|
akarasman/yolo-heatmaps
| 0
|
12782731
|
# trunk-ignore(black-py)
import torch
from torch.nn import Conv1d, Conv2d, Conv3d, MaxPool1d, MaxPool2d, MaxPool3d, Linear, Upsample
from lrp.utils import Flatten
from inverter_util import ( upsample_inverse, max_pool_nd_inverse,
max_pool_nd_fwd_hook, conv_nd_fwd_hook, linear_fwd_hook,
upsample_fwd_hook, silent_pass )
FWD_HOOK = { torch.nn.MaxPool1d : max_pool_nd_fwd_hook,
torch.nn.MaxPool2d : max_pool_nd_fwd_hook,
torch.nn.MaxPool3d : max_pool_nd_fwd_hook,
torch.nn.Conv1d : conv_nd_fwd_hook,
torch.nn.Conv2d : conv_nd_fwd_hook,
torch.nn.Conv3d : conv_nd_fwd_hook,
torch.nn.Linear : linear_fwd_hook,
torch.nn.Upsample : upsample_fwd_hook,
torch.nn.BatchNorm1d : silent_pass,
torch.nn.BatchNorm2d : silent_pass,
torch.nn.BatchNorm3d : silent_pass,
torch.nn.ReLU : silent_pass,
torch.nn.modules.activation.ReLU : silent_pass,
torch.nn.ELU : silent_pass,
Flatten : silent_pass,
torch.nn.Dropout : silent_pass,
torch.nn.Dropout2d : silent_pass,
torch.nn.Dropout3d : silent_pass,
torch.nn.Softmax : silent_pass,
torch.nn.LogSoftmax : silent_pass,
torch.nn.Sigmoid : silent_pass,
torch.nn.SiLU : conv_nd_fwd_hook }
# Rule-independant inversion functions
IDENTITY_MAPPINGS = ( torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.ReLU,
torch.nn.modules.activation.ReLU,
torch.nn.ELU,
Flatten,
torch.nn.Dropout,
torch.nn.Dropout2d,
torch.nn.Dropout3d,
torch.nn.Softmax,
torch.nn.LogSoftmax,
torch.nn.Sigmoid,
torch.nn.SiLU )
def module_tracker(fwd_hook_func):
"""
Wrapper for tracking the layers throughout the forward pass.
Arguments
---------
fwd_hook_func : function
Forward hook function to be wrapped.
Returns
-------
function :
Wrapped hook function
"""
def hook_wrapper(layer, *args):
return fwd_hook_func(layer, *args)
return hook_wrapper
class Inverter(torch.nn.Module):
"""
Class for computing the relevance propagation and supplying the necessary forward hooks for all layers.
Attributes
----------
linear_rule : LinearRule
Propagation rule to use for linear layers
conv_rule : ConvRule
Propagation rule for convolutional layers
pass_not_implemented : bool
Silent pass layers that have no registered forward hooks
device : torch.device
Device to put relevance data
Methods
-------
    invert(layer, relevance)
        Propagates incoming relevance for the specified layer, applying any
        necessary inversion functions along the way.
"""
# Implemented rules for relevance propagation.
def __init__(self, linear_rule=None, conv_rule=None, pass_not_implemented=False,
device=torch.device('cpu'),):
self.device = device
self.warned_log_softmax = False
self.linear_rule = linear_rule
self.conv_rule = conv_rule
self.fwd_hooks = FWD_HOOK
self.inv_funcs= {}
self.pass_not_implemented = pass_not_implemented
self.module_list = []
def register_fwd_hook(self, module, fwd_hook):
"""
Register forward hook function to module.
"""
if module in self.fwd_hooks.keys():
print('Warning: Replacing previous fwd hook registered for {}'.
format(module))
self.fwd_hooks[module] = fwd_hook
def register_inv_func(self, module, inv_func):
"""
Register inverse function to module.
"""
if module in self.inv_funcs.keys():
print('Warning: Replacing previous inverse registered for {}'.
format(module))
self.inv_funcs[module] = inv_func
def get_layer_fwd_hook(self, layer) :
"""
Interface for getting any layer's forward hook
"""
try :
return self.fwd_hooks[type(layer)]
except :
if self.pass_not_implemented :
return silent_pass
raise \
NotImplementedError('Forward hook for layer type \"{}\" not implemented'.
format(type(layer)))
def invert(self, layer : torch.nn.Module, relevance : torch.Tensor, **kwargs) -> torch.Tensor :
"""
This method computes the backward pass for the incoming relevance
for the specified layer.
Arguments
---------
layer : torch.nn.Module
Layer to propagate relevance through. Can be Conv1d, Conv2d or
any combination thereof in a higher level module.
relevance : torch.Tensor
Incoming relevance from higher up in the network.
Returns
------
torch.Tensor :
Redistributed relevance going to the lower layers in the network.
"""
if isinstance(layer, (Conv1d, Conv2d, Conv3d)):
if self.conv_rule is None :
raise Exception('Model contains conv layers but the conv rule was not set !')
return self.conv_rule(layer, relevance, **kwargs)
elif isinstance(layer, (MaxPool1d, MaxPool2d, MaxPool3d)):
return max_pool_nd_inverse(layer, relevance)
elif isinstance(layer, Linear) :
if self.linear_rule is None :
raise Exception('Model contains linear layers but the linear rule was not set !')
return self.linear_rule(layer, relevance.tensor, **kwargs)
elif isinstance(layer, Upsample):
return upsample_inverse(layer, relevance)
elif isinstance(layer, torch.nn.modules.container.Sequential):
for l in layer[::-1] :
relevance = self.invert(l, relevance)
return relevance
elif type(layer) in IDENTITY_MAPPINGS :
return relevance
elif hasattr(layer, 'propagate'):
return layer.propagate(self, relevance)
else :
try :
return self.inv_funcs[type(layer)](self, layer, relevance, **kwargs)
except KeyError :
raise NotImplementedError(f'Relevance propagation not implemented for layer type {type(layer)}')
def __call__(self, layer : torch.nn.Module, relevance : torch.Tensor, **kwargs) -> torch.Tensor :
""" Wrapper for invert method """
return self.invert(layer, relevance, **kwargs)
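# A rough usage sketch (illustrative only; the rule objects, the layer, and the
# relevance tensor are assumptions and would come from the surrounding LRP code):
#
#   inverter = Inverter(linear_rule=my_linear_rule, conv_rule=my_conv_rule)
#   relevance_in = inverter(layer, relevance_out)   # same as inverter.invert(...)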
| 1.9375
| 2
|
_modules/utils/html/escape(text)/tests.py
|
looking-for-a-job/django-examples
| 0
|
12782732
|
<filename>_modules/utils/html/escape(text)/tests.py
#!/usr/bin/env python
from django.utils.html import escape
"""
https://docs.djangoproject.com/en/dev/ref/utils/#django.utils.html.escape
escape(text)
"""
text = "<script>alert('test')</script>"
print(escape(text))
| 1.710938
| 2
|
admin/route_manager.py
|
modcastpodcast/link-shortener-backend
| 4
|
12782733
|
"""
Module to take in a directory, iterate through it and create a Starlette routing map.
"""
import importlib
import inspect
from pathlib import Path
from typing import Union
from starlette.routing import Route as StarletteRoute, Mount
from nested_dict import nested_dict
from admin.route import Route
def construct_route_map_from_dict(route_dict: dict):
route_map = []
for mount, item in route_dict.items():
if inspect.isclass(item):
route_map.append(StarletteRoute(mount, item))
else:
route_map.append(Mount(mount, routes=construct_route_map_from_dict(item)))
return route_map
def create_route_map():
routes_directory = Path("admin") / "routes"
route_dict = nested_dict()
for file in routes_directory.rglob("*.py"):
import_name = f"{str(file.parent).replace('/', '.')}.{file.stem}"
route = importlib.import_module(import_name)
for _member_name, member in inspect.getmembers(route):
if inspect.isclass(member):
if issubclass(member, Route) and member != Route:
member.check_parameters()
levels = str(file.parent).split("/")[2:]
current_level = None
for level in levels:
if current_level is None:
current_level = route_dict[f"/{level}"]
else:
current_level = current_level[f"/{level}"]
if current_level is not None:
current_level[member.path] = member
else:
route_dict[member.path] = member
route_map = construct_route_map_from_dict(route_dict.to_dict())
return route_map
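# A minimal usage sketch (illustrative only; it assumes the map is handed to a
# Starlette application):
#
#   from starlette.applications import Starlette
#   app = Starlette(routes=create_route_map())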
| 3.015625
| 3
|
bayesian_linear_regressor.py
|
KoyoteScience/BayesianLinearRegressor
| 0
|
12782734
|
import numpy as np
class BayesLinearRegressor:
def __init__(self, number_of_features, alpha=1e6):
'''
:param number_of_features: Integer number of features in the training rows, excluding the intercept and output values
        :param alpha: Float inverse ridge regularization constant, defaults to 1e6
'''
# alpha is our initial guess on the variance, basically, all parameters initialized to 0 with alpha variance
# so, you know, just set it super-high. This is the same as L2 regularization, btw!
# all those weird Bayesian update rules actually amount to very standard linear algebra identities
# Once you see that it's just updating the moment matrix and the sum of squared residuals, it's straightforward!
# So those are our internal variables that everything else depends upon
self.number_of_features = number_of_features
self.alpha = alpha
        self.beta_means = np.array([0] * (number_of_features + 1), dtype=float)  # + 1 for the intercept
self.number_of_updates = 0
self.residual_sum_squares = 0
self.moment_matrix = np.eye(number_of_features + 2) * 0.0 # + 2 for the intercept and the output
self.regularization_matrix = np.eye(self.number_of_features + 1) / self.alpha
self.regularization_matrix[0, 0] = 0 # we don't regularize the intercept term
def partial_fit(self, X, Y, W=None, reverse=False):
'''
The online updating rules
:param X: Input feature vector(s) as 2D numpy array
:param Y: Input output values as 1D numpy array
:param W: Data weights (relative to unity) as a 1D numpy array
:param reverse: Boolean, True means that we "unfit" the training rows, otherwise acts as normal
:return: None
'''
# see http://www.biostat.umn.edu/~ph7440/pubh7440/BayesianLinearModelGoryDetails.pdf for gory details
# clear the frozen parameter sample since we are updating the parameter distributions
self.frozen_parameter_sample = None
moment_of_X_before = self.moment_matrix[:-1, :-1]
beta_means_before = self.beta_means.copy()
inverted_covariance_matrix_before = moment_of_X_before + self.regularization_matrix
# Here we concatenate the intercept input value (constant 1), the input vector, and the output value:
rank_n_obs_update_matrix = np.array([[1] + row + output for row, output in zip(X.tolist(), Y.tolist())])
if W is None:
moment_matrix_update_term = rank_n_obs_update_matrix.T @ rank_n_obs_update_matrix
else:
moment_matrix_update_term = rank_n_obs_update_matrix.T @ np.diag(W.tolist()) @ rank_n_obs_update_matrix
if not reverse:
self.moment_matrix += moment_matrix_update_term
moment_of_Y_update_term = Y.T @ Y
self.number_of_updates += 1
else:
self.moment_matrix -= moment_matrix_update_term
moment_of_Y_update_term = -Y.T @ Y
self.number_of_updates -= 1
moment_of_X = self.moment_matrix[:-1, :-1]
moment_of_X_and_Y = self.moment_matrix[:-1, -1]
moment_of_X_and_Y_update_term = moment_matrix_update_term[:-1, -1]
inverted_covariance_matrix = moment_of_X + self.regularization_matrix
covariance_matrix = np.linalg.inv(inverted_covariance_matrix)
# these two statements are equivalent, so I choose the simpler one, although the latter
# one is more consistent with the notation I come across in the literature
self.beta_means = covariance_matrix @ (moment_of_X_and_Y)
# self.beta_means = covariance_matrix @ (inverted_covariance_matrix_before @ beta_means_before + moment_of_X_and_Y_update_term)
if self.number_of_updates > len(covariance_matrix) - 1:
self.residual_sum_squares += (
moment_of_Y_update_term -
self.beta_means.T @ inverted_covariance_matrix @ self.beta_means +
beta_means_before.T @ inverted_covariance_matrix_before @ beta_means_before
)
def partial_unfit(self, X, Y):
return self.partial_fit(X, Y, reverse=True)
def predict(self, X, use_means=False, freeze_parameter_sample=False):
'''
:param X: Input feature vector excluding the intercept constant as a 2D numpy array
:param use_means: Boolean where True means we just provide the prediciton at the mean of the coefficients
(sometimes referred to as deterministic prediction); otherwise sample parameters from the multivariate norm
and incorporate the uncertainty of the parameters in your prediction
:param freeze_parameter_sample: Boolean. When set to True, we sample from the parameters only once for each prediction
:return:
'''
X_with_intercept = np.array([[1] + row.tolist() for row in X])
scale_multiplier = 1.0 / max(1.0, (self.number_of_updates - self.number_of_features - 1))
if use_means:
return X_with_intercept @ self.beta_means
else:
if freeze_parameter_sample:
                if self.frozen_parameter_sample is None:
self.frozen_parameter_sample = np.random.multivariate_normal(
self.beta_means.T[0],
                        self.residual_sum_squares * scale_multiplier * self.cov_params
)
beta = self.frozen_parameter_sample
else:
beta = np.random.multivariate_normal(
self.beta_means.T[0],
self.residual_sum_squares * scale_multiplier * self.cov_params
)
return X_with_intercept @ beta
@property
def coef_(self):
return self.beta_means[1:]
@property
def intercept_(self):
return float(self.beta_means[0])
@property
def cov_params(self):
scale_multiplier = 1.0 / max(1.0, (self.number_of_updates - self.number_of_features - 1))
moment_of_X = self.moment_matrix[:-1, :-1]
inverted_covariance_matrix = moment_of_X + np.eye(self.number_of_features + 1) / self.alpha
return np.linalg.inv(inverted_covariance_matrix) * self.residual_sum_squares * scale_multiplier
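# A minimal end-to-end sketch (illustrative only). It assumes X is a 2D array of
# feature rows and Y is a 2D column of targets, which matches the .tolist() handling
# in partial_fit above; the numbers themselves are arbitrary.
if __name__ == "__main__":
    blr = BayesLinearRegressor(number_of_features=2)
    X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    Y = np.array([[1.0], [2.0], [3.0], [4.0]])
    blr.partial_fit(X, Y)
    print("intercept:", blr.intercept_)
    print("coefficients:", blr.coef_)
    print("mean prediction:", blr.predict(X, use_means=True))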
| 3.0625
| 3
|
opennre/tokenization/word_piece_tokenizer.py
|
WinterSoHot/OpenNRE
| 3,284
|
12782735
|
<filename>opennre/tokenization/word_piece_tokenizer.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WordpieceTokenizer classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unicodedata
from .utils import (load_vocab,
convert_to_unicode,
clean_text,
split_on_whitespace,
convert_by_vocab,
tokenize_chinese_chars)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab = None, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = load_vocab(vocab)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
""" Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`.
Returns:
output_tokens: A list of wordpiece tokens.
current_positions: A list of the current positions for the original words in text .
"""
text = convert_to_unicode(text)
text = clean_text(text)
text = tokenize_chinese_chars(text)
output_tokens = []
current_positions = []
token_list = split_on_whitespace(text)
for chars in token_list:
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
if start > 0:
substr = "##" + chars[start:end]
else:
substr = chars[start:end]
cur_substr = None
while start < end:
if substr in self.vocab:
cur_substr = substr
break
end -= 1
substr = substr[:-1]
if cur_substr is None:
is_bad = True
break
else:
sub_tokens.append(cur_substr)
start = end
current_positions.append([])
if is_bad:
current_positions[-1].append(len(output_tokens))
output_tokens.append(self.unk_token)
current_positions[-1].append(len(output_tokens))
else:
current_positions[-1].append(len(output_tokens))
output_tokens.extend(sub_tokens)
current_positions[-1].append(len(output_tokens))
return output_tokens, current_positions
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
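# A rough usage sketch (illustrative only; 'vocab.txt' is a hypothetical vocabulary
# file in the format expected by load_vocab):
#
#   tokenizer = WordpieceTokenizer(vocab='vocab.txt')
#   tokens, positions = tokenizer.tokenize('unaffable')
#   ids = tokenizer.convert_tokens_to_ids(tokens)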
| 2.578125
| 3
|
indy_node/server/req_handlers/read_req_handlers/get_revoc_reg_handler.py
|
rantwijk/indy-node
| 0
|
12782736
|
<reponame>rantwijk/indy-node
from plenum.server.request_handlers.handler_interfaces.read_request_handler import ReadRequestHandler
class GetRevocRegHandler(ReadRequestHandler):
pass
| 1.421875
| 1
|
generate_maze.py
|
temibabs/MPHRL
| 8
|
12782737
|
<filename>generate_maze.py<gh_stars>1-10
import os
import numpy as np
import tensorflow as tf
from config import c, n
import utils
def calc_lookat(corridors):
l, t, r, b = zip(* [i['pos'] for i in corridors])
min_l = min(l)
max_r = max(r)
min_b = min(b)
max_t = max(t)
return [np.mean([min_l, max_r]), np.mean([min_b, max_t]), 0]
def validate_paths(corridors):
for i in range(len(corridors)):
for j in range(i + 1, len(corridors)):
l1, t1, r1, b1 = corridors[i]['pos']
l2, t2, r2, b2 = corridors[j]['pos']
if intersect(l1, l2, r1, r2) and intersect(b1, b2, t1, t2):
return False
return True
def intersect(l1, l2, r1, r2):
if l1 < l2 < r1 or l1 < r2 < r1 or l2 < l1 < r2 or l2 < r1 < r2:
print('no good...', l1, l2, r1, r2)
return True
return False
def main():
mazes = []
goals = []
direction = 2
for i in range(c.num_mazes):
valid = False
while not valid:
paths = [[0, 0]]
# NOTE: MAZE
for j in range(c.num_corr[i]):
# NOTE: CORRIDOR
corr_len = np.random.randint(low=c.min_corr_len, high=c.max_corr_len)
xy = np.zeros(2).tolist()
if direction < 2:
direction = 1 - direction
else:
direction = np.random.randint(low=0, high=2)
xy[direction] = paths[-1][direction] + corr_len * (
np.random.randint(low=0, high=2) * 2 - 1)
xy[1 - direction] = paths[-1][1 - direction]
paths.append(xy)
paths = np.array(paths)
valid = validate_paths(utils.calc_corridors(paths))
mazes.append(paths)
goals.append(paths[-1])
for i in range(c.num_mazes):
c.paths = mazes[i]
c.goal[0] = goals[i][0]
c.goal[1] = goals[i][1]
c.max_frames = 1
c.viewer = True
c.finalize = False
c.corridors = utils.calc_corridors(c.paths)
utils.modify_xml(task_id=None, seed=seed) # TODO(jkg)
algo = algo_class(0, False, False, 0)
algo.record_one_rollout()
tf.reset_default_graph()
print(np.array(mazes).tolist())
if __name__ == "__main__":
module = __import__("algorithms.%s" % c.algo, fromlist=[c.algo])
algo_class = getattr(module, c.algo)
for f in os.listdir(os.path.join('results', c.ckpt_path)):
        if '.gif' in f or '.png' in f or '.mp4' in f:
os.remove(os.path.join('results', c.ckpt_path, f))
main()
| 2.671875
| 3
|
discordbot.py
|
umeharaumeo/discordpy-startup
| 0
|
12782738
|
<filename>discordbot.py
import discord
import os
token = os.environ['TOKEN_ON_TOKEN']
# Create the client object needed to connect to Discord
client = discord.Client()
# Handler that runs when a message is received
@client.event
async def on_message(message):
    # Ignore the message if the sender is a bot
if message.author.bot:
return
if message.content == 'ID':
embed = discord.Embed(title='おのすにゃん', description='', color=0xff0000)
embed.add_field(name="Switch", value="0884-9004-7707", inline=True)
embed.add_field(name="Origin", value="mgmgOI4i", inline=True)
embed.add_field(name="VALORANT", value="もぐもぐおいしい #JP1", inline=True)
await message.channel.send(embed=embed)
if message.content == 'ふれんど':
embed = discord.Embed(title='はづぴす', description='', color=0xff0000)
embed.add_field(name="VALORANT", value="hazupisu#JP1", inline=True)
embed.add_field(name="Origin", value="hazupisu", inline=True)
embed.add_field(name="Steam", value="Hazuki.JP", inline=True)
await message.channel.send(embed=embed)
if message.content == 'クラウド':
await message.channel.send('時代はクラウド')
if message.content == 'にこちゃん':
await message.channel.send('にっこにっこに~!')
if message.content == 'ねえ':
await message.channel.send('はい!!')
if message.content == 'クリエイトアドバイザー':
await message.channel.send('おのすにゃん')
if message.content == '制作者':
await message.channel.send('はづぴす')
if message.content == '制作':
await message.channel.send('協力 おのすにゃん\n\n©︎ 2020-2021 はづぴす')
if message.content == 'MJ':
await message.channel.send('PW 1234\n部屋名 はづぴす')
if message.content == 'すき':
await message.channel.send('らぶにこっ(⋈◍>◡<◍)。✧♡')
if message.content == 'おかえり':
await message.channel.send('ただいま♡')
if message.content == 'はづぴす':
await message.channel.send('ママ大好き♡')
if message.content == 'ただいま':
await message.channel.send('おかえり♡')
if message.content == '30歳無職彼氏無し':
await message.channel.send('はづぴす')
# Start the bot and connect to the Discord server
client.run(token)
| 2.59375
| 3
|
boto/redshift/layer1.py
|
adastreamer/boto
| 1
|
12782739
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import json
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.redshift import exceptions
class RedshiftConnection(AWSQueryConnection):
"""
Amazon Redshift **Overview**
This is the Amazon Redshift API Reference. This guide provides
descriptions and samples of the Amazon Redshift API.
Amazon Redshift manages all the work of setting up, operating, and
scaling a data warehouse: provisioning capacity, monitoring and
backing up the cluster, and applying patches and upgrades to the
Amazon Redshift engine. You can focus on using your data to
acquire new insights for your business and customers.
**Are You a First-Time Amazon Redshift User?**
If you are a first-time user of Amazon Redshift, we recommend that
you begin by reading the following sections:
+ Service Highlights and Pricing - The `product detail page`_
provides the Amazon Redshift value proposition, service highlights
and pricing.
+ Getting Started - The `Getting Started Guide`_ includes an
example that walks you through the process of creating a cluster,
creating database tables, uploading data, and testing queries.
After you complete the Getting Started Guide, we recommend that
you explore one of the following guides:
+ Cluster Management - If you are responsible for managing Amazon
Redshift clusters, the `Cluster Management Guide`_ shows you how
to create and manage Amazon Redshift clusters. If you are an
application developer, you can use the Amazon Redshift Query API
to manage clusters programmatically. Additionally, the AWS SDK
libraries that wrap the underlying Amazon Redshift API simplify
your programming tasks. If you prefer a more interactive way of
managing clusters, you can use the Amazon Redshift console and the
AWS command line interface (AWS CLI). For information about the
API and CLI, go to the following manuals :
+ API Reference ( this document )
+ `CLI Reference`_
    + Amazon Redshift Database Developer - If you are a
database developer, the Amazon Redshift `Database Developer
Guide`_ explains how to design, build, query, and maintain the
databases that make up your data warehouse.
For a list of supported AWS regions where you can provision a
cluster, go to the `Regions and Endpoints`_ section in the Amazon
Web Services Glossary .
"""
APIVersion = "2012-12-01"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "redshift.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"ClusterNotFound": exceptions.ClusterNotFoundFault,
"InvalidClusterSubnetState": exceptions.InvalidClusterSubnetStateFault,
"InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupStateFault,
"ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceededFault,
"InvalidClusterState": exceptions.InvalidClusterStateFault,
"InvalidRestore": exceptions.InvalidRestoreFault,
"ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExistsFault,
"NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceededFault,
"ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFoundFault,
"InsufficientClusterCapacity": exceptions.InsufficientClusterCapacityFault,
"UnauthorizedOperation": exceptions.UnauthorizedOperationFault,
"ClusterQuotaExceeded": exceptions.ClusterQuotaExceededFault,
"InvalidVPCNetworkState": exceptions.InvalidVPCNetworkStateFault,
"ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFoundFault,
"AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceededFault,
"InvalidSubne": exceptions.InvalidSubnet,
"ResizeNotFound": exceptions.ResizeNotFoundFault,
"ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFoundFault,
"ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceededFault,
"AccessToSnapshotDenied": exceptions.AccessToSnapshotDeniedFault,
"InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupStateFault,
"NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceededFault,
"ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceededFault,
"UnsupportedOption": exceptions.UnsupportedOptionFault,
"ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFoundFault,
"ClusterAlreadyExists": exceptions.ClusterAlreadyExistsFault,
"ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExistsFault,
"ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExistsFault,
"ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceededFault,
"ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFoundFault,
"AuthorizationNotFound": exceptions.AuthorizationNotFoundFault,
"ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceededFault,
"AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExistsFault,
"InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotStateFault,
"ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceededFault,
"ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExistsFault,
"ReservedNodeNotFound": exceptions.ReservedNodeNotFoundFault,
"InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupStateFault,
"ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExistsFault,
"SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse,
"AccessToSnapshotDenied": exceptions.AccessToSnapshotDeniedFault,
"UnauthorizedOperation": exceptions.UnauthorizedOperationFault,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
kwargs['host'] = region.endpoint
AWSQueryConnection.__init__(self, **kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def authorize_cluster_security_group_ingress(self,
cluster_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_owner_id=None):
"""
Adds an inbound (ingress) rule to an Amazon Redshift security
group. Depending on whether the application accessing your
cluster is running on the Internet or an EC2 instance, you can
authorize inbound access to either a Classless Interdomain
Routing (CIDR) IP address range or an EC2 security group. You
can add as many as 20 ingress rules to an Amazon Redshift
security group.
The EC2 security group must be defined in the AWS region where
the cluster resides.
For an overview of CIDR blocks, see the Wikipedia article on
`Classless Inter-Domain Routing`_.
You must also associate the security group with a cluster so
that clients running on these IP addresses or the EC2 instance
are authorized to connect to the cluster. For information
about managing security groups, go to `Working with Security
Groups`_ in the Amazon Redshift Management Guide .
:type cluster_security_group_name: string
:param cluster_security_group_name: The name of the security group to
which the ingress rule is added.
:type cidrip: string
:param cidrip: The IP range to be added the Amazon Redshift security
group.
:type ec2_security_group_name: string
:param ec2_security_group_name: The EC2 security group to be added the
Amazon Redshift security group.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS account number of the owner
of the security group specified by the EC2SecurityGroupName
parameter. The AWS Access Key ID is not an acceptable value.
Example: `111122223333`
"""
params = {
'ClusterSecurityGroupName': cluster_security_group_name,
}
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='AuthorizeClusterSecurityGroupIngress',
verb='POST',
path='/', params=params)
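# Usage sketch (illustrative only, not part of the class): assuming `conn` is
# an instance of this connection class, an ingress rule could be added either
# for a CIDR block or for an EC2 security group; the group names, CIDR and
# account ID below are hypothetical examples:
#
#     conn.authorize_cluster_security_group_ingress(
#         'examplesecuritygroup', cidrip='192.0.2.0/24')
#
#     conn.authorize_cluster_security_group_ingress(
#         'examplesecuritygroup',
#         ec2_security_group_name='my-ec2-group',
#         ec2_security_group_owner_id='111122223333')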
def authorize_snapshot_access(self, snapshot_identifier,
account_with_restore_access,
snapshot_cluster_identifier=None):
"""
Authorizes the specified AWS customer account to restore the
specified snapshot.
For more information about working with snapshots, go to
`Amazon Redshift Snapshots`_ in the Amazon Redshift Management
Guide .
:type snapshot_identifier: string
:param snapshot_identifier: The identifier of the snapshot the account
is authorized to restore.
:type snapshot_cluster_identifier: string
:param snapshot_cluster_identifier:
:type account_with_restore_access: string
:param account_with_restore_access: The identifier of the AWS customer
account authorized to restore the specified snapshot.
"""
params = {
'SnapshotIdentifier': snapshot_identifier,
'AccountWithRestoreAccess': account_with_restore_access,
}
if snapshot_cluster_identifier is not None:
params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
return self._make_request(
action='AuthorizeSnapshotAccess',
verb='POST',
path='/', params=params)
def copy_cluster_snapshot(self, source_snapshot_identifier,
target_snapshot_identifier,
source_snapshot_cluster_identifier=None):
"""
Copies the specified automated cluster snapshot to a new
manual cluster snapshot. The source must be an automated
snapshot and it must be in the available state.
When you delete a cluster, Amazon Redshift deletes any
automated snapshots of the cluster. Also, when the retention
period of the snapshot expires, Amazon Redshift automatically
deletes it. If you want to keep an automated snapshot for a
longer period, you can make a manual copy of the snapshot.
Manual snapshots are retained until you delete them.
For more information about working with snapshots, go to
`Amazon Redshift Snapshots`_ in the Amazon Redshift Management
Guide .
:type source_snapshot_identifier: string
:param source_snapshot_identifier:
The identifier for the source snapshot.
Constraints:
+ Must be the identifier for a valid automated snapshot whose state is
"available".
:type source_snapshot_cluster_identifier: string
:param source_snapshot_cluster_identifier:
:type target_snapshot_identifier: string
:param target_snapshot_identifier:
The identifier given to the new manual snapshot.
Constraints:
+ Cannot be null, empty, or blank.
+ Must contain from 1 to 255 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
+ Must be unique for the AWS account that is making the request.
"""
params = {
'SourceSnapshotIdentifier': source_snapshot_identifier,
'TargetSnapshotIdentifier': target_snapshot_identifier,
}
if source_snapshot_cluster_identifier is not None:
params['SourceSnapshotClusterIdentifier'] = source_snapshot_cluster_identifier
return self._make_request(
action='CopyClusterSnapshot',
verb='POST',
path='/', params=params)
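# Usage sketch (illustrative only): copying an automated snapshot to a manual
# one so it is retained after the cluster is deleted; both identifiers below
# are hypothetical:
#
#     conn.copy_cluster_snapshot('automated-snapshot-id',
#                                'examplecluster-manual-copy')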
def create_cluster(self, cluster_identifier, node_type, master_username,
master_user_password, db_name=None, cluster_type=None,
cluster_security_groups=None,
vpc_security_group_ids=None,
cluster_subnet_group_name=None,
availability_zone=None,
preferred_maintenance_window=None,
cluster_parameter_group_name=None,
automated_snapshot_retention_period=None, port=None,
cluster_version=None, allow_version_upgrade=None,
number_of_nodes=None, publicly_accessible=None,
encrypted=None):
"""
Creates a new cluster. To create the cluster in a virtual
private cloud (VPC), you must provide a cluster subnet group
name. If you don't provide a cluster subnet group name or the
cluster security group parameter, Amazon Redshift creates a
non-VPC cluster and associates the default cluster security
group with the cluster. For more information about managing
clusters, go to `Amazon Redshift Clusters`_ in the Amazon
Redshift Management Guide .
:type db_name: string
:param db_name:
The name of the first database to be created when the cluster is
created.
To create additional databases after the cluster is created, connect to
the cluster with a SQL client and use SQL commands to create a
database. For more information, go to `Create a Database`_ in the
Amazon Redshift Developer Guide.
Default: `dev`
Constraints:
+ Must contain 1 to 64 alphanumeric characters.
+ Must contain only lowercase letters.
+ Cannot be a word that is reserved by the service. A list of reserved
words can be found in `Reserved Words`_ in the Amazon Redshift
Developer Guide.
:type cluster_identifier: string
:param cluster_identifier: A unique identifier for the cluster. You use
this identifier to refer to the cluster for any subsequent cluster
operations such as deleting or modifying. The identifier also
appears in the Amazon Redshift console.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens.
+ Alphabetic characters must be lowercase.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
+ Must be unique for all clusters within an AWS account.
Example: `myexamplecluster`
:type cluster_type: string
:param cluster_type: The type of the cluster. When cluster type is
specified as
+ `single-node`, the **NumberOfNodes** parameter is not required.
+ `multi-node`, the **NumberOfNodes** parameter is required.
Valid Values: `multi-node` | `single-node`
Default: `multi-node`
:type node_type: string
:param node_type: The node type to be provisioned for the cluster. For
information about node types, go to `Working with Clusters`_ in
the Amazon Redshift Management Guide .
Valid Values: `dw.hs1.xlarge` | `dw.hs1.8xlarge`.
:type master_username: string
:param master_username:
The user name associated with the master user account for the cluster
that is being created.
Constraints:
+ Must be 1 - 128 alphanumeric characters.
+ First character must be a letter.
+ Cannot be a reserved word. A list of reserved words can be found in
`Reserved Words`_ in the Amazon Redshift Developer Guide.
:type master_user_password: string
:param master_user_password:
The password associated with the master user account for the cluster
that is being created.
Constraints:
+ Must be between 8 and 64 characters in length.
+ Must contain at least one uppercase letter.
+ Must contain at least one lowercase letter.
+ Must contain one number.
+ Can be any printable ASCII character (ASCII code 33 to 126) except '
(single quote), " (double quote), \, /, @, or space.
:type cluster_security_groups: list
:param cluster_security_groups: A list of security groups to be
associated with this cluster.
Default: The default cluster security group for Amazon Redshift.
:type vpc_security_group_ids: list
:param vpc_security_group_ids: A list of Virtual Private Cloud (VPC)
security groups to be associated with the cluster.
Default: The default VPC security group is associated with the cluster.
:type cluster_subnet_group_name: string
:param cluster_subnet_group_name: The name of a cluster subnet group to
be associated with this cluster.
If this parameter is not provided, the resulting cluster will be
deployed outside of a virtual private cloud (VPC).
:type availability_zone: string
:param availability_zone: The EC2 Availability Zone (AZ) in which you
want Amazon Redshift to provision the cluster. For example, if you
have several EC2 instances running in a specific Availability Zone,
then you might want the cluster to be provisioned in the same zone
in order to decrease network latency.
Default: A random, system-chosen Availability Zone in the region that
is specified by the endpoint.
Example: `us-east-1d`
Constraint: The specified Availability Zone must be in the same region
as the current endpoint.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which automated cluster maintenance can occur.
Format: `ddd:hh24:mi-ddd:hh24:mi`
Default: A 30-minute window selected at random from an 8-hour block of
time per region, occurring on a random day of the week. The
following list shows the time blocks for each region from which the
default maintenance windows are assigned.
+ **US-East (Northern Virginia) Region:** 03:00-11:00 UTC
+ **US-West (Oregon) Region** 06:00-14:00 UTC
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Minimum 30-minute window.
:type cluster_parameter_group_name: string
:param cluster_parameter_group_name:
The name of the parameter group to be associated with this cluster.
Default: The default Amazon Redshift cluster parameter group. For
information about the default parameter group, go to `Working with
Amazon Redshift Parameter Groups`_
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type automated_snapshot_retention_period: integer
:param automated_snapshot_retention_period: The number of days that
automated snapshots are retained. If the value is 0, automated
snapshots are disabled. Even if automated snapshots are disabled,
you can still create manual snapshots when you want with
CreateClusterSnapshot.
Default: `1`
Constraints: Must be a value from 0 to 35.
:type port: integer
:param port: The port number on which the cluster accepts incoming
connections.
The cluster is accessible only via the JDBC and ODBC connection
strings. Part of the connection string requires the port on which
the cluster will listen for incoming connections.
Default: `5439`
Valid Values: `1150-65535`
:type cluster_version: string
:param cluster_version: The version of the Amazon Redshift engine
software that you want to deploy on the cluster.
The version selected runs on all the nodes in the cluster.
Constraints: Only version 1.0 is currently available.
Example: `1.0`
:type allow_version_upgrade: boolean
:param allow_version_upgrade: If `True`, upgrades can be applied during
the maintenance window to the Amazon Redshift engine that is
running on the cluster.
When a new version of the Amazon Redshift engine is released, you can
request that the service automatically apply upgrades during the
maintenance window to the Amazon Redshift engine that is running on
your cluster.
Default: `True`
:type number_of_nodes: integer
:param number_of_nodes: The number of compute nodes in the cluster.
This parameter is required when the **ClusterType** parameter is
specified as `multi-node`.
For information about determining how many nodes you need, go to `
Working with Clusters`_ in the Amazon Redshift Management Guide .
If you don't specify this parameter, you get a single-node cluster.
When requesting a multi-node cluster, you must specify the number
of nodes that you want in the cluster.
Default: `1`
Constraints: Value must be at least 1 and no more than 100.
:type publicly_accessible: boolean
:param publicly_accessible: If `True`, the cluster can be accessed from
a public network.
:type encrypted: boolean
:param encrypted: If `True`, the data in cluster is encrypted at rest.
Default: false
"""
params = {
'ClusterIdentifier': cluster_identifier,
'NodeType': node_type,
'MasterUsername': master_username,
'MasterUserPassword': master_user_password,
}
if db_name is not None:
params['DBName'] = db_name
if cluster_type is not None:
params['ClusterType'] = cluster_type
if cluster_security_groups is not None:
self.build_list_params(params,
cluster_security_groups,
'ClusterSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if cluster_subnet_group_name is not None:
params['ClusterSubnetGroupName'] = cluster_subnet_group_name
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if cluster_parameter_group_name is not None:
params['ClusterParameterGroupName'] = cluster_parameter_group_name
if automated_snapshot_retention_period is not None:
params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period
if port is not None:
params['Port'] = port
if cluster_version is not None:
params['ClusterVersion'] = cluster_version
if allow_version_upgrade is not None:
params['AllowVersionUpgrade'] = str(
allow_version_upgrade).lower()
if number_of_nodes is not None:
params['NumberOfNodes'] = number_of_nodes
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if encrypted is not None:
params['Encrypted'] = str(
encrypted).lower()
return self._make_request(
action='CreateCluster',
verb='POST',
path='/', params=params)
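# Usage sketch (illustrative only): creating a two-node cluster. The
# identifiers and password below are hypothetical examples chosen to satisfy
# the documented constraints (8-64 characters, upper and lower case letters
# and a digit):
#
#     conn.create_cluster('examplecluster', 'dw.hs1.xlarge',
#                         'masteruser', 'TopSecret1',
#                         cluster_type='multi-node', number_of_nodes=2,
#                         db_name='dev', port=5439)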
def create_cluster_parameter_group(self, parameter_group_name,
parameter_group_family, description):
"""
Creates an Amazon Redshift parameter group.
Creating parameter groups is independent of creating clusters.
You can associate a cluster with a parameter group when you
create the cluster. You can also associate an existing cluster
with a parameter group after the cluster is created by using
ModifyCluster.
Parameters in the parameter group define specific behavior
that applies to the databases you create on the cluster. For
more information about managing parameter groups, go to
`Amazon Redshift Parameter Groups`_ in the Amazon Redshift
Management Guide .
:type parameter_group_name: string
:param parameter_group_name:
The name of the cluster parameter group.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
+ Must be unique within your AWS account.
This value is stored as a lower-case string.
:type parameter_group_family: string
:param parameter_group_family: The Amazon Redshift engine version to
which the cluster parameter group applies. The cluster engine
version determines the set of parameters.
To get a list of valid parameter group family names, you can call
DescribeClusterParameterGroups. By default, Amazon Redshift returns
a list of all the parameter groups that are owned by your AWS
account, including the default parameter groups for each Amazon
Redshift engine version. The parameter group family names
associated with the default parameter groups provide you the valid
values. For example, a valid family name is "redshift-1.0".
:type description: string
:param description: A description of the parameter group.
"""
params = {
'ParameterGroupName': parameter_group_name,
'ParameterGroupFamily': parameter_group_family,
'Description': description,
}
return self._make_request(
action='CreateClusterParameterGroup',
verb='POST',
path='/', params=params)
def create_cluster_security_group(self, cluster_security_group_name,
description):
"""
Creates a new Amazon Redshift security group. You use security
groups to control access to non-VPC clusters.
For information about managing security groups, go to `Amazon
Redshift Cluster Security Groups`_ in the Amazon Redshift
Management Guide .
:type cluster_security_group_name: string
:param cluster_security_group_name: The name for the security group.
Amazon Redshift stores the value as a lowercase string.
Constraints:
+ Must contain no more than 255 alphanumeric characters or hyphens.
+ Must not be "Default".
+ Must be unique for all security groups that are created by your AWS
account.
Example: `examplesecuritygroup`
:type description: string
:param description: A description for the security group.
"""
params = {
'ClusterSecurityGroupName': cluster_security_group_name,
'Description': description,
}
return self._make_request(
action='CreateClusterSecurityGroup',
verb='POST',
path='/', params=params)
def create_cluster_snapshot(self, snapshot_identifier,
cluster_identifier):
"""
Creates a manual snapshot of the specified cluster. The
cluster must be in the "available" state.
For more information about working with snapshots, go to
`Amazon Redshift Snapshots`_ in the Amazon Redshift Management
Guide .
:type snapshot_identifier: string
:param snapshot_identifier: A unique identifier for the snapshot that
you are requesting. This identifier must be unique for all
snapshots within the AWS account.
Constraints:
+ Cannot be null, empty, or blank
+ Must contain from 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
Example: `my-snapshot-id`
:type cluster_identifier: string
:param cluster_identifier: The cluster identifier for which you want a
snapshot.
"""
params = {
'SnapshotIdentifier': snapshot_identifier,
'ClusterIdentifier': cluster_identifier,
}
return self._make_request(
action='CreateClusterSnapshot',
verb='POST',
path='/', params=params)
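# Usage sketch (illustrative only): taking a manual snapshot of a cluster;
# both identifiers are hypothetical:
#
#     conn.create_cluster_snapshot('my-snapshot-id', 'examplecluster')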
def create_cluster_subnet_group(self, cluster_subnet_group_name,
description, subnet_ids):
"""
Creates a new Amazon Redshift subnet group. You must provide a
list of one or more subnets in your existing Amazon Virtual
Private Cloud (Amazon VPC) when creating an Amazon Redshift
subnet group.
For information about subnet groups, go to `Amazon Redshift
Cluster Subnet Groups`_ in the Amazon Redshift Management
Guide .
:type cluster_subnet_group_name: string
:param cluster_subnet_group_name: The name for the subnet group. Amazon
Redshift stores the value as a lowercase string.
Constraints:
+ Must contain no more than 255 alphanumeric characters or hyphens.
+ Must not be "Default".
+ Must be unique for all subnet groups that are created by your AWS
account.
Example: `examplesubnetgroup`
:type description: string
:param description: A description for the subnet group.
:type subnet_ids: list
:param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets
can be modified in a single request.
"""
params = {
'ClusterSubnetGroupName': cluster_subnet_group_name,
'Description': description,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
return self._make_request(
action='CreateClusterSubnetGroup',
verb='POST',
path='/', params=params)
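# Usage sketch (illustrative only): the subnet IDs below are hypothetical
# placeholders; the list is serialized into SubnetIds.member.N query
# parameters by build_list_params:
#
#     conn.create_cluster_subnet_group(
#         'examplesubnetgroup', 'Example subnet group',
#         ['subnet-11111111', 'subnet-22222222'])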
def delete_cluster(self, cluster_identifier,
skip_final_cluster_snapshot=None,
final_cluster_snapshot_identifier=None):
"""
Deletes a previously provisioned cluster. A successful
response from the web service indicates that the request was
received correctly. If a final cluster snapshot is requested
the status of the cluster will be "final-snapshot" while the
snapshot is being taken, then it's "deleting" once Amazon
Redshift begins deleting the cluster. Use DescribeClusters to
monitor the status of the deletion. The delete operation
cannot be canceled or reverted once submitted. For more
information about managing clusters, go to `Amazon Redshift
Clusters`_ in the Amazon Redshift Management Guide .
:type cluster_identifier: string
:param cluster_identifier:
The identifier of the cluster to be deleted.
Constraints:
+ Must contain lowercase characters.
+ Must contain from 1 to 63 alphanumeric characters or hyphens.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
:type skip_final_cluster_snapshot: boolean
:param skip_final_cluster_snapshot: Determines whether a final snapshot
of the cluster is created before Amazon Redshift deletes the
cluster. If `True`, a final cluster snapshot is not created. If
`False`, a final cluster snapshot is created before the cluster is
deleted.
The FinalClusterSnapshotIdentifier parameter must be specified if
SkipFinalClusterSnapshot is `False`.
Default: `False`
:type final_cluster_snapshot_identifier: string
:param final_cluster_snapshot_identifier:
The identifier of the final snapshot that is to be created immediately
before deleting the cluster. If this parameter is provided,
SkipFinalClusterSnapshot must be `False`.
Constraints:
+ Must be 1 to 255 alphanumeric characters.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
"""
params = {'ClusterIdentifier': cluster_identifier, }
if skip_final_cluster_snapshot is not None:
params['SkipFinalClusterSnapshot'] = str(
skip_final_cluster_snapshot).lower()
if final_cluster_snapshot_identifier is not None:
params['FinalClusterSnapshotIdentifier'] = final_cluster_snapshot_identifier
return self._make_request(
action='DeleteCluster',
verb='POST',
path='/', params=params)
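# Usage sketch (illustrative only): deleting a cluster either with a final
# snapshot or without one (identifiers are hypothetical):
#
#     conn.delete_cluster('examplecluster',
#                         skip_final_cluster_snapshot=False,
#                         final_cluster_snapshot_identifier='examplecluster-final')
#
#     conn.delete_cluster('throwawaycluster', skip_final_cluster_snapshot=True)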
def delete_cluster_parameter_group(self, parameter_group_name):
"""
Deletes a specified Amazon Redshift parameter group. You
cannot delete a parameter group if it is associated with a
cluster.
:type parameter_group_name: string
:param parameter_group_name:
The name of the parameter group to be deleted.
Constraints:
+ Must be the name of an existing cluster parameter group.
+ Cannot delete a default cluster parameter group.
"""
params = {'ParameterGroupName': parameter_group_name, }
return self._make_request(
action='DeleteClusterParameterGroup',
verb='POST',
path='/', params=params)
def delete_cluster_security_group(self, cluster_security_group_name):
"""
Deletes an Amazon Redshift security group.
You cannot delete a security group that is associated with any
clusters. You cannot delete the default security group.
For information about managing security groups, go to `Amazon
Redshift Cluster Security Groups`_ in the Amazon Redshift
Management Guide .
:type cluster_security_group_name: string
:param cluster_security_group_name: The name of the cluster security
group to be deleted.
"""
params = {
'ClusterSecurityGroupName': cluster_security_group_name,
}
return self._make_request(
action='DeleteClusterSecurityGroup',
verb='POST',
path='/', params=params)
def delete_cluster_snapshot(self, snapshot_identifier,
snapshot_cluster_identifier=None):
"""
Deletes the specified manual snapshot. The snapshot must be in
the "available" state, with no other users authorized to
access the snapshot.
Unlike automated snapshots, manual snapshots are retained even
after you delete your cluster. Amazon Redshift does not delete
your manual snapshots. You must delete a manual snapshot
explicitly to avoid getting charged. If other accounts are
authorized to access the snapshot, you must revoke all of the
authorizations before you can delete the snapshot.
:type snapshot_identifier: string
:param snapshot_identifier: The unique identifier of the manual
snapshot to be deleted.
Constraints: Must be the name of an existing snapshot that is in the
`available` state.
:type snapshot_cluster_identifier: string
:param snapshot_cluster_identifier:
"""
params = {'SnapshotIdentifier': snapshot_identifier, }
if snapshot_cluster_identifier is not None:
params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
return self._make_request(
action='DeleteClusterSnapshot',
verb='POST',
path='/', params=params)
def delete_cluster_subnet_group(self, cluster_subnet_group_name):
"""
Deletes the specified cluster subnet group.
:type cluster_subnet_group_name: string
:param cluster_subnet_group_name: The name of the cluster subnet group
name to be deleted.
"""
params = {
'ClusterSubnetGroupName': cluster_subnet_group_name,
}
return self._make_request(
action='DeleteClusterSubnetGroup',
verb='POST',
path='/', params=params)
def describe_cluster_parameter_groups(self, parameter_group_name=None,
max_records=None, marker=None):
"""
Returns a list of Amazon Redshift parameter groups, including
parameter groups you created and the default parameter group.
For each parameter group, the response includes the parameter
group name, description, and parameter group family name. You
can optionally specify a name to retrieve the description of a
specific parameter group.
For more information about managing parameter groups, go to
`Amazon Redshift Parameter Groups`_ in the Amazon Redshift
Management Guide .
:type parameter_group_name: string
:param parameter_group_name: The name of a specific parameter group for
which to return details. By default, details about all parameter
groups and the default parameter group are returned.
:type max_records: integer
:param max_records: The maximum number of parameter group records to
include in the response. If more records exist than the specified
`MaxRecords` value, the response includes a marker that you can use
in a subsequent DescribeClusterParameterGroups request to retrieve
the next set of records.
Default: `100`
Constraints: Value must be at least 20 and no more than 100.
:type marker: string
:param marker: An optional marker returned by a previous
DescribeClusterParameterGroups request to indicate the first
parameter group that the current request will return.
"""
params = {}
if parameter_group_name is not None:
params['ParameterGroupName'] = parameter_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusterParameterGroups',
verb='POST',
path='/', params=params)
def describe_cluster_parameters(self, parameter_group_name, source=None,
max_records=None, marker=None):
"""
Returns a detailed list of parameters contained within the
specified Amazon Redshift parameter group. For each parameter
the response includes information such as parameter name,
description, data type, value, whether the parameter value is
modifiable, and so on.
You can specify a source filter to retrieve parameters of only a
specific type. For example, to retrieve parameters that were
modified by a user action such as from
ModifyClusterParameterGroup, you can specify source equal to
user .
For more information about managing parameter groups, go to
`Amazon Redshift Parameter Groups`_ in the Amazon Redshift
Management Guide .
:type parameter_group_name: string
:param parameter_group_name: The name of a cluster parameter group for
which to return details.
:type source: string
:param source: The parameter types to return. Specify `user` to show
parameters that are different from the default. Similarly, specify
`engine-default` to show parameters that are the same as the
default parameter group.
Default: All parameter types returned.
Valid Values: `user` | `engine-default`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, the response includes a marker that you can specify in your
subsequent request to retrieve the remaining results.
Default: `100`
Constraints: Value must be at least 20 and no more than 100.
:type marker: string
:param marker: An optional marker returned from a previous
**DescribeClusterParameters** request. If this parameter is
specified, the response includes only records beyond the specified
marker, up to the value specified by `MaxRecords`.
"""
params = {'ParameterGroupName': parameter_group_name, }
if source is not None:
params['Source'] = source
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusterParameters',
verb='POST',
path='/', params=params)
def describe_cluster_security_groups(self,
cluster_security_group_name=None,
max_records=None, marker=None):
"""
Returns information about Amazon Redshift security groups. If
the name of a security group is specified, the response will
contain information about only that security group.
For information about managing security groups, go to `Amazon
Redshift Cluster Security Groups`_ in the Amazon Redshift
Management Guide .
:type cluster_security_group_name: string
:param cluster_security_group_name: The name of a cluster security
group for which you are requesting details. You can specify either
the **Marker** parameter or a **ClusterSecurityGroupName**
parameter, but not both.
Example: `securitygroup1`
:type max_records: integer
:param max_records: The maximum number of records to be included in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response, which you can use in a
subsequent DescribeClusterSecurityGroups request.
Default: `100`
Constraints: Value must be at least 20 and no more than 100.
:type marker: string
:param marker: An optional marker returned by a previous
DescribeClusterSecurityGroups request to indicate the first
security group that the current request will return. You can
specify either the **Marker** parameter or a
**ClusterSecurityGroupName** parameter, but not both.
"""
params = {}
if cluster_security_group_name is not None:
params['ClusterSecurityGroupName'] = cluster_security_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusterSecurityGroups',
verb='POST',
path='/', params=params)
def describe_cluster_snapshots(self, cluster_identifier=None,
snapshot_identifier=None,
snapshot_type=None, start_time=None,
end_time=None, max_records=None,
marker=None, owner_account=None):
"""
Returns one or more snapshot objects, which contain metadata
about your cluster snapshots. By default, this operation
returns information about all snapshots of all clusters that
are owned by your AWS customer account. No information is
returned for snapshots owned by inactive AWS customer
accounts.
:type cluster_identifier: string
:param cluster_identifier: The identifier of the cluster for which
information about snapshots is requested.
:type snapshot_identifier: string
:param snapshot_identifier: The snapshot identifier of the snapshot
about which to return information.
:type snapshot_type: string
:param snapshot_type: The type of snapshots for which you are
requesting information. By default, snapshots of all types are
returned.
Valid Values: `automated` | `manual`
:type start_time: timestamp
:param start_time: A value that requests only snapshots created at or
after the specified time. The time value is specified in ISO 8601
format. For more information about ISO 8601, go to the `ISO8601
Wikipedia page.`_
Example: `2012-07-16T18:00:00Z`
:type end_time: timestamp
:param end_time: A time value that requests only snapshots created at
or before the specified time. The time value is specified in ISO
8601 format. For more information about ISO 8601, go to the
`ISO8601 Wikipedia page.`_
Example: `2012-07-16T18:00:00Z`
:type max_records: integer
:param max_records: The maximum number of snapshot records to include
in the response. If more records exist than the specified
`MaxRecords` value, the response returns a marker that you can use
in a subsequent DescribeClusterSnapshots request in order to
retrieve the next set of snapshot records.
Default: `100`
Constraints: Must be at least 20 and no more than 100.
:type marker: string
:param marker: An optional marker returned by a previous
DescribeClusterSnapshots request to indicate the first snapshot
that the request will return.
:type owner_account: string
:param owner_account: The AWS customer account used to create or copy
the snapshot. Use this field to filter the results to snapshots
owned by a particular account. To describe snapshots you own,
either specify your AWS customer account, or do not specify the
parameter.
"""
params = {}
if cluster_identifier is not None:
params['ClusterIdentifier'] = cluster_identifier
if snapshot_identifier is not None:
params['SnapshotIdentifier'] = snapshot_identifier
if snapshot_type is not None:
params['SnapshotType'] = snapshot_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
if owner_account is not None:
params['OwnerAccount'] = owner_account
return self._make_request(
action='DescribeClusterSnapshots',
verb='POST',
path='/', params=params)
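# Usage sketch (illustrative only): listing the manual snapshots of one
# cluster created after a given time; the ISO 8601 timestamp string is passed
# through unchanged as the StartTime query parameter:
#
#     conn.describe_cluster_snapshots(cluster_identifier='examplecluster',
#                                     snapshot_type='manual',
#                                     start_time='2013-01-01T00:00:00Z')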
def describe_cluster_subnet_groups(self, cluster_subnet_group_name=None,
max_records=None, marker=None):
"""
Returns one or more cluster subnet group objects, which
contain metadata about your cluster subnet groups. By default,
this operation returns information about all cluster subnet
groups that are defined in your AWS account.
:type cluster_subnet_group_name: string
:param cluster_subnet_group_name: The name of the cluster subnet group
for which information is requested.
:type max_records: integer
:param max_records: The maximum number of cluster subnet group records
to include in the response. If more records exist than the
specified `MaxRecords` value, the response returns a marker that
you can use in a subsequent DescribeClusterSubnetGroups request in
order to retrieve the next set of cluster subnet group records.
Default: 100
Constraints: Must be at least 20 and no more than 100.
:type marker: string
:param marker: An optional marker returned by a previous
DescribeClusterSubnetGroups request to indicate the first cluster
subnet group that the current request will return.
"""
params = {}
if cluster_subnet_group_name is not None:
params['ClusterSubnetGroupName'] = cluster_subnet_group_name
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusterSubnetGroups',
verb='POST',
path='/', params=params)
def describe_cluster_versions(self, cluster_version=None,
cluster_parameter_group_family=None,
max_records=None, marker=None):
"""
Returns descriptions of the available Amazon Redshift cluster
versions. You can call this operation even before creating any
clusters to learn more about the Amazon Redshift versions. For
more information about managing clusters, go to `Amazon
Redshift Clusters`_ in the Amazon Redshift Management Guide
:type cluster_version: string
:param cluster_version: The specific cluster version to return.
Example: `1.0`
:type cluster_parameter_group_family: string
:param cluster_parameter_group_family:
The name of a specific cluster parameter group family to return details
for.
Constraints:
+ Must be 1 to 255 alphanumeric characters
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more than the `MaxRecords` value is available, a
marker is included in the response so that the following results
can be retrieved.
Default: `100`
Constraints: Value must be at least 20 and no more than 100.
:type marker: string
:param marker: The marker returned from a previous request. If this
parameter is specified, the response includes records beyond the
marker only, up to `MaxRecords`.
"""
params = {}
if cluster_version is not None:
params['ClusterVersion'] = cluster_version
if cluster_parameter_group_family is not None:
params['ClusterParameterGroupFamily'] = cluster_parameter_group_family
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusterVersions',
verb='POST',
path='/', params=params)
def describe_clusters(self, cluster_identifier=None, max_records=None,
marker=None):
"""
Returns properties of provisioned clusters including general
cluster properties, cluster database properties, maintenance
and backup properties, and security and access properties.
This operation supports pagination. For more information about
managing clusters, go to `Amazon Redshift Clusters`_ in the
Amazon Redshift Management Guide .
:type cluster_identifier: string
:param cluster_identifier: The unique identifier of a cluster whose
properties you are requesting. This parameter isn't case sensitive.
The default is that all clusters defined for an account are returned.
:type max_records: integer
:param max_records: The maximum number of records that the response can
include. If more records exist than the specified `MaxRecords`
value, a `marker` is included in the response that can be used in a
new **DescribeClusters** request to continue listing results.
Default: `100`
Constraints: Value must be at least 20 and no more than 100.
:type marker: string
:param marker: An optional marker returned by a previous
**DescribeClusters** request to indicate the first cluster that the
current **DescribeClusters** request will return.
You can specify either a **Marker** parameter or a
**ClusterIdentifier** parameter in a **DescribeClusters** request,
but not both.
"""
params = {}
if cluster_identifier is not None:
params['ClusterIdentifier'] = cluster_identifier
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeClusters',
verb='POST',
path='/', params=params)
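# Usage sketch (illustrative only): paging through clusters 20 at a time. The
# marker for the next page is taken from the previous response; the exact
# shape of the decoded response depends on _make_request, so extracting the
# Marker value is left as a comment here:
#
#     first_page = conn.describe_clusters(max_records=20)
#     # ... read the returned Marker value from first_page into next_marker ...
#     next_page = conn.describe_clusters(max_records=20, marker=next_marker)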
def describe_default_cluster_parameters(self, parameter_group_family,
max_records=None, marker=None):
"""
Returns a list of parameter settings for the specified
parameter group family.
For more information about managing parameter groups, go to
`Amazon Redshift Parameter Groups`_ in the Amazon Redshift
Management Guide .
:type parameter_group_family: string
:param parameter_group_family: The name of the cluster parameter group
family.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results may be retrieved.
Default: `100`
Constraints: Value must be at least 20 and no more than 100.
:type marker: string
:param marker: An optional marker returned from a previous
**DescribeDefaultClusterParameters** request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {'ParameterGroupFamily': parameter_group_family, }
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeDefaultClusterParameters',
verb='POST',
path='/', params=params)
def describe_events(self, source_identifier=None, source_type=None,
start_time=None, end_time=None, duration=None,
max_records=None, marker=None):
"""
Returns events related to clusters, security groups,
snapshots, and parameter groups for the past 14 days. Events
specific to a particular cluster, security group, snapshot or
parameter group can be obtained by providing the name as a
parameter. By default, the past hour of events are returned.
:type source_identifier: string
:param source_identifier:
The identifier of the event source for which events will be returned.
If this parameter is not specified, then all sources are included
in the response.
Constraints:
If SourceIdentifier is supplied, SourceType must also be provided.
+ Specify a cluster identifier when SourceType is `cluster`.
+ Specify a cluster security group name when SourceType is `cluster-
security-group`.
+ Specify a cluster parameter group name when SourceType is `cluster-
parameter-group`.
+ Specify a cluster snapshot identifier when SourceType is `cluster-
snapshot`.
:type source_type: string
:param source_type:
The event source to retrieve events for. If no value is specified, all
events are returned.
Constraints:
If SourceType is supplied, SourceIdentifier must also be provided.
+ Specify `cluster` when SourceIdentifier is a cluster identifier.
+ Specify `cluster-security-group` when SourceIdentifier is a cluster
security group name.
+ Specify `cluster-parameter-group` when SourceIdentifier is a cluster
parameter group name.
+ Specify `cluster-snapshot` when SourceIdentifier is a cluster
snapshot identifier.
:type start_time: timestamp
:param start_time: The beginning of the time interval to retrieve
events for, specified in ISO 8601 format. For more information
about ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: `2009-07-08T18:00Z`
:type end_time: timestamp
:param end_time: The end of the time interval for which to retrieve
events, specified in ISO 8601 format. For more information about
ISO 8601, go to the `ISO8601 Wikipedia page.`_
Example: `2009-07-08T18:00Z`
:type duration: integer
:param duration: The number of minutes prior to the time of the request
for which to retrieve events. For example, if the request is sent
at 18:00 and you specify a duration of 60, then only events which
have occurred after 17:00 will be returned.
Default: `60`
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results may be retrieved.
Default: `100`
Constraints: Value must be at least 20 and no more than 100.
:type marker: string
:param marker: An optional marker returned from a previous
**DescribeEvents** request. If this parameter is specified, the
response includes only records beyond the marker, up to the value
specified by `MaxRecords`.
"""
params = {}
if source_identifier is not None:
params['SourceIdentifier'] = source_identifier
if source_type is not None:
params['SourceType'] = source_type
if start_time is not None:
params['StartTime'] = start_time
if end_time is not None:
params['EndTime'] = end_time
if duration is not None:
params['Duration'] = duration
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeEvents',
verb='POST',
path='/', params=params)
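# Usage sketch (illustrative only): SourceIdentifier and SourceType must be
# supplied together, e.g. to fetch the last two hours of events for a single
# cluster (the identifier is hypothetical):
#
#     conn.describe_events(source_identifier='examplecluster',
#                          source_type='cluster', duration=120)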
def describe_orderable_cluster_options(self, cluster_version=None,
node_type=None, max_records=None,
marker=None):
"""
Returns a list of orderable cluster options. Before you create
a new cluster you can use this operation to find what options
are available, such as the EC2 Availability Zones (AZ) in the
specific AWS region that you can specify, and the node types
you can request. The node types differ by available storage,
memory, CPU and price. With the cost involved you might want
to obtain a list of cluster options in the specific region and
specify values when creating a cluster. For more information
about managing clusters, go to `Amazon Redshift Clusters`_ in
the Amazon Redshift Management Guide
:type cluster_version: string
:param cluster_version: The version filter value. Specify this
parameter to show only the available offerings matching the
specified version.
Default: All versions.
Constraints: Must be one of the versions returned from
DescribeClusterVersions.
:type node_type: string
:param node_type: The node type filter value. Specify this parameter to
show only the available offerings matching the specified node type.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results may be retrieved.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional marker returned from a previous
**DescribeOrderableClusterOptions** request. If this parameter is
specified, the response includes only records beyond the marker, up
to the value specified by `MaxRecords`.
"""
params = {}
if cluster_version is not None:
params['ClusterVersion'] = cluster_version
if node_type is not None:
params['NodeType'] = node_type
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeOrderableClusterOptions',
verb='POST',
path='/', params=params)
def describe_reserved_node_offerings(self,
reserved_node_offering_id=None,
max_records=None, marker=None):
"""
Returns a list of the available reserved node offerings by
Amazon Redshift with their descriptions including the node
type, the fixed and recurring costs of reserving the node, and
the duration for which the node will be reserved. These descriptions
help you determine which reserved node offering you want to
purchase. You then use the unique offering ID in your call to
PurchaseReservedNodeOffering to reserve one or more nodes for
your Amazon Redshift cluster.
For more information about managing parameter groups, go to
`Purchasing Reserved Nodes`_ in the Amazon Redshift Management
Guide .
:type reserved_node_offering_id: string
:param reserved_node_offering_id: The unique identifier for the
offering.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results may be retrieved.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional marker returned by a previous
DescribeReservedNodeOfferings request to indicate the first
offering that the request will return.
You can specify either a **Marker** parameter or a
**ClusterIdentifier** parameter in a DescribeClusters request, but
not both.
"""
params = {}
if reserved_node_offering_id is not None:
params['ReservedNodeOfferingId'] = reserved_node_offering_id
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedNodeOfferings',
verb='POST',
path='/', params=params)
def describe_reserved_nodes(self, reserved_node_id=None,
max_records=None, marker=None):
"""
Returns the descriptions of the reserved nodes.
:type reserved_node_id: string
:param reserved_node_id: Identifier for the node reservation.
:type max_records: integer
:param max_records: The maximum number of records to include in the
response. If more records exist than the specified `MaxRecords`
value, a marker is included in the response so that the remaining
results may be retrieved.
Default: `100`
Constraints: minimum 20, maximum 100.
:type marker: string
:param marker: An optional marker returned by a previous
DescribeReservedNodes request to indicate the first parameter group
that the current request will return.
"""
params = {}
if reserved_node_id is not None:
params['ReservedNodeId'] = reserved_node_id
if max_records is not None:
params['MaxRecords'] = max_records
if marker is not None:
params['Marker'] = marker
return self._make_request(
action='DescribeReservedNodes',
verb='POST',
path='/', params=params)
def describe_resize(self, cluster_identifier):
"""
Returns information about the last resize operation for the
specified cluster. If no resize operation has ever been
initiated for the specified cluster, an `HTTP 404` error is
returned. If a resize operation was initiated and completed,
the status of the resize remains as `SUCCEEDED` until the next
resize.
A resize operation can be requested using ModifyCluster and
specifying a different number or type of nodes for the
cluster.
:type cluster_identifier: string
:param cluster_identifier: The unique identifier of a cluster whose
resize progress you are requesting. This parameter isn't case-
sensitive.
By default, resize operations for all clusters defined for an AWS
account are returned.
"""
params = {'ClusterIdentifier': cluster_identifier, }
return self._make_request(
action='DescribeResize',
verb='POST',
path='/', params=params)
def modify_cluster(self, cluster_identifier, cluster_type=None,
node_type=None, number_of_nodes=None,
cluster_security_groups=None,
vpc_security_group_ids=None,
master_user_password=None,
cluster_parameter_group_name=None,
automated_snapshot_retention_period=None,
preferred_maintenance_window=None,
cluster_version=None, allow_version_upgrade=None):
"""
Modifies the settings for a cluster. For example, you can add
another security or parameter group, update the preferred
maintenance window, or change the master user password.
Resetting a cluster password or modifying the security groups
associated with a cluster do not need a reboot. However,
modifying parameter group requires a reboot for parameters to
take effect. For more information about managing clusters, go
to `Amazon Redshift Clusters`_ in the Amazon Redshift
Management Guide
You can also change node type and the number of nodes to scale
up or down the cluster. When resizing a cluster, you must
specify both the number of nodes and the node type even if one
of the parameters does not change. If you specify the same
number of nodes and node type that are already configured for
the cluster, an error is returned.
:type cluster_identifier: string
:param cluster_identifier: The unique identifier of the cluster to be
modified.
Example: `examplecluster`
:type cluster_type: string
:param cluster_type: The new cluster type.
When you submit your cluster resize request, your existing cluster goes
into a read-only mode. After Amazon Redshift provisions a new
cluster based on your resize requirements, there will be an outage for
a period while the old cluster is deleted and your connection is
switched to the new cluster. You can use DescribeResize to track
the progress of the resize request.
Valid Values: `multi-node` | `single-node`
:type node_type: string
:param node_type: The new node type of the cluster. If you specify a
new node type, you must also specify the number of nodes parameter.
When you submit your request to resize a cluster, Amazon Redshift sets
access permissions for the cluster to read-only. After Amazon
Redshift provisions a new cluster according to your resize
requirements, there will be a temporary outage while the old
cluster is deleted and your connection is switched to the new
cluster. When the new connection is complete, the original access
permissions for the cluster are restored. You can use the
DescribeResize to track the progress of the resize request.
Valid Values: `dw.hs1.xlarge` | `dw.hs1.8xlarge`
:type number_of_nodes: integer
:param number_of_nodes: The new number of nodes of the cluster. If you
specify a new number of nodes, you must also specify the node type
parameter.
When you submit your request to resize a cluster, Amazon Redshift sets
access permissions for the cluster to read-only. After Amazon
Redshift provisions a new cluster according to your resize
requirements, there will be a temporary outage while the old
cluster is deleted and your connection is switched to the new
cluster. When the new connection is complete, the original access
permissions for the cluster are restored. You can use
DescribeResize to track the progress of the resize request.
Valid Values: Integer greater than `0`.
:type cluster_security_groups: list
:param cluster_security_groups:
A list of cluster security groups to be authorized on this cluster.
This change is asynchronously applied as soon as possible.
Security groups currently associated with the cluster and not in the
list of groups to apply will be revoked from the cluster.
Constraints:
+ Must be 1 to 255 alphanumeric characters or hyphens
+ First character must be a letter
+ Cannot end with a hyphen or contain two consecutive hyphens
:type vpc_security_group_ids: list
:param vpc_security_group_ids: A list of Virtual Private Cloud (VPC)
security groups to be associated with the cluster.
:type master_user_password: string
:param master_user_password:
The new password for the cluster master user. This change is
asynchronously applied as soon as possible. Between the time of the
request and the completion of the request, the `MasterUserPassword`
element exists in the `PendingModifiedValues` element of the
operation response.
Operations never return the password, so this operation provides a way
to regain access to the master user account for a cluster if the
password is lost.
Default: Uses existing setting.
Constraints:
+ Must be between 8 and 64 characters in length.
+ Must contain at least one uppercase letter.
+ Must contain at least one lowercase letter.
+ Must contain one number.
+ Can be any printable ASCII character (ASCII code 33 to 126) except '
(single quote), " (double quote), \, /, @, or space.
:type cluster_parameter_group_name: string
:param cluster_parameter_group_name: The name of the cluster parameter
group to apply to this cluster. This change is applied only after
the cluster is rebooted. To reboot a cluster use RebootCluster.
Default: Uses existing setting.
Constraints: The cluster parameter group must be in the same parameter
group family that matches the cluster version.
:type automated_snapshot_retention_period: integer
:param automated_snapshot_retention_period: The number of days that
automated snapshots are retained. If the value is 0, automated
snapshots are disabled. Even if automated snapshots are disabled,
you can still create manual snapshots when you want with
CreateClusterSnapshot.
If you decrease the automated snapshot retention period from its
current value, existing automated snapshots which fall outside of
the new retention period will be immediately deleted.
Default: Uses existing setting.
Constraints: Must be a value from 0 to 35.
:type preferred_maintenance_window: string
:param preferred_maintenance_window: The weekly time range (in UTC)
during which system maintenance can occur, if necessary. If system
maintenance is necessary during the window, it may result in an
outage.
This maintenance window change is made immediately. If the new
maintenance window indicates the current time, there must be at
least 120 minutes between the current time and end of the window in
order to ensure that pending changes are applied.
Default: Uses existing setting.
Format: ddd:hh24:mi-ddd:hh24:mi, for example `wed:07:30-wed:08:00`.
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes.
:type cluster_version: string
:param cluster_version: The new version number of the Amazon Redshift
engine to upgrade to.
For major version upgrades, if a non-default cluster parameter group is
currently in use, a new cluster parameter group in the cluster
parameter group family for the new version must be specified. The
new cluster parameter group can be the default for that cluster
parameter group family. For more information about managing
parameter groups, go to `Amazon Redshift Parameter Groups`_ in the
Amazon Redshift Management Guide .
Example: `1.0`
:type allow_version_upgrade: boolean
:param allow_version_upgrade: If `True`, upgrades will be applied
automatically to the cluster during the maintenance window.
Default: `False`
"""
params = {'ClusterIdentifier': cluster_identifier, }
if cluster_type is not None:
params['ClusterType'] = cluster_type
if node_type is not None:
params['NodeType'] = node_type
if number_of_nodes is not None:
params['NumberOfNodes'] = number_of_nodes
if cluster_security_groups is not None:
self.build_list_params(params,
cluster_security_groups,
'ClusterSecurityGroups.member')
if vpc_security_group_ids is not None:
self.build_list_params(params,
vpc_security_group_ids,
'VpcSecurityGroupIds.member')
if master_user_password is not None:
params['MasterUserPassword'] = master_user_password
if cluster_parameter_group_name is not None:
params['ClusterParameterGroupName'] = cluster_parameter_group_name
if automated_snapshot_retention_period is not None:
params['AutomatedSnapshotRetentionPeriod'] = automated_snapshot_retention_period
if preferred_maintenance_window is not None:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if cluster_version is not None:
params['ClusterVersion'] = cluster_version
if allow_version_upgrade is not None:
params['AllowVersionUpgrade'] = str(
allow_version_upgrade).lower()
return self._make_request(
action='ModifyCluster',
verb='POST',
path='/', params=params)
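# Usage sketch (illustrative only): a resize must name both the node type and
# the number of nodes, even if only one of them changes, while a password
# rotation needs only the new password (all values are hypothetical):
#
#     conn.modify_cluster('examplecluster', cluster_type='multi-node',
#                         node_type='dw.hs1.xlarge', number_of_nodes=4)
#
#     conn.modify_cluster('examplecluster', master_user_password='NewSecret1')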
def modify_cluster_parameter_group(self, parameter_group_name,
parameters):
"""
Modifies the parameters of a parameter group.
For more information about managing parameter groups, go to
`Amazon Redshift Parameter Groups`_ in the Amazon Redshift
Management Guide .
:type parameter_group_name: string
:param parameter_group_name: The name of the parameter group to be
modified.
:type parameters: list
:param parameters: An array of parameters to be modified. A maximum of
20 parameters can be modified in a single request.
For each parameter to be modified, you must supply at least the
parameter name and parameter value; other name-value pairs of the
parameter are optional.
"""
params = {'ParameterGroupName': parameter_group_name, }
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion'))
return self._make_request(
action='ModifyClusterParameterGroup',
verb='POST',
path='/', params=params)
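# Usage sketch (illustrative only), assuming build_complex_list_params zips
# each tuple against the field names declared above in order: supplying
# (name, value) pairs would set ParameterName and ParameterValue for each
# Parameters.member.N entry. The parameter names and values are hypothetical
# examples:
#
#     conn.modify_cluster_parameter_group(
#         'exampleparametergroup',
#         [('statement_timeout', '60000'),
#          ('extra_float_digits', '2')])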
def modify_cluster_subnet_group(self, cluster_subnet_group_name,
subnet_ids, description=None):
"""
Modifies a cluster subnet group to include the specified list
of VPC subnets. The operation replaces the existing list of
subnets with the new list of subnets.
:type cluster_subnet_group_name: string
:param cluster_subnet_group_name: The name of the subnet group to be
modified.
:type description: string
:param description: A text description of the subnet group to be
modified.
:type subnet_ids: list
:param subnet_ids: An array of VPC subnet IDs. A maximum of 20 subnets
can be modified in a single request.
"""
params = {
'ClusterSubnetGroupName': cluster_subnet_group_name,
}
self.build_list_params(params,
subnet_ids,
'SubnetIds.member')
if description is not None:
params['Description'] = description
return self._make_request(
action='ModifyClusterSubnetGroup',
verb='POST',
path='/', params=params)
def purchase_reserved_node_offering(self, reserved_node_offering_id,
node_count=None):
"""
Allows you to purchase reserved nodes. Amazon Redshift offers
a predefined set of reserved node offerings. You can purchase
one of the offerings. You can call the
DescribeReservedNodeOfferings API to obtain the available
reserved node offerings. You can call this API by providing a
specific reserved node offering and the number of nodes you
want to reserve.
For more information about managing parameter groups, go to
`Purchasing Reserved Nodes`_ in the Amazon Redshift Management
Guide .
:type reserved_node_offering_id: string
:param reserved_node_offering_id: The unique identifier of the reserved
node offering you want to purchase.
:type node_count: integer
:param node_count: The number of reserved nodes you want to purchase.
Default: `1`
"""
params = {
'ReservedNodeOfferingId': reserved_node_offering_id,
}
if node_count is not None:
params['NodeCount'] = node_count
return self._make_request(
action='PurchaseReservedNodeOffering',
verb='POST',
path='/', params=params)
def reboot_cluster(self, cluster_identifier):
"""
Reboots a cluster. This action is taken as soon as possible.
It results in a momentary outage to the cluster, during which
the cluster status is set to `rebooting`. A cluster event is
created when the reboot is completed. Any pending cluster
modifications (see ModifyCluster) are applied at this reboot.
For more information about managing clusters, go to `Amazon
Redshift Clusters`_ in the Amazon Redshift Management Guide
:type cluster_identifier: string
:param cluster_identifier: The cluster identifier.
"""
params = {'ClusterIdentifier': cluster_identifier, }
return self._make_request(
action='RebootCluster',
verb='POST',
path='/', params=params)
def reset_cluster_parameter_group(self, parameter_group_name,
reset_all_parameters=None,
parameters=None):
"""
Sets one or more parameters of the specified parameter group
to their default values and sets the source values of the
parameters to "engine-default". To reset the entire parameter
group specify the ResetAllParameters parameter. For parameter
changes to take effect you must reboot any associated
clusters.
:type parameter_group_name: string
:param parameter_group_name: The name of the cluster parameter group to
be reset.
:type reset_all_parameters: boolean
:param reset_all_parameters: If `True`, all parameters in the specified
parameter group will be reset to their default values.
Default: `True`
:type parameters: list
:param parameters: An array of names of parameters to be reset. If
ResetAllParameters option is not used, then at least one parameter
name must be supplied.
Constraints: A maximum of 20 parameters can be reset in a single
request.
"""
params = {'ParameterGroupName': parameter_group_name, }
if reset_all_parameters is not None:
params['ResetAllParameters'] = str(
reset_all_parameters).lower()
if parameters is not None:
self.build_complex_list_params(
params, parameters,
'Parameters.member',
('ParameterName', 'ParameterValue', 'Description', 'Source', 'DataType', 'AllowedValues', 'IsModifiable', 'MinimumEngineVersion'))
return self._make_request(
action='ResetClusterParameterGroup',
verb='POST',
path='/', params=params)
def restore_from_cluster_snapshot(self, cluster_identifier,
snapshot_identifier,
snapshot_cluster_identifier=None,
port=None, availability_zone=None,
allow_version_upgrade=None,
cluster_subnet_group_name=None,
publicly_accessible=None,
owner_account=None):
"""
Creates a new cluster from a snapshot. Amazon Redshift creates
the resulting cluster with the same configuration as the
original cluster from which the snapshot was created, except
that the new cluster is created with the default cluster
security and parameter group. After Amazon Redshift creates
the cluster you can use the ModifyCluster API to associate a
different security group and different parameter group with
the restored cluster.
If a snapshot is taken of a cluster in VPC, you can restore it
only in VPC. In this case, you must provide a cluster subnet
        group where you want the cluster restored. If a snapshot is
taken of a cluster outside VPC, then you can restore it only
outside VPC.
For more information about working with snapshots, go to
`Amazon Redshift Snapshots`_ in the Amazon Redshift Management
Guide .
:type cluster_identifier: string
:param cluster_identifier: The identifier of the cluster that will be
created from restoring the snapshot.
Constraints:
+ Must contain from 1 to 63 alphanumeric characters or hyphens.
+ Alphabetic characters must be lowercase.
+ First character must be a letter.
+ Cannot end with a hyphen or contain two consecutive hyphens.
+ Must be unique for all clusters within an AWS account.
:type snapshot_identifier: string
:param snapshot_identifier: The name of the snapshot from which to
create the new cluster. This parameter isn't case sensitive.
Example: `my-snapshot-id`
:type snapshot_cluster_identifier: string
:param snapshot_cluster_identifier:
:type port: integer
:param port: The port number on which the cluster accepts connections.
Default: The same port as the original cluster.
Constraints: Must be between `1115` and `65535`.
:type availability_zone: string
:param availability_zone: The Amazon EC2 Availability Zone in which to
restore the cluster.
Default: A random, system-chosen Availability Zone.
Example: `us-east-1a`
:type allow_version_upgrade: boolean
:param allow_version_upgrade: If `True`, upgrades can be applied during
the maintenance window to the Amazon Redshift engine that is
running on the cluster.
Default: `True`
:type cluster_subnet_group_name: string
        :param cluster_subnet_group_name: The name of the subnet group where
            you want the cluster restored.
        A snapshot of a cluster in a VPC can be restored only in a VPC.
            Therefore, you must provide the name of the subnet group where
            you want the cluster restored.
:type publicly_accessible: boolean
:param publicly_accessible: If `True`, the cluster can be accessed from
a public network.
:type owner_account: string
:param owner_account: The AWS customer account used to create or copy
the snapshot. Required if you are restoring a snapshot you do not
own, optional if you own the snapshot.
"""
params = {
'ClusterIdentifier': cluster_identifier,
'SnapshotIdentifier': snapshot_identifier,
}
if snapshot_cluster_identifier is not None:
params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
if port is not None:
params['Port'] = port
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if allow_version_upgrade is not None:
params['AllowVersionUpgrade'] = str(
allow_version_upgrade).lower()
if cluster_subnet_group_name is not None:
params['ClusterSubnetGroupName'] = cluster_subnet_group_name
if publicly_accessible is not None:
params['PubliclyAccessible'] = str(
publicly_accessible).lower()
if owner_account is not None:
params['OwnerAccount'] = owner_account
return self._make_request(
action='RestoreFromClusterSnapshot',
verb='POST',
path='/', params=params)
def revoke_cluster_security_group_ingress(self,
cluster_security_group_name,
cidrip=None,
ec2_security_group_name=None,
ec2_security_group_owner_id=None):
"""
Revokes an ingress rule in an Amazon Redshift security group
for a previously authorized IP range or Amazon EC2 security
group. To add an ingress rule, see
AuthorizeClusterSecurityGroupIngress. For information about
        managing security groups, go to `Amazon Redshift Cluster
Security Groups`_ in the Amazon Redshift Management Guide .
:type cluster_security_group_name: string
:param cluster_security_group_name: The name of the security Group from
which to revoke the ingress rule.
:type cidrip: string
:param cidrip: The IP range for which to revoke access. This range must
be a valid Classless Inter-Domain Routing (CIDR) block of IP
addresses. If `CIDRIP` is specified, `EC2SecurityGroupName` and
`EC2SecurityGroupOwnerId` cannot be provided.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 Security Group
whose access is to be revoked. If `EC2SecurityGroupName` is
specified, `EC2SecurityGroupOwnerId` must also be provided and
`CIDRIP` cannot be provided.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The AWS account number of the owner
of the security group specified in the `EC2SecurityGroupName`
parameter. The AWS access key ID is not an acceptable value. If
`EC2SecurityGroupOwnerId` is specified, `EC2SecurityGroupName` must
            also be provided and `CIDRIP` cannot be provided.
Example: `111122223333`
"""
params = {
'ClusterSecurityGroupName': cluster_security_group_name,
}
if cidrip is not None:
params['CIDRIP'] = cidrip
if ec2_security_group_name is not None:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id is not None:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
return self._make_request(
action='RevokeClusterSecurityGroupIngress',
verb='POST',
path='/', params=params)
def revoke_snapshot_access(self, snapshot_identifier,
account_with_restore_access,
snapshot_cluster_identifier=None):
"""
Removes the ability of the specified AWS customer account to
restore the specified snapshot. If the account is currently
restoring the snapshot, the restore will run to completion.
For more information about working with snapshots, go to
`Amazon Redshift Snapshots`_ in the Amazon Redshift Management
Guide .
:type snapshot_identifier: string
:param snapshot_identifier: The identifier of the snapshot that the
account can no longer access.
:type snapshot_cluster_identifier: string
:param snapshot_cluster_identifier:
:type account_with_restore_access: string
:param account_with_restore_access: The identifier of the AWS customer
account that can no longer restore the specified snapshot.
"""
params = {
'SnapshotIdentifier': snapshot_identifier,
'AccountWithRestoreAccess': account_with_restore_access,
}
if snapshot_cluster_identifier is not None:
params['SnapshotClusterIdentifier'] = snapshot_cluster_identifier
return self._make_request(
action='RevokeSnapshotAccess',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
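# --- Editor-added usage sketch (not part of the original boto source) ---
# A minimal, hedged illustration of how the connection methods above might be
# called. It assumes a boto 2.x style `connect_to_region` helper for Redshift;
# the region, cluster and parameter-group names are hypothetical placeholders.
#
#   import boto.redshift
#   conn = boto.redshift.connect_to_region('us-east-1')
#   # Attach a different parameter group and snapshot retention period ...
#   conn.modify_cluster('my-cluster',
#                       cluster_parameter_group_name='my-param-group',
#                       automated_snapshot_retention_period=7)
#   # ... then reboot so that pending modifications are applied (see
#   # reboot_cluster above).
#   conn.reboot_cluster('my-cluster')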
| 1.296875
| 1
|
tests/test_groupings.py
|
p-snft/oemof.network
| 0
|
12782740
|
<reponame>p-snft/oemof.network
""" Specific tests for the `oemof.groupings` module.
Most parts of the `groupings` module are tested via other tests, but certain
code paths don't get covered by those, which is what this module is for.
This file is part of project oemof (github.com/oemof/oemof). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location oemof/tests/tests_groupings.py
SPDX-License-Identifier: MIT
"""
from types import MappingProxyType as MaProTy
from nose.tools import assert_raises
from nose.tools import eq_
from oemof.network.groupings import Grouping
def test_initialization_argument_checks():
""" `Grouping` constructor should raise `TypeError` on bad arguments.
"""
message = "\n`Grouping` constructor did not check mandatory arguments."
with assert_raises(TypeError, msg=message):
Grouping()
message = "\n`Grouping` constructor did not check conflicting arguments."
with assert_raises(TypeError, msg=message):
Grouping(key=lambda x: x, constant_key='key')
def test_notimplementederrors():
""" `Grouping` should raise an error when reaching unreachable code.
"""
    message = "\n`Grouping.key` not overridden, but no error raised."
with assert_raises(NotImplementedError, msg=message):
g = Grouping(key="key")
del g.key
g.key("dummy argument")
    message = "\n`Grouping.filter` not overridden, but no error raised."
with assert_raises(NotImplementedError, msg=message):
g = Grouping(key="key")
del g.filter
g.filter("dummy argument")
def test_mutable_mapping_groups():
g = Grouping(
key=lambda x: len(x),
value=lambda x: {y: len([z for z in x if z == y]) for y in x})
groups = {}
expected = {3: {'o': 2, 'f': 1}}
g("foo", groups)
eq_(groups, expected,
"\n Expected: {} \n Got : {}".format(expected, groups))
def test_immutable_mapping_groups():
g = Grouping(
key=lambda x: len(x),
value=lambda x: MaProTy(
{y: len([z for z in x if z == y]) for y in x}))
groups = {}
expected = {3: MaProTy({'o': 2, 'f': 1})}
g("foo", groups)
eq_(groups, expected,
"\n Expected: {} \n Got : {}".format(expected, groups))
| 2.46875
| 2
|
LC/448.py
|
szhu3210/LeetCode_Solutions
| 2
|
12782741
|
class Solution(object):
def findDisappearedNumbers0(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
if not nums:
return []
nums.append(len(nums)+1)
nums.append(0)
nums.sort()
res = []
last = nums[0]
for num in nums[1:]:
if num-last>1:
res += range(last+1, num)
last = num
return res
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
        for n in nums:
            # mark value n as "seen" by making the entry at index n-1 negative
            nums[abs(n) - 1] = -abs(nums[abs(n) - 1])
res = []
for i,n in enumerate(nums):
if n>0:
res.append(i+1)
return res
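# Editor-added demo (not part of the original solution file): a quick check of
# both approaches on the sample input from the problem statement. Each call
# gets its own copy because both methods mutate the input list.
if __name__ == '__main__':
    sample = [4, 3, 2, 7, 8, 2, 3, 1]
    print(Solution().findDisappearedNumbers0(list(sample)))  # [5, 6]
    print(Solution().findDisappearedNumbers(list(sample)))   # [5, 6]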
| 3.21875
| 3
|
minst/model.py
|
oriolromani/minst-dataset
| 44
|
12782742
|
import copy
import json
import jsonschema
import logging
import pandas as pd
import os
from sklearn.model_selection import train_test_split
import minst.utils as utils
logger = logging.getLogger(__name__)
class MissingDataException(Exception):
pass
class Observation(object):
"""Document model each item in the collection."""
# This should use package resources :o(
SCHEMA_PATH = os.path.join(os.path.dirname(__file__), 'schema',
'observation.json')
SCHEMA = json.load(open(SCHEMA_PATH))
def __init__(self, index, dataset, audio_file, instrument, source_index,
start_time, duration, note_number=None, dynamic='',
partition=''):
"""Model definition for an instrument observation.
Parameters
----------
index :
dataset :
audio_file : str
Relative file path to an audiofile.
instrument :
source_index :
start_time :
duration :
note_number :
dynamic :
partition :
Returns
-------
obs : Observation
Populated observation
"""
self.index = index
self.dataset = dataset
self.audio_file = audio_file
self.instrument = instrument
self.source_index = source_index
self.start_time = start_time
self.duration = duration
self.note_number = note_number
self.dynamic = dynamic
self.partition = partition
def to_builtin(self):
return self.__dict__.copy()
@classmethod
def from_series(cls, series):
"""Convert a pd.Series to an Observation."""
return cls(index=series.name, **series.to_dict())
def to_series(self):
"""Convert to a flat series (ie make features a column)
Returns
-------
pandas.Series
"""
flat_dict = self.to_dict()
name = flat_dict.pop("index")
return pd.Series(data=flat_dict, name=name)
def to_dict(self):
return self.__dict__.copy()
def __getitem__(self, key):
return self.__dict__[key]
def validate(self, schema=None, verbose=False, check_files=True):
"""Returns True if valid.
"""
schema = self.SCHEMA if schema is None else schema
success = True
try:
jsonschema.validate(self.to_builtin(), schema)
except jsonschema.ValidationError as derp:
success = False
if verbose:
print("Failed schema test: \n{}".format(derp))
if success and check_files:
success &= utils.check_audio_file(self.audio_file)[0]
if not success and verbose:
print("Failed file check: \n{}".format(self.audio_file))
return success
def _enforce_obs(obs, audio_root='', strict=True):
"""Get dict from an Observation if an observation, else just dict"""
audio_file = obs['audio_file']
escaped_audio_file = os.path.join(audio_root, audio_file)
file_checks = [os.path.exists(audio_file),
os.path.exists(escaped_audio_file)]
if not any(file_checks) and strict:
raise MissingDataException(
"Audio file(s) missing:\n\tbase: {}\n\tescaped:{}"
"".format(audio_file, escaped_audio_file))
if isinstance(obs, Observation):
obs = obs.to_dict()
obs['audio_file'] = escaped_audio_file if file_checks[1] else audio_file
return obs
class Collection(object):
"""Dictionary-like collection of Observations (maintains order).
Expands relative audio files to a given `audio_root` path.
"""
# MODEL = Observation
def __init__(self, observations, audio_root='', strict=False):
"""
Parameters
----------
observations : list
List of Observations (as dicts or Observations.)
If they're dicts, this will convert them to Observations.
data_root : str or None
Path to look for an observation, if not None
"""
self._observations = [Observation(**_enforce_obs(x, audio_root,
strict))
for x in observations]
self.audio_root = audio_root
self.strict = strict
def __eq__(self, a):
is_eq = False
if hasattr(a, 'to_builtin'):
is_eq = self.to_builtin() == a.to_builtin()
return is_eq
def __len__(self):
return len(self.values())
def __getitem__(self, n):
"""Return the observation for a given integer index."""
return self._observations[n]
def items(self):
return [(v.index, v) for v in self.values()]
def values(self):
return self._observations
def keys(self):
return [v.index for v in self.values()]
def append(self, observation, audio_root=None):
audio_root = self.audio_root if audio_root is None else audio_root
obs = _enforce_obs(observation, audio_root, self.strict)
self._observations += [Observation(**obs)]
def to_builtin(self):
return [v.to_builtin() for v in self.values()]
@classmethod
def read_json(cls, json_path, audio_root=''):
with open(json_path, 'r') as fh:
return cls(json.load(fh), audio_root=audio_root)
def to_json(self, json_path=None, **kwargs):
"""Pandas-like `to_json` method.
Parameters
----------
json_path : str, or None
If given, will attempt to write JSON to disk; else returns a string
of serialized data.
**kwargs : keyword args
Pass-through parameters to the JSON serializer.
"""
sdata = json.dumps(self.to_builtin(), **kwargs)
if json_path is not None:
with open(json_path, 'w') as fh:
fh.write(sdata)
else:
return sdata
def validate(self, verbose=False, check_files=True):
"""Returns True if all are valid."""
return all([x.validate(verbose=verbose, check_files=check_files)
for x in self.values()])
def to_dataframe(self):
return pd.DataFrame([x.to_series() for x in self.values()])
@classmethod
def from_dataframe(cls, dframe, audio_root=''):
return cls([Observation.from_series(x) for _, x in dframe.iterrows()],
audio_root=audio_root)
def copy(self, deep=True):
return Collection(copy.deepcopy(self._observations))
def view(self, column, filter_value):
"""Returns a copy of the collection restricted to the filter value.
Parameters
----------
column : str
Name of the column for filtering.
filter_value : obj
Value to restrict the collection.
Returns
        -------
        Collection
            A new Collection containing only the matching observations.
        """
thecopy = copy.copy(self.to_dataframe())
ds_view = thecopy[thecopy[column] == filter_value]
return Collection.from_dataframe(ds_view, self.audio_root)
def load(filename, audio_root):
    """Read a Collection from a JSON file on disk."""
    return Collection.read_json(filename, audio_root=audio_root)
def partition_collection(collection, test_set, train_val_split=0.2,
max_files_per_class=None):
"""Returns Datasets for train and validation constructed
from the datasets not in the test_set, and split with
the ratio train_val_split.
* First selects from only the datasets given in datasets.
* Then **for each instrument** (so the distribution from
each instrument doesn't change)
* train_test_split to generate training and validation sets.
* if max_files_per_class, also then restrict the training set to
a maximum of that number of files for each train and test
Parameters
----------
test_set : str
String in ["rwc", "uiowa", "philharmonia"] which selects
the hold-out-set to be used for testing.
Returns
-------
partition_df : pd.DataFrame
DataFrame with only an index to the original table, and
        the partition in ['train', 'valid', 'test']
"""
df = collection.to_dataframe()
test_df = collection.view(
column='dataset', filter_value=test_set).to_dataframe()
datasets = set(df["dataset"].unique()) - set([test_set])
search_df = df[df["dataset"].isin(datasets)]
selected_instruments_train = []
selected_instruments_valid = []
for instrument in search_df["instrument"].unique():
instrument_df = search_df[search_df["instrument"] == instrument]
if len(instrument_df) < 2:
            logger.warning("Instrument {} doesn't have enough samples "
"to split.".format(instrument))
continue
groups = instrument_df.groupby(['source_index'])
train_grps, valid_grps = train_test_split(
list(groups), test_size=train_val_split)
# Groups get backed out as (source_index, dataframe) tuples, so stick
# these back together now that they've been partitioned.
traindf = pd.concat(x[1] for x in train_grps)
validdf = pd.concat(x[1] for x in valid_grps)
if max_files_per_class:
replace = False if len(traindf) > max_files_per_class else True
traindf = traindf.sample(n=max_files_per_class,
replace=replace)
selected_instruments_train.append(traindf)
selected_instruments_valid.append(validdf)
train_df = pd.concat(selected_instruments_train)
valid_df = pd.concat(selected_instruments_valid)
# Create the final dataframe
partition = (['train'] * len(train_df) +
['valid'] * len(valid_df) +
['test'] * len(test_df))
index = (train_df.index.tolist() +
valid_df.index.tolist() +
test_df.index.tolist())
result = pd.DataFrame(partition,
columns=['partition'],
index=index)
return result
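# --- Editor-added demo (not part of the original module) ---
# A small, self-contained sketch of the Collection API using made-up metadata.
# The audio paths do not need to exist because `strict` defaults to False.
if __name__ == '__main__':
    demo = Collection([
        dict(index='obs-0', dataset='uiowa', audio_file='uiowa/horn_01.aiff',
             instrument='horn', source_index='u0', start_time=0.0,
             duration=1.0, note_number=60),
        dict(index='obs-1', dataset='rwc', audio_file='rwc/cello_01.wav',
             instrument='cello', source_index='r0', start_time=0.0,
             duration=2.0, note_number=48),
    ])
    print(demo.to_dataframe())
    # `view` restricts the collection to rows matching one column value.
    print(demo.view(column='instrument', filter_value='cello').to_dataframe())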
| 2.59375
| 3
|
Curso_em_Video_Exercicios/ex067.py
|
Cohuzer/Exercicios-do-Curso-em-Video
| 0
|
12782743
|
<filename>Curso_em_Video_Exercicios/ex067.py
# Multiplication table for numbers entered by the user - stops when a negative value is entered
while True:
print('-' * 30)
n = int(input('QUAL TABUADA VOCÊ DESEJA VER? '))
print('-' * 30)
c = 0
if n < 0:
break
while c != 11:
print(f'{n} X {c} = {n * c}')
c += 1
print('FIM DO PROCESSO')
| 3.609375
| 4
|
mlmodels/model_tf/misc/tf_nlp/language-detection/1.fast-text-ngrams.py
|
gitter-badger/mlmodels
| 1
|
12782744
|
<filename>mlmodels/model_tf/misc/tf_nlp/language-detection/1.fast-text-ngrams.py
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import re
import time
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn import metrics
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
# In[2]:
lang = pd.read_csv("sentences.csv", sep="\t")
lang = lang.dropna()
lang.head()
# In[3]:
X, Y = [], []
for no, ln in enumerate(lang.cmn.unique()):
langs = lang.loc[lang.cmn == ln]
if langs.shape[0] < 500:
continue
print(no, ln)
langs = langs.iloc[:500, -1].tolist()
X.extend(langs)
Y.extend([ln] * len(langs))
# In[4]:
def clean_text(string):
string = re.sub("[0-9!@#$%^&*()_\-+{}|\~`'\";:?/.>,<]", " ", string.lower(), flags=re.UNICODE)
return re.sub(r"[ ]+", " ", string.lower()).strip()
# In[5]:
X = [clean_text(s) for s in X]
# In[6]:
bow_chars = CountVectorizer(ngram_range=(3, 5), analyzer="char_wb", max_features=700000).fit(X)
delattr(bow_chars, "stop_words_")
target = LabelEncoder().fit_transform(Y)
features = bow_chars.transform(X)
features.shape
# In[7]:
train_X, test_X, train_Y, test_Y = train_test_split(features, target, test_size=0.2)
del features
# In[8]:
# In[9]:
def convert_sparse_matrix_to_sparse_tensor(X, limit=5):
coo = X.tocoo()
indices = np.mat([coo.row, coo.col]).transpose()
coo.data[coo.data > limit] = limit
return (
tf.SparseTensorValue(indices, coo.col, coo.shape),
tf.SparseTensorValue(indices, coo.data, coo.shape),
)
# In[10]:
labels = np.unique(Y, return_counts=True)[0]
labels
# In[11]:
class Model:
def __init__(self, learning_rate):
self.X = tf.sparse_placeholder(tf.int32)
self.W = tf.sparse_placeholder(tf.int32)
self.Y = tf.placeholder(tf.int32, [None])
embeddings = tf.Variable(tf.truncated_normal([train_X.shape[1], 64]))
embed = tf.nn.embedding_lookup_sparse(embeddings, self.X, self.W, combiner="mean")
self.logits = tf.layers.dense(embed, len(labels))
self.cost = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y)
)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
correct_pred = tf.equal(tf.argmax(self.logits, 1, output_type=tf.int32), self.Y)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# In[12]:
sess = tf.InteractiveSession()
model = Model(1e-4)
sess.run(tf.global_variables_initializer())
# In[13]:
batch_size = 64
for e in range(50):
lasttime = time.time()
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
pbar = tqdm(range(0, train_X.shape[0], batch_size), desc="train minibatch loop")
for i in pbar:
batch_x = convert_sparse_matrix_to_sparse_tensor(
train_X[i : min(i + batch_size, train_X.shape[0])]
)
batch_y = train_Y[i : min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict={model.X: batch_x[0], model.W: batch_x[1], model.Y: batch_y},
)
assert not np.isnan(cost)
train_loss += cost
train_acc += acc
pbar.set_postfix(cost=cost, accuracy=acc)
pbar = tqdm(range(0, test_X.shape[0], batch_size), desc="test minibatch loop")
for i in pbar:
batch_x = convert_sparse_matrix_to_sparse_tensor(
test_X[i : min(i + batch_size, test_X.shape[0])]
)
batch_y = test_Y[i : min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict={model.X: batch_x[0], model.W: batch_x[1], model.Y: batch_y},
)
test_loss += cost
test_acc += acc
pbar.set_postfix(cost=cost, accuracy=acc)
train_loss /= train_X.shape[0] / batch_size
train_acc /= train_X.shape[0] / batch_size
test_loss /= test_X.shape[0] / batch_size
test_acc /= test_X.shape[0] / batch_size
print("time taken:", time.time() - lasttime)
print(
"epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n"
% (e, train_loss, train_acc, test_loss, test_acc)
)
# In[14]:
real_Y, predict_Y = [], []
pbar = tqdm(range(0, test_X.shape[0], batch_size), desc="validation minibatch loop")
for i in pbar:
batch_x = convert_sparse_matrix_to_sparse_tensor(
test_X[i : min(i + batch_size, test_X.shape[0])]
)
batch_y = test_Y[i : min(i + batch_size, test_X.shape[0])].tolist()
predict_Y += np.argmax(
sess.run(
model.logits, feed_dict={model.X: batch_x[0], model.W: batch_x[1], model.Y: batch_y}
),
1,
).tolist()
real_Y += batch_y
# In[15]:
print(metrics.classification_report(real_Y, predict_Y, target_names=labels))
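# In[16]:
# Editor-added sketch (not part of the original notebook export): classify one
# new sentence with the trained model. The sentence is an arbitrary example;
# only objects defined above are reused (clean_text, bow_chars,
# convert_sparse_matrix_to_sparse_tensor, model, sess, labels).
sample = clean_text("ceci est une phrase d'exemple")
sample_x = convert_sparse_matrix_to_sparse_tensor(bow_chars.transform([sample]))
sample_logits = sess.run(
    model.logits, feed_dict={model.X: sample_x[0], model.W: sample_x[1]}
)
print(labels[np.argmax(sample_logits, 1)[0]])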
| 2.640625
| 3
|
app/models/group.py
|
Saevon/PersOA
| 2
|
12782745
|
<reponame>Saevon/PersOA
from django.db import models
from app.constants.database import MAX_CHAR_LENGTH
from app.models.abstract import AbstractPersOAModel
from itertools import chain
from utils.decorators import seeded
class TraitGroup(AbstractPersOAModel):
"""
A grouping for Traits.
"""
name = models.CharField(
max_length=MAX_CHAR_LENGTH,
blank=False,
unique=True)
desc = models.TextField(blank=True)
basic_traits = models.ManyToManyField(
'BasicTrait',
related_name='groups',
blank=True
)
linear_traits = models.ManyToManyField(
'LinearTrait',
related_name='groups',
blank=True,
)
def __unicode__(self):
return unicode(self.name)
@property
    def traits(self):
        """
Combines the list of Traits
"""
return chain(self.basic_traits.all(), self.linear_traits.all())
@seeded(2)
def generate(self, num=None, seed=None, include=None):
"""
Returns a choice for each of the groupings traits
"""
if num is None:
num = 1
groups = []
for i in range(num):
group = {}
for trait in self.traits:
group[trait.name] = [
i.details(include)
for i in trait.generate(seed=seed)
]
groups.append(group)
        return groups
def details(self, include=None):
"""
Returns a dict with the choice's details
"""
details = self.data()
if include is None:
pass
elif include['group_name']:
return self.name
include['group'] = False
if include['trait']:
details.update({
'traits': [trait.details(include) for trait in self.traits]
})
return details
def data(self):
"""
Returns a dict with the basic details
"""
return {
'name': self.name,
'desc': self.desc,
}
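# --- Editor-added usage sketch (not part of the original module) ---
# Illustrative only: it assumes a populated database, a hypothetical group
# named 'personality', and that the `seeded` decorator forwards the `num` and
# `seed` keywords unchanged to `generate`.
#
#   group = TraitGroup.objects.get(name='personality')
#   choices = group.generate(num=3, seed=42)  # list with one choice dict per group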
| 2.453125
| 2
|
tests/test_tutorial/importers/test_tutorial001.py
|
ta4tsering/openpecha-toolkit
| 1
|
12782746
|
from pathlib import Path
from docs_src.importers.hfml.tutorial001 import result
def test_hfml_base():
output_fn = Path("tests") / "formatters" / "hfml" / "data" / "kangyur_base.txt"
expected = output_fn.read_text()
assert result == expected
| 2.1875
| 2
|
lib/mapper/label.py
|
hashnfv/hashnfv-domino
| 0
|
12782747
|
#!/usr/bin/env python
#
# Licence statement goes here
#
#from toscaparser.tosca_template import ToscaTemplate
#Current version:
#Parses policy rules, extracts targets, extracts policy properties
#Returns set of policy properties for each target in a dictionary object
#e.g., node_labels['VNF1'] = {label1, label2, ..., labeln}
def extract_labels(tpl):
node_labels = dict() #stores labels for each node
if tpl.has_key('topology_template'):
if tpl['topology_template'].has_key('policies'):
policies = tpl['topology_template']['policies']
else:
return node_labels
else:
return node_labels
#extract label sets for each policy target
for p in policies:
for rule in p:
targetlist = p[rule]['targets']
for props in p[rule]['properties']:
prop_list = p[rule]['properties'][props]
for values in prop_list:
labelkey = p[rule]['type']+ ':properties:' + props + ":" + values
for target in targetlist:
if node_labels.has_key(target):
node_labels[target].update(set([labelkey]))
else:
node_labels[target] = set([labelkey])
return node_labels
# Returns a map from nodes to regions based on label matching
def map_nodes(site_labels,node_labels):
sitemap = dict() #stores mapping
#for each target find a map of sites
for node in node_labels:
sitemap[node] = set()
for site in site_labels:
if node_labels[node].issubset(site_labels[site]):
sitemap[node].add(site)
return sitemap
# Selects sites for nodes if multiple candidates exist
# First iterate for nodes with single candidate site
# Rank sites with most nodes higher
def select_site( site_map ):
node_site = dict()
counter = dict()
#SHALL I CHECK IF ANY KEY HAS AN EMPTY SET TO THROW EXCEPTION?
#For now, I assume input as safe
for node in site_map:
node_site[node] = []
if len(site_map[node]) == 1:
for site in site_map[node]:
node_site[node] = site
if counter.has_key(site):
counter[site] = counter[site] + 1
else:
counter[site] = 1
for node in site_map:
if len(site_map[node]) > 1:
maxval = 0
maxkey = '-1'
for site in site_map[node]:
if counter.has_key(site) and counter[site] >= maxval:
maxval = counter[site]
maxkey = site
elif counter.has_key(site) == False:
counter[site] = 0
if maxval == 0:
maxval = 1
maxkey = site
node_site[node] = maxkey
counter[node_site[node]] = counter[node_site[node]] + 1
return node_site
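# --- Editor-added example (not part of the original module) ---
# A tiny end-to-end run of the mapper with a made-up policy and two candidate
# sites; the policy type and label values are hypothetical. Note the module
# uses dict.has_key, so this example runs under Python 2 only.
if __name__ == '__main__':
    tpl = {'topology_template': {'policies': [
        {'placement_rule': {
            'type': 'tosca.policies.Placement',
            'targets': ['VNF1'],
            'properties': {'region': ['us-west'], 'zone': ['z1']},
        }},
    ]}}
    site_labels = {
        'siteA': set(['tosca.policies.Placement:properties:region:us-west',
                      'tosca.policies.Placement:properties:zone:z1']),
        'siteB': set(['tosca.policies.Placement:properties:region:us-west']),
    }
    node_labels = extract_labels(tpl)               # {'VNF1': set of two labels}
    site_map = map_nodes(site_labels, node_labels)  # {'VNF1': set(['siteA'])}
    print(select_site(site_map))                    # {'VNF1': 'siteA'}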
| 2.40625
| 2
|
main/codeSamples/DataStructures/binary_trees/0x00-python-binary_trees/0-binary_tree.py
|
JKUATSES/dataStructuresAlgorithms
| 0
|
12782748
|
<gh_stars>0
#!/usr/bin/python3
"""0-binary_tree module defines classes for creating
and interacting with a binary tree
"""
class Node:
""" Class Node defines the structure of a single node of a binary tree
Attributes:
__data: Int value held by the node
__left: Pointer to the left child of a parent/root
__right: Pointer to the right child of parent/root
"""
def __init__(self, data=None, left=None, right=None):
"""This method initializes a single node.
Args:
left (Node): This is a pointer to the left child of a parent/root
data (Any): This defines the data that the node holds
right (Node): This is a pointer to the right child of a parent/root
"""
if type(data) is not int:
raise TypeError("Data must be an integer value")
elif (not isinstance(left, Node) and left is not None):
raise TypeError("left must be an instance of Node")
elif (not isinstance(right, Node) and right is not None):
raise TypeError("right must be an instance of Node")
else:
self.__left = left
self.__data = data
self.__right = right
@property
def left(self):
"""This method returns the left child of parent/root."""
return (self.__left)
@left.setter
def left(self, left=None):
"""This method sets left child of parent/root.
Args:
left (Node): This is a pointer to the left child
of a parent/root
Raises:
            TypeError: When argument is not of type Node
"""
if (not isinstance(left, Node) and left is not None):
raise TypeError("left must be an instance of Node")
self.__left = left
@property
def data(self):
"""This method returns the data held by the node"""
return (self.__data)
@data.setter
def data(self, data=None):
"""This method sets the data of the node.
Args:
data (int): This holds the data of the node.
Raises:
TypeError: When argument is not of type int
"""
if type(data) is not int:
raise TypeError("Data must be an integer value")
self.__data = data
@property
def right(self):
"""This method returns the right child of parent/root."""
return (self.__right)
@right.setter
def right(self, right=None):
"""This method sets right child of parent/root.
Args:
            right (Node): This is a pointer to the right child
of a parent/root
Raises:
            TypeError: When argument is not of type Node
"""
if (not isinstance(right, Node) and right is not None):
raise TypeError("right must be an instance of Node")
self.__right = right
class BinaryTree:
""" Class BinaryTree provides methods for creating, manipulating
and traversing a binary tree.
This class has no public class attributes
"""
def __init__(self):
""" This method initializes a single node """
self.root = None
def add(self, data):
"""This method creates the root of the binary tree if the root is None.
Otherwise, it calls the _add() method
and supplies it with the required arguement
Args:
data (Int): An integer value
"""
if(self.root == None):
self.root = Node(data)
else:
self._add(data, self.root)
    def _add(self, data, node):
        """ This method adds a node to the binary tree.
Args:
data (Int): An integer value
node (Node): A node in the tree
"""
if(data < node.data):
if(node.left != None):
self._add(data, node.left)
else:
node.left = Node(data)
else:
if(node.right != None):
self._add(data, node.right)
else:
node.right = Node(data)
def delete_binary_tree(self):
""" This method deletes a binary tree """
self.root = None
def is_empty(self):
""" This method checks whether the binary tree is empty"""
return (self.root == None)
    def print_tree(self):
        """ This method calls the _print_tree() method
if the root of the tree is not None
"""
if(self.root != None):
self._print_tree(self.root)
else:
print("The binary tree is empty")
def _print_tree(self, node):
""" This method recursively prints the values of the nodes in the tree
Args:
node (Node): A node in the tree
"""
if (node != None):
self._print_tree(node.left)
print("{:d} is a value held in a node".format(node.data))
self._print_tree(node.right)
def find(self, data):
""" This calls the _find() method if the tree is not empty
Args:
data (Int): Integer value
"""
if type(data) is not int:
raise TypeError("Data must be an integer value")
if(self.root != None):
self._find(data, self.root)
else:
print("Found None")
    def _find(self, data, node):
        """ This method recursively searches the tree for the argument supplied
Args:
data (Int): Integer value
node (Node): A node in the tree
"""
if (data == node.data):
print("Found {:d} in {}".format(node.data, node))
elif (data < node.data and node.left != None):
self._find(data, node.left)
elif (data > node.data and node.right != None):
self._find(data, node.right)
def main():
binary_tree = BinaryTree()
binary_tree.add(4)
binary_tree.add(5)
binary_tree.add(3)
binary_tree.add(7)
binary_tree.add(13)
binary_tree.add(10)
binary_tree.add(2)
binary_tree.add(9)
binary_tree.print_tree()
binary_tree.find(10)
if __name__ == '__main__':
main()
| 4.25
| 4
|
python_collector/peimar/inverter/config.py
|
cislow970/raspberry-solar-mon
| 6
|
12782749
|
import array as arr
import pytz
# Owned
__project__ = "peimar"
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "0.0.4"
__date__ = "02/11/2021"
__email__ = "<<EMAIL>>"
# Inverter Web Server
inverter_server = "192.168.1.8"
inverter_port = 80
# Inverter metric decimals
cf = arr.array('I', [0, 2, 1, 2, 1, 1, 2, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 2, 1, 2, 1, 2, 1, 1, 1])
# Inverter timezone
timezone = pytz.timezone("Europe/Rome")
# Production time slot
start_hour = '07'
start_minute = '30'
end_hour = '19'
end_minute = '00'
dst_start_hour = '06'
dst_start_minute = '00'
dst_end_hour = '21'
dst_end_minute = '30'
# InfluxDB Connection
influxdb_host = "localhost"
influxdb_port = 8086
influxdb_repo = "peimar"
influxdb_user = "peimar"
influxdb_pwd = "<PASSWORD>"
# Log path
logdefaultpath = "/var/log/peimar/default.log"
loginfopath = "/var/log/peimar/info.log"
| 2.28125
| 2
|
AD14-flask-admin-backup-demo/app/admin_backup/__init__.py
|
AngelLiang/Flask-Demos
| 3
|
12782750
|
import os
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from .backup import Backup
from .serializer import Serializer
from .autoclean import BackupAutoClean
from .mixins import AdminBackupModelViewMixin
from .fileadmin import BackupFileAdmin
class FlaskAdminBackup:
def __init__(self, app=None, db=None, admin=None):
if app is not None:
            self.init_app(app, db=db, admin=admin)
def init_app(self, app, db=None, admin=None, backup=None, serializer=None):
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['flask-admin-backup'] = self
app.config.setdefault('ADMIN_BACKUP_FOLDER_NAME', 'databackup')
app.config.setdefault('ADMIN_BACKUP_PATH', os.getcwd())
app.config.setdefault('ADMIN_BACKUP_PREFIX', 'db-bkp')
app.config.setdefault('ADMIN_BACKUP_FILEADMIN_NAME', 'Backup')
self.app = app
self.db = db
self.fileadmin_name = self.app.config['ADMIN_BACKUP_FILEADMIN_NAME']
self.prefix = self.app.config['ADMIN_BACKUP_PREFIX']
self.folder_path = os.path.join(
self.app.config['ADMIN_BACKUP_PATH'],
self.app.config['ADMIN_BACKUP_FOLDER_NAME'])
self.backup = backup or Backup(
path=self.folder_path, prefix=self.prefix)
self.target = self.backup.get_target()
self.serializer = serializer or Serializer(db=db)
if admin:
self.add_file_view(admin)
def add_file_view(self, admin):
admin.add_view(BackupFileAdmin(
self.folder_path,
name=self.fileadmin_name))
    def create(self, class_name, contents):
        """Back up data.
:param class_name: str,
:param contents: list,
:return: bool
"""
data = self.serializer.dump_data(contents)
        filename = self.backup.generate_name(class_name)  # generate the backup file name
full_path = self.target.create_file(filename, data)
rows = len(self.serializer.load_data(data))
if full_path:
print('==> {} rows from {} saved as {}'.format(
rows, class_name, full_path))
return True
else:
print('==> Error creating {} at {}'.format(
filename, self.target.path))
return False
    def restore(self, path):
        """Restore data from a backup file.
        :param path: path to the backup file
"""
contents = self.target.read_file(path)
successes = []
fails = []
db = self.db
rows = self.serializer.load_data(contents)
for row in rows:
try:
                db.session.merge(row)  # uses db.session.merge
                db.session.commit()  # could this be replaced with flush?
successes.append(row)
except (IntegrityError, InvalidRequestError):
db.session.rollback()
fails.append(row)
return successes, fails
def autoclean(self):
"""
Remove a series of backup files based on the following rules:
* Keeps all the backups from the last 7 days
* Keeps the most recent backup from each week of the last month
* Keeps the most recent backup from each month of the last year
* Keeps the most recent backup from each year of the remaining years
"""
backup = self.backup
backup.files = tuple(backup.target.get_files())
if not backup.files:
print('==> No backups found.')
return None
cleaning = BackupAutoClean(backup.get_timestamps())
white_list = cleaning.white_list
black_list = cleaning.black_list
if not black_list:
print('==> No backup to be deleted.')
return None
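# --- Editor-added wiring sketch (not part of the original package) ---
# A hedged illustration of how the extension might be attached to an app.
# Flask, Flask-SQLAlchemy and Flask-Admin are assumed to be installed; the
# variable names are illustrative.
#
#   from flask import Flask
#   from flask_sqlalchemy import SQLAlchemy
#   from flask_admin import Admin
#
#   app = Flask(__name__)
#   db = SQLAlchemy(app)
#   admin = Admin(app)
#   admin_backup = FlaskAdminBackup()
#   admin_backup.init_app(app, db=db, admin=admin)  # also adds the Backup file view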
| 2.25
| 2
|