content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import RegexValidator
class Loja(models.Model):
    """
    Model representing a physical store ("Loja").
    """
    # TODO: create a shopping-mall app and link each store to a mall.
    # NOTE(review): OneToOneField without on_delete — legacy Django (<2.0) style;
    # confirm target Django version before adding on_delete.
    user = models.OneToOneField(User)
    nome_loja = models.CharField("Nome da Loja", max_length=250)
    email_loja = models.EmailField("Email da Loja", max_length=250)
    site_loja = models.URLField("Site", max_length=250, blank=True)
    # CNPJ (Brazilian company registry number) in the 00.000.000/0000-00 format.
    cnpj_loja = models.CharField("CNPJ",max_length=20, validators=[RegexValidator(regex=r'^\d{2}\.\d{3}\.\d{3}\/\d{4}\-\d{2}$', message='CNPJ Inválido'),])
    representante_loja = models.CharField("Representante da Loja", max_length=250)
    # Brazilian phone number with area code, e.g. (11)2345-6789 or (11)98765-4321.
    representante_telefone = models.CharField("Telefone do Representante", max_length=15, validators=[RegexValidator(regex=r'^\((10)|([1-9][1-9])\)[2-9][0-9]{3}-[0-9]{4}|[2-9][0-9]{4}-[0-9]{4}$', message='Número de Telefone Inválido'),])
    estado_loja = models.CharField("Estado", max_length=250)
    cidade_loja = models.CharField("Cidade", max_length=250)
    bairro_loja = models.CharField("Bairro", max_length=250)
    # Brazilian postal code in the 00000-000 format.
    cep_loja = models.CharField("Cep", max_length=10, validators=[RegexValidator(regex=r'^[0-9]{5}-[0-9]{3}$', message='CEP Inválido'),])
    centro = models.ForeignKey('centro_comercial.CentroComercial', verbose_name="Centro Comercial")

    class Meta:
        verbose_name_plural = "Lojas"
        verbose_name = "Loja"

    # Python 2-style string representation (file also uses the old coding header).
    def __unicode__(self):
        return self.nome_loja
# Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging

# Name under which all nfvbench loggers are registered.
_product_name = 'nfvbench'
def setup(mute_stdout=False):
    """Configure the console handler of the product logger.

    :param mute_stdout: when True, attach a NullHandler so nothing is
        written to stdout; otherwise attach a StreamHandler with a
        timestamp/level formatter.
    """
    if mute_stdout:
        console_handler = logging.NullHandler()
    else:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    # Add handler to the shared product logger.
    logging.getLogger(_product_name).addHandler(console_handler)
    # Disable unnecessary information capture (thread/process data per record).
    logging.logThreads = 0
    logging.logProcesses = 0
    # Make sure each log record does not have a source file name attached.
    # pylint: disable=protected-access
    logging._srcfile = None
    # pylint: enable=protected-access
def add_file_logger(logfile):
    """Attach a file handler (overwrite mode) to the product logger.

    :param logfile: path of the log file; falsy values are ignored.
    """
    if not logfile:
        return
    handler = logging.FileHandler(logfile, mode='w')
    handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logging.getLogger(_product_name).addHandler(handler)
def set_level(debug=False):
    """Set the product logger to DEBUG when *debug* is True, else INFO."""
    level = logging.DEBUG if debug else logging.INFO
    logging.getLogger(_product_name).setLevel(level)
def getLogger():
    """Return the logger shared by all nfvbench modules."""
    return logging.getLogger(_product_name)
# Module-level logger instance shared by importers of this module.
LOG = getLogger()
|
# Import libs
import os

# NOTE(review): removed a stray top-level ``return getmarkdown(mod)`` line
# that was a SyntaxError ('return' outside a function) and referenced
# undefined names (``getmarkdown``, ``mod``) — likely a bad paste.
# Taken from utils.sys_utils
# Taken from utils.sys_utils
def get_all_directories_from_path(directory_path):
    """Return the set of immediate sub-directory names of *directory_path*.

    directory_path:
        Given path that already exists.
    Returns:
        A set of directory names found at the top level of the path
        (empty set if the path yields nothing).
    """
    # os.walk yields (dirpath, dirnames, filenames); only the first tuple
    # (the top level of the tree) is inspected.
    for _dirpath, top_level_dirs, _filenames in os.walk(directory_path):
        return set(top_level_dirs)
    return set()
def get_all_files_from_path(directory_path,
                            file_extension=None):
    """Return the set of file names directly inside *directory_path*.

    directory_path:
        Given path that already exists.
    file_extension:
        If given, only file names ending in this extension are returned;
        a leading dot is accepted and ignored (``"py"`` and ``".py"`` are
        equivalent).
    Returns:
        A set of file names found at the top level of the path.
    """
    files_in_path = set()
    # Normalize the extension ONCE, outside the walk loop (the original
    # re-normalized it on every iteration) and avoid shadowing the old
    # ``file`` builtin.
    suffix = None
    if file_extension:
        suffix = '.' + file_extension.replace(".", "")
    for _dirpath, _dirnames, filenames in os.walk(directory_path):
        if suffix:
            files_in_path.update(
                name for name in filenames if name.endswith(suffix))
        else:
            files_in_path.update(filenames)
        # Only the top level of the directory tree is inspected.
        break
    return files_in_path
# Top-level driver: walks the sibling ``eflow`` package and prints python
# files / candidate ``def`` lines, apparently to seed doc templates.
# NOTE(review): indentation was lost in transit — the nesting below is a
# best-effort reconstruction; verify against the original repository.

# Get the current working directory
current_work_dir = os.getcwd()
# Parent of the CWD, then into the 'eflow' package (POSIX-only path logic).
project_dir = current_work_dir[:current_work_dir.rfind('/')] + "/eflow/"
# Get all directories from project
all_dirs = get_all_directories_from_path(project_dir)
for dir_name in all_dirs:
    # Ignore any hidden files
    if dir_name[0] == "_":
        continue
    # Ignore utils for now
    if dir_name == "utils":
        continue
    dir_files = get_all_files_from_path(project_dir + dir_name,
                                        "py")
    print(dir_files)
    for file_name in dir_files:
        print(file_name)
        # Ignore hidden file
        if file_name[0] == "_":
            continue
        def_start = False
        with open(f'{project_dir}{dir_name}/{file_name}') as fp:
            line = fp.readline()
            # NOTE(review): this loop discards the first line read above and
            # looks broken: ``while line`` already excludes line == "" so the
            # ``if line == "":`` body can never run.
            while line:
                line = fp.readline()
                if line == "":
                    # Create template
                    # NOTE(review): ``or "#def "`` is a non-empty string and
                    # therefore always truthy — this condition is always True;
                    # probably meant ``or "#def " in line``.
                    if "# def " in line or "#def ":
                        continue
                    if ("def " in line and "def _" not in line) or def_start:
                        def_start = True
                        if "):" in line:
                            def_start = False
                        print(line)
                        break
                    break
import re
from vee.requirement import RequirementSet
class SolveError(ValueError):
    """Raised when the abstract requirements cannot be satisfied."""
def verbose(depth, step, *args):
    """Print *step* (plus any extra args) indented by *depth* spaces."""
    indent = ' ' * depth
    print('{}{}'.format(indent, step), *args)
def solve(*args, **kwargs):
    """Solve abstract requirements into concrete provisions.
    :param RequirementSet requires: The abstract requirements to solve.
    :param Manifest manifest: Where to pull packages from.
    :return dict: Concrete provisions that satisfy the abstract requirements,
        or None when no solution exists.
    :raises SolveError: if unable to satisfy the requirements.
    """
    # Equivalent to ``next(iter_solve(...), None)``: first solution or None.
    for solution in iter_solve(*args, **kwargs):
        return solution
    return None
def iter_solve(requires, manifest, log=None):
    """Yield every solution dict satisfying *requires* from *manifest*.

    *log* is an optional ``log(depth, step, *args)`` callable; a no-op is
    substituted when it is falsy.
    """
    log = log or (lambda *args: None)
    pending = list(RequirementSet.coerce(requires).items())
    solved = {}
    return _solve(solved, pending, manifest, log)
def _solve(done, todo, manifest, log, depth=0):
    """Recursive generator yielding completed solutions.

    done: mapping of package name -> chosen variant so far.
    todo: list of (name, requirements) pairs still to satisfy.
    manifest: package source; ``manifest.get(name)`` returns a package or None.
    log: callable ``log(depth, step, *args)`` for tracing.
    """
    if not todo:
        # Every requirement is satisfied: `done` is a complete solution.
        yield done
        return
    name, reqs = todo[0]
    pkg = manifest.get(name)
    if pkg is None:
        raise SolveError("package {!r} does not exist".format(name))
    log(depth, 'start', name, pkg)
    variants = pkg.flattened()
    # Try each variant of the package in turn.
    for vi, var in enumerate(variants):
        log(depth, 'variant', vi, len(variants), var)
        failed = False
        # Make sure it satisfies all solved requirements.
        for prev in done.values():
            req = prev.requires.get(name)
            if req and not var.provides.satisfies(req):
                log(depth, 'fail on existing', prev.name, req)
                failed = True
                break
        if failed:
            continue
        next_todo = []
        for name2, req in var.requires.items():
            pkg2 = done.get(name2)
            log(depth, 'requires', name2, req, pkg2)
            if pkg2 is None:
                # We need to solve this.
                # We don't grab it immediately, because we want to do a
                # breadth-first search.
                log(depth, 'to solve')
                next_todo.append((name2, req))
            elif not pkg2.provides.satisfies(req):
                # This variant doesn't work with the already done packages;
                # move onto the next one.
                log(depth, 'fail variant')
                failed = True
                break
        if failed:
            continue
        # Go a step deeper.
        # We clone everything so that the call stack maintains the state we
        # need to keep going from here.
        next_done = done.copy()
        next_done[name] = var
        # New requirements go AFTER the remaining todo list (breadth-first).
        next_todo = todo[1:] + next_todo
        yield from _solve(next_done, next_todo, manifest, log, depth + 1)
|
# Example: ranking a dict of ticker -> price with zip/min/max/sorted.
stocks = {
    'GOOG': 234.43,
    'FB': 132.54,
    'AAPL': 543.21,
    'YAHOO': 23.34
}

# (price, ticker) pairs make min/max/sorted compare by price first.
price_ticker_pairs = list(zip(stocks.values(), stocks.keys()))

# get the minimum value
print(min(price_ticker_pairs))
# get the maximum value
print(max(price_ticker_pairs))
# sort the dictionary prices
print(sorted(price_ticker_pairs))
# sort the dictionary keys alphabatically
print(sorted(zip(stocks.keys(), stocks.values())))
|
# coding: utf-8
import pprint
import re
import six
class ListImagesRequest:
    """Request model for the list-images API (huaweicloud SDK).

    NOTE(review): this follows the auto-generated SDK model pattern (see
    the ``__init__`` docstring) — avoid hand-editing the repetitive
    property boilerplate; regenerate instead.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'imagetype': 'str',
        'protected': 'str',
        'id': 'str',
        'visibility': 'str',
        'status': 'str',
        'name': 'str',
        'os_type': 'str',
        'virtual_env_type': 'str',
        'isregistered': 'str',
        'limit': 'int',
        'offset': 'int',
        'sort_key': 'str',
        'sort_dir': 'str',
        'support_kvm': 'str',
        'support_kvm_gpu_type': 'str',
        'support_kvm_ascend_310': 'str',
        'support_kvm_hi1822_hiovs': 'str',
        'support_arm': 'str',
        'support_gpu_t4': 'str'
    }

    # Wire names; several query parameters use a '__' prefix on the wire.
    attribute_map = {
        'imagetype': '__imagetype',
        'protected': 'protected',
        'id': 'id',
        'visibility': 'visibility',
        'status': 'status',
        'name': 'name',
        'os_type': '__os_type',
        'virtual_env_type': 'virtual_env_type',
        'isregistered': '__isregistered',
        'limit': 'limit',
        'offset': 'offset',
        'sort_key': 'sort_key',
        'sort_dir': 'sort_dir',
        'support_kvm': '__support_kvm',
        'support_kvm_gpu_type': '__support_kvm_gpu_type',
        'support_kvm_ascend_310': '__support_kvm_ascend_310',
        'support_kvm_hi1822_hiovs': '__support_kvm_hi1822_hiovs',
        'support_arm': '__support_arm',
        'support_gpu_t4': '__support_gpu_t4'
    }

    def __init__(self, imagetype=None, protected=None, id=None, visibility=None, status=None, name=None, os_type=None, virtual_env_type=None, isregistered=None, limit=None, offset=None, sort_key=None, sort_dir=None, support_kvm=None, support_kvm_gpu_type=None, support_kvm_ascend_310=None, support_kvm_hi1822_hiovs=None, support_arm=None, support_gpu_t4=None):
        """ListImagesRequest - a model defined in huaweicloud sdk"""
        # Backing fields; every attribute is optional and defaults to None.
        self._imagetype = None
        self._protected = None
        self._id = None
        self._visibility = None
        self._status = None
        self._name = None
        self._os_type = None
        self._virtual_env_type = None
        self._isregistered = None
        self._limit = None
        self._offset = None
        self._sort_key = None
        self._sort_dir = None
        self._support_kvm = None
        self._support_kvm_gpu_type = None
        self._support_kvm_ascend_310 = None
        self._support_kvm_hi1822_hiovs = None
        self._support_arm = None
        self._support_gpu_t4 = None
        self.discriminator = None
        # Only explicitly supplied arguments are assigned (through the
        # property setters), so unset fields stay None.
        if imagetype is not None:
            self.imagetype = imagetype
        if protected is not None:
            self.protected = protected
        if id is not None:
            self.id = id
        if visibility is not None:
            self.visibility = visibility
        if status is not None:
            self.status = status
        if name is not None:
            self.name = name
        if os_type is not None:
            self.os_type = os_type
        if virtual_env_type is not None:
            self.virtual_env_type = virtual_env_type
        if isregistered is not None:
            self.isregistered = isregistered
        if limit is not None:
            self.limit = limit
        if offset is not None:
            self.offset = offset
        if sort_key is not None:
            self.sort_key = sort_key
        if sort_dir is not None:
            self.sort_dir = sort_dir
        if support_kvm is not None:
            self.support_kvm = support_kvm
        if support_kvm_gpu_type is not None:
            self.support_kvm_gpu_type = support_kvm_gpu_type
        if support_kvm_ascend_310 is not None:
            self.support_kvm_ascend_310 = support_kvm_ascend_310
        if support_kvm_hi1822_hiovs is not None:
            self.support_kvm_hi1822_hiovs = support_kvm_hi1822_hiovs
        if support_arm is not None:
            self.support_arm = support_arm
        if support_gpu_t4 is not None:
            self.support_gpu_t4 = support_gpu_t4

    # --- Generated pass-through accessors (no validation) ---

    @property
    def imagetype(self):
        """Gets the imagetype of this ListImagesRequest.
        :return: The imagetype of this ListImagesRequest.
        :rtype: str
        """
        return self._imagetype

    @imagetype.setter
    def imagetype(self, imagetype):
        """Sets the imagetype of this ListImagesRequest.
        :param imagetype: The imagetype of this ListImagesRequest.
        :type: str
        """
        self._imagetype = imagetype

    @property
    def protected(self):
        """Gets the protected of this ListImagesRequest.
        :return: The protected of this ListImagesRequest.
        :rtype: str
        """
        return self._protected

    @protected.setter
    def protected(self, protected):
        """Sets the protected of this ListImagesRequest.
        :param protected: The protected of this ListImagesRequest.
        :type: str
        """
        self._protected = protected

    @property
    def id(self):
        """Gets the id of this ListImagesRequest.
        :return: The id of this ListImagesRequest.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this ListImagesRequest.
        :param id: The id of this ListImagesRequest.
        :type: str
        """
        self._id = id

    @property
    def visibility(self):
        """Gets the visibility of this ListImagesRequest.
        :return: The visibility of this ListImagesRequest.
        :rtype: str
        """
        return self._visibility

    @visibility.setter
    def visibility(self, visibility):
        """Sets the visibility of this ListImagesRequest.
        :param visibility: The visibility of this ListImagesRequest.
        :type: str
        """
        self._visibility = visibility

    @property
    def status(self):
        """Gets the status of this ListImagesRequest.
        :return: The status of this ListImagesRequest.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this ListImagesRequest.
        :param status: The status of this ListImagesRequest.
        :type: str
        """
        self._status = status

    @property
    def name(self):
        """Gets the name of this ListImagesRequest.
        :return: The name of this ListImagesRequest.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this ListImagesRequest.
        :param name: The name of this ListImagesRequest.
        :type: str
        """
        self._name = name

    @property
    def os_type(self):
        """Gets the os_type of this ListImagesRequest.
        :return: The os_type of this ListImagesRequest.
        :rtype: str
        """
        return self._os_type

    @os_type.setter
    def os_type(self, os_type):
        """Sets the os_type of this ListImagesRequest.
        :param os_type: The os_type of this ListImagesRequest.
        :type: str
        """
        self._os_type = os_type

    @property
    def virtual_env_type(self):
        """Gets the virtual_env_type of this ListImagesRequest.
        :return: The virtual_env_type of this ListImagesRequest.
        :rtype: str
        """
        return self._virtual_env_type

    @virtual_env_type.setter
    def virtual_env_type(self, virtual_env_type):
        """Sets the virtual_env_type of this ListImagesRequest.
        :param virtual_env_type: The virtual_env_type of this ListImagesRequest.
        :type: str
        """
        self._virtual_env_type = virtual_env_type

    @property
    def isregistered(self):
        """Gets the isregistered of this ListImagesRequest.
        :return: The isregistered of this ListImagesRequest.
        :rtype: str
        """
        return self._isregistered

    @isregistered.setter
    def isregistered(self, isregistered):
        """Sets the isregistered of this ListImagesRequest.
        :param isregistered: The isregistered of this ListImagesRequest.
        :type: str
        """
        self._isregistered = isregistered

    @property
    def limit(self):
        """Gets the limit of this ListImagesRequest.
        :return: The limit of this ListImagesRequest.
        :rtype: int
        """
        return self._limit

    @limit.setter
    def limit(self, limit):
        """Sets the limit of this ListImagesRequest.
        :param limit: The limit of this ListImagesRequest.
        :type: int
        """
        self._limit = limit

    @property
    def offset(self):
        """Gets the offset of this ListImagesRequest.
        :return: The offset of this ListImagesRequest.
        :rtype: int
        """
        return self._offset

    @offset.setter
    def offset(self, offset):
        """Sets the offset of this ListImagesRequest.
        :param offset: The offset of this ListImagesRequest.
        :type: int
        """
        self._offset = offset

    @property
    def sort_key(self):
        """Gets the sort_key of this ListImagesRequest.
        :return: The sort_key of this ListImagesRequest.
        :rtype: str
        """
        return self._sort_key

    @sort_key.setter
    def sort_key(self, sort_key):
        """Sets the sort_key of this ListImagesRequest.
        :param sort_key: The sort_key of this ListImagesRequest.
        :type: str
        """
        self._sort_key = sort_key

    @property
    def sort_dir(self):
        """Gets the sort_dir of this ListImagesRequest.
        :return: The sort_dir of this ListImagesRequest.
        :rtype: str
        """
        return self._sort_dir

    @sort_dir.setter
    def sort_dir(self, sort_dir):
        """Sets the sort_dir of this ListImagesRequest.
        :param sort_dir: The sort_dir of this ListImagesRequest.
        :type: str
        """
        self._sort_dir = sort_dir

    @property
    def support_kvm(self):
        """Gets the support_kvm of this ListImagesRequest.
        :return: The support_kvm of this ListImagesRequest.
        :rtype: str
        """
        return self._support_kvm

    @support_kvm.setter
    def support_kvm(self, support_kvm):
        """Sets the support_kvm of this ListImagesRequest.
        :param support_kvm: The support_kvm of this ListImagesRequest.
        :type: str
        """
        self._support_kvm = support_kvm

    @property
    def support_kvm_gpu_type(self):
        """Gets the support_kvm_gpu_type of this ListImagesRequest.
        :return: The support_kvm_gpu_type of this ListImagesRequest.
        :rtype: str
        """
        return self._support_kvm_gpu_type

    @support_kvm_gpu_type.setter
    def support_kvm_gpu_type(self, support_kvm_gpu_type):
        """Sets the support_kvm_gpu_type of this ListImagesRequest.
        :param support_kvm_gpu_type: The support_kvm_gpu_type of this ListImagesRequest.
        :type: str
        """
        self._support_kvm_gpu_type = support_kvm_gpu_type

    @property
    def support_kvm_ascend_310(self):
        """Gets the support_kvm_ascend_310 of this ListImagesRequest.
        :return: The support_kvm_ascend_310 of this ListImagesRequest.
        :rtype: str
        """
        return self._support_kvm_ascend_310

    @support_kvm_ascend_310.setter
    def support_kvm_ascend_310(self, support_kvm_ascend_310):
        """Sets the support_kvm_ascend_310 of this ListImagesRequest.
        :param support_kvm_ascend_310: The support_kvm_ascend_310 of this ListImagesRequest.
        :type: str
        """
        self._support_kvm_ascend_310 = support_kvm_ascend_310

    @property
    def support_kvm_hi1822_hiovs(self):
        """Gets the support_kvm_hi1822_hiovs of this ListImagesRequest.
        :return: The support_kvm_hi1822_hiovs of this ListImagesRequest.
        :rtype: str
        """
        return self._support_kvm_hi1822_hiovs

    @support_kvm_hi1822_hiovs.setter
    def support_kvm_hi1822_hiovs(self, support_kvm_hi1822_hiovs):
        """Sets the support_kvm_hi1822_hiovs of this ListImagesRequest.
        :param support_kvm_hi1822_hiovs: The support_kvm_hi1822_hiovs of this ListImagesRequest.
        :type: str
        """
        self._support_kvm_hi1822_hiovs = support_kvm_hi1822_hiovs

    @property
    def support_arm(self):
        """Gets the support_arm of this ListImagesRequest.
        :return: The support_arm of this ListImagesRequest.
        :rtype: str
        """
        return self._support_arm

    @support_arm.setter
    def support_arm(self, support_arm):
        """Sets the support_arm of this ListImagesRequest.
        :param support_arm: The support_arm of this ListImagesRequest.
        :type: str
        """
        self._support_arm = support_arm

    @property
    def support_gpu_t4(self):
        """Gets the support_gpu_t4 of this ListImagesRequest.
        :return: The support_gpu_t4 of this ListImagesRequest.
        :rtype: str
        """
        return self._support_gpu_t4

    @support_gpu_t4.setter
    def support_gpu_t4(self, support_gpu_t4):
        """Sets the support_gpu_t4 of this ListImagesRequest.
        :param support_gpu_t4: The support_gpu_t4 of this ListImagesRequest.
        :type: str
        """
        self._support_gpu_t4 = support_gpu_t4

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models/lists/dicts; mask sensitive attrs.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListImagesRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
import pytest
pytest.importorskip('eth.utils.blake') # noqa E402
from eth.utils.blake import blake
from tests.core.helpers import (
greater_equal_python36,
)
@greater_equal_python36
def test_blake():
    """blake() must yield the known 32-byte digest for b'helloworld'."""
    digest = blake(b'helloworld')
    assert len(digest) == 32
    assert digest == b'\xf2@\xa8\x02\x04\x1b_\xaf\x89E\x02\xd42I\xe0\x80\xd5\xd3\xf7\xe2\xd4Q\xf2\xcf\xc9;#|\xb5\xd2\xeeo'  # noqa: E501
|
import os
import shutil
import six
import pytest
import numpy as np
from pyshac.config import hyperparameters as hp, data
# compatible with both Python 2 and 3
# compatible with both Python 2 and 3
try:
    # Python 3 builtin; evaluating the name raises NameError on Python 2.
    FileNotFoundError
except NameError:
    # Python 2 has no FileNotFoundError; IOError is its closest equivalent.
    FileNotFoundError = IOError
def deterministic_test(func):
    """Decorator: run *func* under numpy seed 0, then re-randomize the seed."""
    @six.wraps(func)
    def seeded(*args, **kwargs):
        np.random.seed(0)
        result = func(*args, **kwargs)
        np.random.seed(None)
        return result
    return seeded
# wrapper function to clean up saved files
def cleanup_dirs(func):
    """Decorator: after *func* runs, delete the 'shac/' and 'custom/' dirs."""
    @six.wraps(func)
    def cleaned(*args, **kwargs):
        result = func(*args, **kwargs)
        # remove temporary files produced by the wrapped test
        for leftover in ('shac/', 'custom/'):
            if os.path.exists(leftover):
                shutil.rmtree(leftover)
        return result
    return cleaned
def get_hyperparameter_list():
    """Build the standard list of four single-sample test hyperparameters."""
    return [
        hp.DiscreteHyperParameter('h1', [0, 1, 2]),
        hp.DiscreteHyperParameter('h2', [3, 4, 5, 6]),
        hp.UniformContinuousHyperParameter('h3', 7, 10),
        hp.DiscreteHyperParameter('h4', ['v1', 'v2']),
    ]
def get_multi_parameter_list():
    """Build the standard list of four multi-sample test hyperparameters."""
    return [
        hp.MultiDiscreteHyperParameter('h1', [0, 1, 2], sample_count=2),
        hp.MultiDiscreteHyperParameter('h2', [3, 4, 5, 6], sample_count=3),
        hp.MultiUniformContinuousHyperParameter('h3', 7, 10, sample_count=5),
        hp.MultiDiscreteHyperParameter('h4', ['v1', 'v2'], sample_count=4),
    ]
@cleanup_dirs
def test_dataset_param_list():
    """Dataset must coerce both a plain param list and a HyperParameterList."""
    params = get_hyperparameter_list()
    dataset = data.Dataset(params)
    assert isinstance(dataset._parameters, hp.HyperParameterList)
    dataset.set_parameters(params)
    assert isinstance(dataset._parameters, hp.HyperParameterList)
    h = hp.HyperParameterList(params)
    dataset.set_parameters(h)
    assert isinstance(dataset._parameters, hp.HyperParameterList)
@cleanup_dirs
def test_dataset_multi_param_list():
    """Same coercion check as above, but with multi-sample parameters."""
    params = get_multi_parameter_list()
    dataset = data.Dataset(params)
    assert isinstance(dataset._parameters, hp.HyperParameterList)
    dataset.set_parameters(params)
    assert isinstance(dataset._parameters, hp.HyperParameterList)
    h = hp.HyperParameterList(params)
    dataset.set_parameters(h)
    assert isinstance(dataset._parameters, hp.HyperParameterList)
@cleanup_dirs
def test_dataset_basedir():
    """Constructing a Dataset must create its default base directory."""
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    assert os.path.exists(dataset.basedir)
@cleanup_dirs
def test_dataset_basedir_custom():
    """A custom basedir must be created and the default 'shac' dir must not."""
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h, basedir='custom')
    assert os.path.exists(dataset.basedir)
    assert not os.path.exists('shac')
@cleanup_dirs
def test_dataset_add_sample():
    """add_sample must accumulate (x, y) pairs with the expected shapes."""
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    x, y = dataset.get_dataset()
    assert len(dataset) == 5
    # 4 single-sample parameters -> 4 columns.
    assert x.shape == (5, 4)
    assert y.shape == (5,)
@cleanup_dirs
def test_dataset_multi_add_sample():
    """add_sample with multi-sample params: sample_counts 2+3+5+4 -> 14 cols."""
    params = get_multi_parameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    x, y = dataset.get_dataset()
    assert len(dataset) == 5
    assert x.shape == (5, 14)
    assert y.shape == (5,)
@cleanup_dirs
def test_set_dataset():
    """set_dataset must accept numpy arrays and python lists, reject None."""
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    # numpy arrays
    samples = [(np.array(h.sample()), np.random.uniform()) for _ in range(5)]
    x, y = zip(*samples)
    x = np.array(x)
    y = np.array(y)
    dataset.set_dataset(x, y)
    assert len(dataset) == 5
    dataset.clear()
    # python arrays
    samples = [(h.sample(), float(np.random.uniform())) for _ in range(5)]
    x, y = zip(*samples)
    dataset.set_dataset(x, y)
    assert len(dataset) == 5
    # None data must raise
    with pytest.raises(TypeError):
        dataset.set_dataset(None, int(6))
    with pytest.raises(TypeError):
        dataset.set_dataset([1, 2, 3], None)
    with pytest.raises(TypeError):
        dataset.set_dataset(None, None)
@cleanup_dirs
def test_multi_set_dataset():
    """Same set_dataset contract, exercised with multi-sample parameters."""
    params = get_multi_parameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    # numpy arrays
    samples = [(np.array(h.sample()), np.random.uniform()) for _ in range(5)]
    x, y = zip(*samples)
    x = np.array(x)
    y = np.array(y)
    dataset.set_dataset(x, y)
    assert len(dataset) == 5
    dataset.clear()
    # python arrays
    samples = [(h.sample(), float(np.random.uniform())) for _ in range(5)]
    x, y = zip(*samples)
    dataset.set_dataset(x, y)
    assert len(dataset) == 5
    # None data must raise
    with pytest.raises(TypeError):
        dataset.set_dataset(None, int(6))
    with pytest.raises(TypeError):
        dataset.set_dataset([1, 2, 3], None)
    with pytest.raises(TypeError):
        dataset.set_dataset(None, None)
@cleanup_dirs
@deterministic_test
def test_dataset_get_best_parameters():
    """get_best_parameters must return the sample with max/min objective."""
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    # objective=None is invalid.
    with pytest.raises(ValueError):
        dataset.get_best_parameters(None)
    # Test with empty dataset
    assert dataset.get_best_parameters() is None
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    objective_values = [v for h, v in samples]
    min_index = np.argmin(objective_values)
    max_index = np.argmax(objective_values)
    max_hp = list(dataset.get_best_parameters(objective='max').values())
    min_hp = list(dataset.get_best_parameters(objective='min').values())
    assert max_hp == samples[max_index][0]
    assert min_hp == samples[min_index][0]
@cleanup_dirs
@deterministic_test
def test_dataset_multi_get_best_parameters():
    """get_best_parameters with multi params; results need flattening first."""
    params = get_multi_parameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    with pytest.raises(ValueError):
        dataset.get_best_parameters(None)
    # Test with empty dataset
    assert dataset.get_best_parameters() is None
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    objective_values = [v for h, v in samples]
    min_index = np.argmin(objective_values)
    max_index = np.argmax(objective_values)
    max_hp = data.flatten_parameters(dataset.get_best_parameters(objective='max'))
    min_hp = data.flatten_parameters(dataset.get_best_parameters(objective='min'))
    assert max_hp == samples[max_index][0]
    assert min_hp == samples[min_index][0]
@cleanup_dirs
def test_dataset_parameters():
    """The parameters property getter/setter must preserve the param count."""
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    assert len(params) == len(dataset.parameters)
    dataset.parameters = params
    assert len(params) == len(dataset.parameters)
@cleanup_dirs
def test_dataset_serialization_deserialization():
    """Round-trip save/restore/load_from_directory on the default basedir."""
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    # serialization
    dataset.save_dataset()
    assert len(dataset) == 5
    assert os.path.exists(dataset.data_path)
    assert os.path.exists(dataset.parameter_path)
    # deserialization
    dataset.clear()
    assert len(dataset) == 0
    dataset.restore_dataset()
    assert len(dataset) == 5
    assert os.path.exists(dataset.data_path)
    assert os.path.exists(dataset.parameter_path)
    # deserialization from class
    path = os.path.join('shac', 'datasets')
    dataset2 = data.Dataset.load_from_directory(path)
    assert dataset2.parameters is not None
    assert len(dataset2.X) == 5
    assert len(dataset2.Y) == 5
    assert len(dataset2) == 5
    dataset3 = data.Dataset.load_from_directory()
    assert dataset3.parameters is not None
    assert len(dataset3.X) == 5
    assert len(dataset3.Y) == 5
    # serialization of empty get_dataset must fail loudly
    dataset = data.Dataset()
    with pytest.raises(FileNotFoundError):
        dataset.load_from_directory('null')
    with pytest.raises(ValueError):
        dataset.save_dataset()
@cleanup_dirs
def test_dataset_multi_serialization_deserialization():
    """Round-trip save/restore with multi-sample parameters."""
    params = get_multi_parameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    # serialization
    dataset.save_dataset()
    assert len(dataset) == 5
    assert os.path.exists(dataset.data_path)
    assert os.path.exists(dataset.parameter_path)
    # deserialization
    dataset.clear()
    assert len(dataset) == 0
    dataset.restore_dataset()
    assert len(dataset) == 5
    assert os.path.exists(dataset.data_path)
    assert os.path.exists(dataset.parameter_path)
    # deserialization from class
    path = os.path.join('shac', 'datasets')
    dataset2 = data.Dataset.load_from_directory(path)
    assert dataset2.parameters is not None
    assert len(dataset2.X) == 5
    assert len(dataset2.Y) == 5
    assert len(dataset2) == 5
    dataset3 = data.Dataset.load_from_directory()
    assert dataset3.parameters is not None
    assert len(dataset3.X) == 5
    assert len(dataset3.Y) == 5
    # serialization of empty get_dataset must fail loudly
    dataset = data.Dataset()
    with pytest.raises(FileNotFoundError):
        dataset.load_from_directory('null')
    with pytest.raises(ValueError):
        dataset.save_dataset()
@cleanup_dirs
def test_dataset_serialization_deserialization_custom_basepath():
    """Round-trip save/restore when the Dataset uses a custom basedir."""
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h, basedir='custom')
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    # serialization
    dataset.save_dataset()
    assert len(dataset) == 5
    assert os.path.exists(dataset.data_path)
    assert os.path.exists(dataset.parameter_path)
    # deserialization
    dataset.clear()
    assert len(dataset) == 0
    dataset.restore_dataset()
    assert len(dataset) == 5
    assert os.path.exists(dataset.data_path)
    assert os.path.exists(dataset.parameter_path)
    # deserialization from class
    path = os.path.join('custom', 'datasets')
    dataset2 = data.Dataset.load_from_directory(path)
    assert dataset2.parameters is not None
    assert len(dataset2.X) == 5
    assert len(dataset2.Y) == 5
    assert len(dataset2) == 5
    dataset3 = data.Dataset.load_from_directory('custom')
    assert dataset3.parameters is not None
    assert len(dataset3.X) == 5
    assert len(dataset3.Y) == 5
    # serialization of empty get_dataset must fail loudly
    dataset = data.Dataset(basedir='custom')
    with pytest.raises(FileNotFoundError):
        dataset.load_from_directory('null')
    with pytest.raises(ValueError):
        dataset.save_dataset()
@cleanup_dirs
def test_dataset_serialization_deserialization_custom_param():
    """Custom (user-registered) hyperparameter classes must survive a
    save/restore round trip and reappear in the restored parameter map."""
    class MockDiscreteHyperParameter(hp.DiscreteHyperParameter):
        def __init__(self, name, values, seed=None):
            super(MockDiscreteHyperParameter, self).__init__(name, values, seed)
    # register the new hyper parameters
    hp.set_custom_parameter_class(MockDiscreteHyperParameter)
    params = get_hyperparameter_list()
    params.append(MockDiscreteHyperParameter('mock-param', ['x', 'y']))
    h = hp.HyperParameterList(params, seed=0)
    dataset = data.Dataset(h)
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    # serialization
    dataset.save_dataset()
    assert len(dataset) == 5
    assert os.path.exists(dataset.data_path)
    assert os.path.exists(dataset.parameter_path)
    # deserialization
    dataset.clear()
    assert len(dataset) == 0
    dataset.restore_dataset()
    assert len(dataset) == 5
    assert os.path.exists(dataset.data_path)
    assert os.path.exists(dataset.parameter_path)
    # deserialization from class
    path = os.path.join('shac', 'datasets')
    dataset2 = data.Dataset.load_from_directory(path)
    assert dataset2.parameters is not None
    assert len(dataset2.X) == 5
    assert len(dataset2.Y) == 5
    assert len(dataset2) == 5
    assert 'mock-param' in dataset2.parameters.name_map.values()
    assert dataset2.parameters.num_choices == 5
    dataset3 = data.Dataset.load_from_directory()
    assert dataset3.parameters is not None
    assert len(dataset3.X) == 5
    assert len(dataset3.Y) == 5
    assert 'mock-param' in dataset3.parameters.name_map.values()
    assert dataset3.parameters.num_choices == 5
    # serialization of empty get_dataset must fail loudly
    dataset = data.Dataset()
    with pytest.raises(FileNotFoundError):
        dataset.load_from_directory('null')
    with pytest.raises(ValueError):
        dataset.save_dataset()
@cleanup_dirs
@deterministic_test
def test_dataset_single_encoding_decoding():
    """encode_dataset/decode_dataset round trip for one single-param sample."""
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    sample = (h.sample(), np.random.uniform())
    dataset.add_sample(*sample)
    encoded_x, encoded_y = dataset.encode_dataset()
    # With a single sample, the (default) objective encoding maps y to 0.
    y_values = [0.]
    assert encoded_x.shape == (1, 4)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (1,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
    decoded_x = dataset.decode_dataset(encoded_x)
    assert decoded_x.shape == (1, 4)
@cleanup_dirs
@deterministic_test
def test_dataset_single_multi_encoding_decoding():
    """Encode/decode a single sample drawn from the multi-parameter space.

    NOTE(review): expected values depend on the fixed RNG state from
    @deterministic_test — do not reorder the sampling calls.
    """
    params = get_multi_parameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    sample = (h.sample(), np.random.uniform())
    dataset.add_sample(*sample)
    encoded_x, encoded_y = dataset.encode_dataset()
    # expected encoded objective for a single sample
    y_values = [0.]
    # multi-parameter samples encode to 14 columns (vs 4 for the simple list)
    assert encoded_x.shape == (1, 14)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (1,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
    decoded_x = dataset.decode_dataset(encoded_x)
    assert decoded_x.shape == (1, 14)
@cleanup_dirs
@deterministic_test
def test_dataset_single_encoding_decoding_min():
    """Same as the single-sample test, but with objective='min'.

    NOTE(review): expected values depend on the fixed RNG state from
    @deterministic_test — do not reorder the sampling calls.
    """
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    sample = (h.sample(), np.random.uniform())
    dataset.add_sample(*sample)
    encoded_x, encoded_y = dataset.encode_dataset(objective='min')
    # expected encoded objective for a single sample
    y_values = [0.]
    assert encoded_x.shape == (1, 4)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (1,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
    decoded_x = dataset.decode_dataset(encoded_x)
    assert decoded_x.shape == (1, 4)
@cleanup_dirs
@deterministic_test
def test_dataset_single_multi_encoding_decoding_min():
    """Single multi-parameter sample with objective='min'.

    NOTE(review): expected values depend on the fixed RNG state from
    @deterministic_test — do not reorder the sampling calls.
    """
    params = get_multi_parameter_list()
    h = hp.HyperParameterList(params)
    dataset = data.Dataset(h)
    sample = (h.sample(), np.random.uniform())
    dataset.add_sample(*sample)
    encoded_x, encoded_y = dataset.encode_dataset(objective='min')
    # expected encoded objective for a single sample
    y_values = [0.]
    assert encoded_x.shape == (1, 14)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (1,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
    decoded_x = dataset.decode_dataset(encoded_x)
    assert decoded_x.shape == (1, 14)
@cleanup_dirs
@deterministic_test
def test_dataset_encoding_decoding():
    """Encode/decode five seeded samples, then encode an external (x, y) batch.

    NOTE(review): the hard-coded y_values depend on the seed (0) and on the
    exact order of h.sample()/np.random.uniform() calls — do not reorder.
    """
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params, seed=0)
    dataset = data.Dataset(h)
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    encoded_x, encoded_y = dataset.encode_dataset(objective='min')
    # ranking of the 5 sampled objective values under 'min'
    y_values = [0., 0., 0., 1., 1.]
    assert encoded_x.shape == (5, 4)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (5,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
    # decoding an explicit matrix and decoding the stored dataset must agree
    decoded_x = dataset.decode_dataset(encoded_x)
    decoded_x2 = dataset.decode_dataset()
    assert decoded_x.shape == (5, 4)
    assert len(decoded_x) == len(decoded_x2)
    x, y = dataset.get_dataset()
    # compare only the numeric columns of the round-trip
    x_ = x[:, :3].astype('float')
    decoded_x_ = decoded_x[:, :3].astype('float')
    assert np.allclose(x_, decoded_x_, rtol=1e-3)
    # encoding a batch that was never added to the dataset
    samples2 = [(h.sample(), np.random.uniform()) for _ in range(5)]
    x, y = zip(*samples2)
    encoded_x, encoded_y = dataset.encode_dataset(x, y, objective='min')
    y_values = [0., 1., 0., 0., 1.]
    assert encoded_x.shape == (5, 4)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (5,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
@cleanup_dirs
@deterministic_test
def test_dataset_multi_encoding_decoding():
    """Multi-parameter variant of the five-sample encode/decode test.

    NOTE(review): the hard-coded y_values depend on the seed (0) and on the
    exact order of h.sample()/np.random.uniform() calls — do not reorder.
    """
    params = get_multi_parameter_list()
    h = hp.HyperParameterList(params, seed=0)
    dataset = data.Dataset(h)
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    encoded_x, encoded_y = dataset.encode_dataset(objective='min')
    # ranking of the 5 sampled objective values under 'min'
    y_values = [0., 0., 0., 1., 1.]
    assert encoded_x.shape == (5, 14)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (5,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
    # decoding an explicit matrix and decoding the stored dataset must agree
    decoded_x = dataset.decode_dataset(encoded_x)
    decoded_x2 = dataset.decode_dataset()
    assert decoded_x.shape == (5, 14)
    assert len(decoded_x) == len(decoded_x2)
    x, y = dataset.get_dataset()
    # compare only the numeric columns of the round-trip
    x_ = x[:, :10].astype('float')
    decoded_x_ = decoded_x[:, :10].astype('float')
    assert np.allclose(x_, decoded_x_, rtol=1e-3)
    # encoding a batch that was never added to the dataset
    samples2 = [(h.sample(), np.random.uniform()) for _ in range(5)]
    x, y = zip(*samples2)
    encoded_x, encoded_y = dataset.encode_dataset(x, y, objective='min')
    y_values = [0., 1., 0., 0., 1.]
    assert encoded_x.shape == (5, 14)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (5,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
@cleanup_dirs
@deterministic_test
def test_dataset_encoding_decoding_min():
    """Five-sample encode/decode with objective='min' (simple parameters).

    NOTE(review): the hard-coded y_values depend on the seed (0) and on the
    exact order of h.sample()/np.random.uniform() calls — do not reorder.
    """
    params = get_hyperparameter_list()
    h = hp.HyperParameterList(params, seed=0)
    dataset = data.Dataset(h)
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    encoded_x, encoded_y = dataset.encode_dataset(objective='min')
    # ranking of the 5 sampled objective values under 'min'
    y_values = [0., 0., 0., 1., 1.]
    assert encoded_x.shape == (5, 4)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (5,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
    decoded_x = dataset.decode_dataset(encoded_x)
    assert decoded_x.shape == (5, 4)
    x, y = dataset.get_dataset()
    # compare only the numeric columns of the round-trip
    x_ = x[:, :3].astype('float')
    decoded_x_ = decoded_x[:, :3].astype('float')
    assert np.allclose(x_, decoded_x_, rtol=1e-3)
    # encoding a batch that was never added to the dataset
    samples2 = [(h.sample(), np.random.uniform()) for _ in range(5)]
    x, y = zip(*samples2)
    encoded_x, encoded_y = dataset.encode_dataset(x, y, objective='min')
    y_values = [0., 1., 0., 0., 1.]
    assert encoded_x.shape == (5, 4)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (5,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
@cleanup_dirs
@deterministic_test
def test_dataset_multi_encoding_decoding_min():
    """Five-sample encode/decode with objective='min' (multi parameters).

    NOTE(review): the hard-coded y_values depend on the seed (0) and on the
    exact order of h.sample()/np.random.uniform() calls — do not reorder.
    """
    params = get_multi_parameter_list()
    h = hp.HyperParameterList(params, seed=0)
    dataset = data.Dataset(h)
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for sample in samples:
        dataset.add_sample(*sample)
    encoded_x, encoded_y = dataset.encode_dataset(objective='min')
    # ranking of the 5 sampled objective values under 'min'
    y_values = [0., 0., 0., 1., 1.]
    assert encoded_x.shape == (5, 14)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (5,)
    assert encoded_y.dtype == np.float64
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
    decoded_x = dataset.decode_dataset(encoded_x)
    assert decoded_x.shape == (5, 14)
    x, y = dataset.get_dataset()
    # compare only the numeric columns of the round-trip
    x_ = x[:, :10].astype('float')
    decoded_x_ = decoded_x[:, :10].astype('float')
    assert np.allclose(x_, decoded_x_, rtol=1e-3)
    # encoding a batch that was never added to the dataset
    samples2 = [(h.sample(), np.random.uniform()) for _ in range(5)]
    x, y = zip(*samples2)
    encoded_x, encoded_y = dataset.encode_dataset(x, y, objective='min')
    y_values = [0., 1., 0., 0., 1.]
    assert encoded_x.shape == (5, 14)
    assert encoded_x.dtype == np.float64
    assert encoded_y.shape == (5,)
    assert encoded_y.dtype == np.float64
    # removed leftover debug print(encoded_y) that polluted test output
    assert np.allclose(y_values, encoded_y, rtol=1e-3)
# Allow running this test module directly (outside a pytest invocation).
if __name__ == '__main__':
    pytest.main([__file__])
|
import os
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from course.models import Course
from .constants import PERSON_STATUSES
class Person(models.Model):
    """A person (staff member) optionally linked to a Django auth user.

    Holds contact details, a profile image, course assignments and a set of
    social-network handles from which full profile URLs are derived.
    """

    class Meta:
        verbose_name = 'person'
        verbose_name_plural = 'persons'
        ordering = ['first_name', 'last_name']

    first_name = models.CharField('first_name', max_length=20)
    last_name = models.CharField('last_name', max_length=40)
    status = models.CharField(choices=PERSON_STATUSES, max_length=50)
    email = models.EmailField('email', blank=True)
    cellphone = models.CharField('phone', max_length=15, blank=True)
    image = models.ImageField('picture',
                              upload_to='img/profiles',
                              blank=True,
                              null=True)
    user = models.OneToOneField(
        User,
        blank=True,
        null=True,
        verbose_name='user',
        on_delete=models.CASCADE,
    )
    courses = models.ManyToManyField(
        Course, related_name="staff", blank=True)
    website = models.CharField(
        'website url', max_length=100, blank=True)
    github = models.CharField(
        'github username', max_length=50, blank=True)
    twitter = models.CharField(
        'twitter username', max_length=50, blank=True)
    instagram = models.CharField(
        'instagram username', max_length=50, blank=True)
    facebook = models.CharField(
        'facebook url', max_length=100, blank=True)
    objects = models.Manager()

    def __str__(self):
        return '{} {}'.format(self.first_name, self.last_name)

    def get_absolute_url(self):
        """URL of this person's detail page."""
        return reverse("person_detail", kwargs={"pk": self.pk})

    def get_website_label(self):
        """Return the website with its first dotted label stripped.

        E.g. 'www.example.com' -> 'example.com'.  Previously this indexed
        split('.', 1)[1] and raised IndexError when the stored value had no
        dot; now it falls back to the full value instead.
        """
        _, _, rest = self.website.partition('.')
        return rest or self.website

    def get_github_url(self):
        """Full GitHub profile URL built from the stored username."""
        base = 'https://github.com/'
        return base + self.github

    def get_twitter_url(self):
        """Full Twitter profile URL built from the stored username."""
        # renamed the copy-pasted local 'github_url_start' for clarity
        base = 'https://twitter.com/'
        return base + self.twitter

    def get_instagram_url(self):
        """Full Instagram profile URL built from the stored username."""
        base = 'https://www.instagram.com/'
        return base + self.instagram
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['KeyVersionArgs', 'KeyVersion']
# NOTE: generated by the Pulumi Terraform Bridge (tfgen); property storage is
# delegated to pulumi.set/pulumi.get rather than plain attributes.
@pulumi.input_type
class KeyVersionArgs:
    def __init__(__self__, *,
                 key_id: pulumi.Input[str]):
        """
        The set of arguments for constructing a KeyVersion resource.
        :param pulumi.Input[str] key_id: The id of the master key (CMK).
        """
        pulumi.set(__self__, "key_id", key_id)

    @property
    @pulumi.getter(name="keyId")
    def key_id(self) -> pulumi.Input[str]:
        """
        The id of the master key (CMK).
        """
        return pulumi.get(self, "key_id")

    @key_id.setter
    def key_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "key_id", value)
# NOTE: generated by the Pulumi Terraform Bridge (tfgen); all fields are
# optional because state lookups may only know a subset of the properties.
@pulumi.input_type
class _KeyVersionState:
    def __init__(__self__, *,
                 key_id: Optional[pulumi.Input[str]] = None,
                 key_version_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering KeyVersion resources.
        :param pulumi.Input[str] key_id: The id of the master key (CMK).
        :param pulumi.Input[str] key_version_id: The id of the Alikms key version.
        """
        if key_id is not None:
            pulumi.set(__self__, "key_id", key_id)
        if key_version_id is not None:
            pulumi.set(__self__, "key_version_id", key_version_id)

    @property
    @pulumi.getter(name="keyId")
    def key_id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the master key (CMK).
        """
        return pulumi.get(self, "key_id")

    @key_id.setter
    def key_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_id", value)

    @property
    @pulumi.getter(name="keyVersionId")
    def key_version_id(self) -> Optional[pulumi.Input[str]]:
        """
        The id of the Alikms key version.
        """
        return pulumi.get(self, "key_version_id")

    @key_version_id.setter
    def key_version_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key_version_id", value)
# NOTE: generated by the Pulumi Terraform Bridge (tfgen) — do not hand-edit
# beyond comments; regeneration will overwrite this class.
class KeyVersion(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Alikms Key Version resource. For information about Alikms Key Version and how to use it, see [What is Resource Alikms Key Version](https://www.alibabacloud.com/help/doc-detail/133838.htm).
        > **NOTE:** Available in v1.85.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        this = alicloud.kms.Key("this")
        keyversion = alicloud.kms.KeyVersion("keyversion", key_id=this.id)
        ```
        ## Import
        Alikms key version can be imported using the id, e.g.
        ```sh
        $ pulumi import alicloud:kms/keyVersion:KeyVersion example 72da539a-2fa8-4f2d-b854-*****
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] key_id: The id of the master key (CMK).
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: KeyVersionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Alikms Key Version resource. For information about Alikms Key Version and how to use it, see [What is Resource Alikms Key Version](https://www.alibabacloud.com/help/doc-detail/133838.htm).
        > **NOTE:** Available in v1.85.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        this = alicloud.kms.Key("this")
        keyversion = alicloud.kms.KeyVersion("keyversion", key_id=this.id)
        ```
        ## Import
        Alikms key version can be imported using the id, e.g.
        ```sh
        $ pulumi import alicloud:kms/keyVersion:KeyVersion example 72da539a-2fa8-4f2d-b854-*****
        ```
        :param str resource_name: The name of the resource.
        :param KeyVersionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above based on the actual args.
        resource_args, opts = _utilities.get_resource_args_opts(KeyVersionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       key_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate and assemble the input props.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = KeyVersionArgs.__new__(KeyVersionArgs)
            if key_id is None and not opts.urn:
                raise TypeError("Missing required property 'key_id'")
            __props__.__dict__["key_id"] = key_id
            # Output-only property; the provider fills it in after creation.
            __props__.__dict__["key_version_id"] = None
        super(KeyVersion, __self__).__init__(
            'alicloud:kms/keyVersion:KeyVersion',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            key_id: Optional[pulumi.Input[str]] = None,
            key_version_id: Optional[pulumi.Input[str]] = None) -> 'KeyVersion':
        """
        Get an existing KeyVersion resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] key_id: The id of the master key (CMK).
        :param pulumi.Input[str] key_version_id: The id of the Alikms key version.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _KeyVersionState.__new__(_KeyVersionState)
        __props__.__dict__["key_id"] = key_id
        __props__.__dict__["key_version_id"] = key_version_id
        return KeyVersion(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="keyId")
    def key_id(self) -> pulumi.Output[str]:
        """
        The id of the master key (CMK).
        """
        return pulumi.get(self, "key_id")

    @property
    @pulumi.getter(name="keyVersionId")
    def key_version_id(self) -> pulumi.Output[str]:
        """
        The id of the Alikms key version.
        """
        return pulumi.get(self, "key_version_id")
|
#
# Test our constraints parser
#
import unittest
from juju import constraints
class TestConstraints(unittest.TestCase):
    """Unit tests for the juju constraints parser."""

    def test_mem_regex(self):
        pattern = constraints.MEM
        for ok in ("10G", "1G"):
            self.assertTrue(pattern.match(ok))
        for bad in ("1Gb", "a1G", "1000"):
            self.assertFalse(pattern.match(bad))

    def test_normalize_key(self):
        norm = constraints.normalize_key
        # whitespace is stripped and camelCase/dashes become snake_case
        self.assertEqual(norm("test-key"), "test_key")
        self.assertEqual(norm("test-key "), "test_key")
        self.assertEqual(norm(" test-key"), "test_key")
        self.assertEqual(norm("TestKey"), "test_key")
        self.assertEqual(norm("testKey"), "test_key")

    def test_normalize_val(self):
        norm = constraints.normalize_value
        # sizes are converted to megabytes; non-numeric values pass through
        self.assertEqual(norm("10G"), 10 * 1024)
        self.assertEqual(norm("10M"), 10)
        self.assertEqual(norm("10"), 10)
        self.assertEqual(norm("foo,bar"), "foo,bar")

    def test_normalize_list_val(self):
        norm = constraints.normalize_list_value
        self.assertEqual(norm("foo"), ["foo"])
        self.assertEqual(norm("foo,bar"), ["foo", "bar"])

    def test_parse_constraints(self):
        parse = constraints.parse
        self.assertEqual(parse("mem=10G"), {"mem": 10 * 1024})
        expected = {
            "mem": 10 * 1024,
            "foo": "bar,baz",
            "tags": ["tag1"],
            "spaces": ["space1", "space2"],
        }
        self.assertEqual(
            parse("mem=10G foo=bar,baz tags=tag1 spaces=space1,space2"),
            expected,
        )
|
import datetime
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Tuple
from urllib.parse import parse_qs, urlparse
import requests
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.x509.oid import NameOID
from twisted.internet.defer import inlineCallbacks
from hathor.conf import HathorSettings
from hathor.p2p.peer_discovery import DNSPeerDiscovery
from hathor.transaction.genesis import GENESIS_HASH
if TYPE_CHECKING:
from cryptography.hazmat.backends.openssl.rsa import _RSAPrivateKey
from cryptography.x509 import Certificate
settings = HathorSettings()
def discover_hostname() -> Optional[str]:
    """Best-effort discovery of this node's externally visible address.

    Synchronous (blocking) — must not be called from the twisted main loop.
    """
    return discover_ip_ipify()
def discover_ip_ipify() -> Optional[str]:
    """Discover our public IP address via ipify's HTTP API.

    Returns the address as a string (may be IPv4 or IPv6), or None when the
    request fails.  Synchronous (blocking) — must not be called from the
    twisted main loop.
    """
    response = requests.get('https://api.ipify.org')
    if not response.ok:
        return None
    return response.text
def description_to_connection_string(description: str) -> Tuple[str, Optional[str]]:
    """Split a DNS description into its connection URL and optional peer id.

    A description looks like 'tcp://127.0.0.1:40403/?id=123'; the result for
    that input is ('tcp://127.0.0.1:40403', '123').  When no id parameter is
    present the second element is None.
    """
    parsed = urlparse(description)
    url = "{}://{}".format(parsed.scheme, parsed.netloc)
    peer_id: Optional[str] = None
    if parsed.query:
        ids = parse_qs(parsed.query).get('id')
        if ids:
            peer_id = ids[0]
    return url, peer_id
def get_genesis_short_hash() -> str:
    """Short (7 hex chars) form of GENESIS_HASH, used to cross-check that
    both peers share the same genesis during the handshake."""
    full_hex = GENESIS_HASH.hex()
    return full_hex[:7]
def get_settings_hello_dict() -> Dict[str, Any]:
    """Return the settings values that peers must agree on in the hello state.

    The result is later json-serialized, so bytes values are converted to
    their hex representation first.
    """
    settings_dict: Dict[str, Any] = {}
    for key in settings.P2P_SETTINGS_HASH_FIELDS:
        value = getattr(settings, key)
        # json can't serialize bytes; isinstance (instead of the old exact
        # type(...) == bytes comparison) also covers bytes subclasses.
        if isinstance(value, bytes):
            value = value.hex()
        settings_dict[key] = value
    return settings_dict
def connection_string_to_host(connection_string: str) -> str:
    """Return the host part of a connection string.

    E.g. tcp://127.0.0.1:40403 -> 127.0.0.1
    """
    netloc = urlparse(connection_string).netloc
    return netloc.partition(':')[0]
@inlineCallbacks
def discover_dns(host: str, test_mode: int = 0) -> Generator[Any, Any, List[str]]:
    """ Start a DNS peer discovery object and execute a search for the host

        Returns the DNS string from the requested host
        E.g., localhost -> tcp://127.0.0.1:40403
    """
    # Created with an empty bootstrap list; test_mode is forwarded so tests
    # can stub out the real DNS lookup.
    discovery = DNSPeerDiscovery([], test_mode=test_mode)
    # inlineCallbacks: yield suspends until the deferred lookup resolves.
    result = yield discovery.dns_seed_lookup(host)
    return result
def generate_certificate(private_key: '_RSAPrivateKey', ca_file: str, ca_pkey_file: str) -> 'Certificate':
    """ Generate a certificate signed by the ca file passed as parameters

        This certificate is used to start the TLS connection between peers and contains the peer public key
    """
    _BACKEND = default_backend()
    # Load the CA certificate and its private key from disk (PEM format).
    with open(ca_file, 'rb') as f:
        ca = x509.load_pem_x509_certificate(data=f.read(), backend=_BACKEND)
    with open(ca_pkey_file, 'rb') as f:
        ca_pkey = load_pem_private_key(f.read(), password=None, backend=_BACKEND)
    # The new certificate carries the peer's public key, signed by the CA.
    public_key = private_key.public_key()
    builder = x509.CertificateBuilder()
    builder = builder.issuer_name(ca.issuer)
    subject = x509.Name([
        x509.NameAttribute(NameOID.COMMON_NAME, 'Hathor full node')
    ])
    builder = builder.subject_name(subject)
    # Backdated 1 hour to tolerate clock skew; valid for ~100 years.
    builder = builder.not_valid_before(datetime.datetime.utcnow() - datetime.timedelta(hours=1))
    builder = builder.not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(hours=24*365*100))
    builder = builder.serial_number(x509.random_serial_number())
    builder = builder.public_key(public_key)
    # Mark as an end-entity (leaf) certificate — not allowed to act as a CA.
    builder = builder.add_extension(
        x509.BasicConstraints(
            ca=False, path_length=None), critical=True)
    certificate = builder.sign(
        private_key=ca_pkey,
        algorithm=hashes.SHA256(),
        backend=_BACKEND
    )
    return certificate
|
from leg_data import *
from hex_walker_data import *
# Named arm poses; each entry is a Leg_Position of three servo angles.
TORSO_ARM_TABLE = {
    "NEUTRAL": Leg_Position(0, 45, 45),
    "ON_HIP": Leg_Position(0, 90, 0),
    "UP": Leg_Position(90, 0, 180),
    "STRAIGHT_OUT": Leg_Position(90, 90, 90),
    "STRAIGHT_FOR": Leg_Position(90, 0, 90),
    "WAVE_UP": Leg_Position(30, 40, 180),
    "WAVE_DOWN": Leg_Position(55, 120, 180),
    "HAND_SHAKE_UP": Leg_Position(45, 10, 80),
    "HAND_SHAKE_MID": Leg_Position(45, 10, 70),
    "HAND_SHAKE_DOWN": Leg_Position(45, 10, 60),
    "JOHNNY_BRAVO_MONKEY_DOWN": Leg_Position(90, 0, 80),
    "JOHNNY_BRAVO_MONKEY_UP": Leg_Position(90, 0, 100),
    "BLOCKING_UP": Leg_Position(30, 0, 180),
    "BLOCKING_FRONT": Leg_Position(30, 0, 90),
    "LOOKING": Leg_Position(20, 5, 155),
}
class Torso_Position(object):
    """A torso pose: one arm position per arm plus a human-readable description."""

    def __init__(self, right_arm, left_arm, description):
        self.right_arm = right_arm
        self.left_arm = left_arm
        self.description = description

    def __str__(self):
        # NOTE: a banner string ("---Torso position is---") used to be built
        # here but was never included in the return value; it was removed.
        right_arm_string = "right arm: " + str(self.right_arm) + "\n"
        left_arm_string = "left arm: " + str(self.left_arm) + "\n"
        return right_arm_string + left_arm_string + self.description
# Integer ids for each torso pose; used as keys into TORSO_POSITIONS below.

# relaxed
TORSO_NEUTRAL = 1
# jumping jacks
TORSO_JACK_DOWN = 2
TORSO_JACK_UP = 3
# right hand wave
TORSO_WAVE_DOWN = 4
TORSO_WAVE_UP = 5
# right hand shake
TORSO_SHAKE_DOWN = 6
TORSO_SHAKE_UP = 7
# dancing in front
TORSO_DANCE_FRONT_LEFT_OUT = 8
TORSO_DANCE_FRONT_RIGHT_OUT = 9
# dancing above
TORSO_DANCE_ABOVE_LEFT_UP = 10
TORSO_DANCE_ABOVE_RIGHT_UP = 11
# johnny bravo dance
TORSO_MONKEY_RIGHT_UP = 12
TORSO_MONKEY_LEFT_UP = 13
# finish hand shake
TORSO_SHAKE_MID = 14
# looking
TORSO_LOOKING = 15
# pointing
TORSO_POINTING_LEFT = 16
TORSO_POINTING_RIGHT= 17
# Lookup table: pose id -> Torso_Position (right arm pose, left arm pose,
# description).  Fixed the "rigth" typo in the SHAKE_MID description and the
# mislabeled "# 13" comment on entry 12.
TORSO_POSITIONS = {
    # 1
    TORSO_NEUTRAL:
        Torso_Position(TORSO_ARM_TABLE["NEUTRAL"],
                       TORSO_ARM_TABLE["NEUTRAL"],
                       "torso is in the neutral position"),
    # 2
    TORSO_JACK_DOWN:
        Torso_Position(TORSO_ARM_TABLE["WAVE_DOWN"],
                       TORSO_ARM_TABLE["WAVE_DOWN"],
                       "jumping jacks (down pos)"),
    # 3
    TORSO_JACK_UP:
        Torso_Position(TORSO_ARM_TABLE["WAVE_UP"],
                       TORSO_ARM_TABLE["WAVE_UP"],
                       "jumping jacks (up pos)"),
    # 4
    TORSO_WAVE_DOWN:
        Torso_Position(TORSO_ARM_TABLE["WAVE_DOWN"],
                       TORSO_ARM_TABLE["NEUTRAL"],
                       "waving with the right hand (down pos)"),
    # 5
    TORSO_WAVE_UP:
        Torso_Position(TORSO_ARM_TABLE["WAVE_UP"],
                       TORSO_ARM_TABLE["NEUTRAL"],
                       "waving with the right hand (up pos)"),
    # 6
    TORSO_SHAKE_DOWN:
        Torso_Position(TORSO_ARM_TABLE["HAND_SHAKE_DOWN"],
                       TORSO_ARM_TABLE["NEUTRAL"],
                       "handshaking with the right hand (down pos)"),
    # 14
    TORSO_SHAKE_MID:
        Torso_Position(TORSO_ARM_TABLE["HAND_SHAKE_MID"],
                       TORSO_ARM_TABLE["NEUTRAL"],
                       "handshaking with the right hand (mid pos)"),
    # 7
    TORSO_SHAKE_UP:
        Torso_Position(TORSO_ARM_TABLE["HAND_SHAKE_UP"],
                       TORSO_ARM_TABLE["NEUTRAL"],
                       "handshaking with the right hand (up pos)"),
    # 8
    TORSO_DANCE_FRONT_LEFT_OUT:
        Torso_Position(TORSO_ARM_TABLE["BLOCKING_FRONT"],
                       TORSO_ARM_TABLE["STRAIGHT_OUT"],
                       "dance move with left arm out"),
    # 9
    TORSO_DANCE_FRONT_RIGHT_OUT:
        Torso_Position(TORSO_ARM_TABLE["STRAIGHT_OUT"],
                       TORSO_ARM_TABLE["BLOCKING_FRONT"],
                       "dance move with right arm out"),
    # 10
    TORSO_DANCE_ABOVE_LEFT_UP:
        Torso_Position(TORSO_ARM_TABLE["BLOCKING_UP"],
                       TORSO_ARM_TABLE["WAVE_DOWN"],
                       "dance move with left arm above head"),
    # 11
    TORSO_DANCE_ABOVE_RIGHT_UP:
        Torso_Position(TORSO_ARM_TABLE["WAVE_DOWN"],
                       TORSO_ARM_TABLE["BLOCKING_UP"],
                       "dance move with right arm above head"),
    # 12
    TORSO_MONKEY_RIGHT_UP:
        Torso_Position(TORSO_ARM_TABLE["JOHNNY_BRAVO_MONKEY_UP"],
                       TORSO_ARM_TABLE["JOHNNY_BRAVO_MONKEY_DOWN"],
                       "starting johnny bravo's monkey dance"),
    # 13
    TORSO_MONKEY_LEFT_UP:
        Torso_Position(TORSO_ARM_TABLE["JOHNNY_BRAVO_MONKEY_DOWN"],
                       TORSO_ARM_TABLE["JOHNNY_BRAVO_MONKEY_UP"],
                       "finishing johnny bravo's monkey dance"),
    # 15
    TORSO_LOOKING:
        Torso_Position(TORSO_ARM_TABLE["NEUTRAL"],
                       TORSO_ARM_TABLE["LOOKING"],
                       "raising hand to act like it is looking around"),
    # 16
    TORSO_POINTING_LEFT:
        Torso_Position(TORSO_ARM_TABLE["NEUTRAL"],
                       TORSO_ARM_TABLE["STRAIGHT_OUT"],
                       "pointing left arm out"),
    # 17
    TORSO_POINTING_RIGHT:
        Torso_Position(TORSO_ARM_TABLE["STRAIGHT_OUT"],
                       TORSO_ARM_TABLE["NEUTRAL"],
                       "pointing right arm out")
}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 6 20:47:43 2020
"""
import os

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset
# Load the 1980 NetCDF file (daily average temperature on a lat/lon grid).
data = Dataset(r"C:\Users\Jiacheng Li\Desktop\Study\University of Birmingham Relevant\Final Year Project\NetCDF_Handling\NetCDF_data\1980.nc", "r")

# Pull out the coordinate axes and the temperature variable.
lats = data.variables["lat"][:]
lons = data.variables["lon"][:]
time = data.variables["time"][:]
tave = data.variables["tave"][:]

# Mercator basemap covering the region of interest.
mp = Basemap(projection="merc",
             llcrnrlon=65.8,
             llcrnrlat=-2,
             urcrnrlon=145.37,
             urcrnrlat=38.78,
             resolution="i")

# Project the grid coordinates onto the map.
lon, lat = np.meshgrid(lons, lats)
x, y = mp(lon, lat)

# Make sure the output directory exists before the first savefig call.
os.makedirs("Images", exist_ok=True)

# Generate one JPG per day of the year.
for day in np.arange(0, len(time)):
    colorMap = mp.pcolor(x, y, np.squeeze(tave[day, :, :]), cmap="rainbow")
    mp.drawcoastlines()
    mp.drawstates()
    mp.drawcountries()
    char = mp.colorbar(colorMap, location="right", pad="10%")
    # Fixed typo ("Temparature") and missing spaces in the original title.
    plt.title("Average Temperature Day: " + str(day + 1) + " of Year 1980")
    plt.clim(-40, 40)
    plt.savefig("Images/" + str(day + 1) + ".jpg")
    # Clear the figure so frames don't accumulate artists.
    plt.clf()
|
# The MIT License (MIT) # Copyright (c) 2014-2017 University of Bristol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
import sys
from hyperstream import TimeInterval
from hyperstream.utils import datetime2unix
from .helpers import *
RANDOM_VALUES = {
'betavariate': [
0.039981834097823, 0.981304494343183, 0.103808554963057, 0.138446289876271,
0.155847790152019, 0.936165436117980, 0.691777297709685, 0.202711794550541,
0.439187581262989, 0.854411325237021, 0.012871422905004, 0.445711666122735,
0.173612011072429, 0.294213597937781, 0.571754488117056, 0.414207690302475,
0.952829471212172, 0.554866330696849, 0.853597747841655, 0.726548827146225,
0.434566859684085, 0.845728292811518, 0.755650779991056, 0.967643717137684,
0.855246132463383, 0.622668240929598, 0.715418188650215, 0.110575048120036,
0.975782622203252, 0.911339685810993, 0.770410055493041, 0.494632012614062,
0.214519001820389, 0.245253365269930, 0.615395718030917, 0.768849947883717,
0.143943873499559, 0.553601557863770, 0.280901407530050, 0.772551703947893,
0.782327973677141, 0.486333485714099, 0.750803107845182, 0.102266594101819,
0.068535570781350, 0.914774771182072, 0.586769119311220, 0.233320009269012,
0.233797285521539, 0.276737112045214, 0.218306114082117, 0.162816644080913,
0.953114601742251, 0.666718604507751, 0.754286487729222, 0.633736098912436,
0.745322580574953, 0.775601376437185, 0.869843144466489, 0.652589692264996
],
'expovariate': [
3.394823806874659, 0.581127564523488, 0.007519672058314, 2.418848860875040,
2.801300959473452, 0.872818427870305, 1.113411706270757, 0.087671478631061,
1.454491546927796, 0.270247967319917, 0.031298757464487, 1.554820555740570,
0.424783969801393, 0.976256988533290, 0.956632880871021, 0.160819948534567,
0.202227141819855, 0.121504542781544, 0.014726687798474, 0.666995224752983,
3.349598620046617, 0.066740710553357, 0.778897212763763, 0.627169493658980,
0.919956064910344, 0.093134261821545, 0.865128825812668, 0.314143102991765,
0.812905426912728, 1.034607693670155, 0.655921477976220, 0.438875802168443,
0.286552206139466, 2.710786411749832, 0.604016087463631, 0.755365636542648,
0.019488234358730, 0.709483750170804, 0.005797007645201, 0.155214410916815,
0.640226369456586, 0.473766605605160, 0.055697926496315, 0.885588186216424,
0.179130522055541, 0.814931243906700, 0.155772001995568, 2.769506831287718,
1.473945818640017, 3.145001077306131, 0.152251542816020, 0.364408641013038,
0.040394611099021, 0.324046924239758, 1.642542394521078, 0.195215975982500,
0.167910514134237, 3.094860156976556, 0.167887905027271, 1.795100810202958
],
'gammavariate': [
0.034122056303517, 0.819316938516747, 4.893990231207050, 0.093238767965875,
0.062653368633307, 0.540893888677075, 0.398146730647281, 2.477674145470838,
0.265945456658999, 1.440498095730080, 3.479785441733883, 0.237277063974075,
1.061059410972392, 0.472757056386849, 0.484807556477113, 1.906802450982214,
1.697773907743712, 2.167940835057879, 4.225448242167752, 0.720001469650587,
0.035729192259713, 2.740124924523351, 0.614173008703897, 0.763787353084732,
0.508389510898236, 2.419918892177294, 0.546448162063484, 1.310869670605297,
0.586209300594369, 0.439072201829716, 0.731812395588524, 1.034964061541275,
1.389691639897196, 0.068797714057458, 0.791006964997359, 0.634574154700323,
3.947672654322900, 0.677073210230625, 5.153310521407328, 1.939551415551206,
0.749025865807436, 0.974588922379475, 2.915532064817145, 0.531830509662681,
1.807869194387956, 0.584597328850929, 1.936237033639271, 0.064744318636450,
0.260092883032779, 0.044021772573587, 1.957381341407605, 1.186156762423377,
3.229188209498897, 1.284518963073033, 0.215035876124057, 1.729669369589433,
1.867104880870748, 0.046338583263660, 1.867228551311800, 0.181654626260094
],
'gauss': [
01.054219641927238, -0.225557255750686, 02.197040548376180, 00.103491789727369,
01.226197529624492, -0.492012676750713, -0.198114993904518, -0.368908398457307,
00.075994371191587, -0.731246053108685, 01.730469790846852, 00.339286625801242,
-0.793301565330494, 01.150298484529457, -0.423484998531862, -0.377227190281404,
00.201191163960159, 00.450034666561860, 01.150116230720591, 00.105939158666782,
00.356502892758491, -0.079919387895188, -1.082857059855480, -0.285936313257175,
-0.346814969321444, -0.256881102258723, -0.696979366413084, -0.377499627520273,
-1.348994444204497, -0.499429050862792, -0.930241287708008, 00.111367638837469,
00.012404354114906, 02.328394072209139, -1.176780706969193, 00.354849884867363,
01.182456826105767, 00.144094950423267, 00.556793983695921, 00.020230708184767,
-0.959260150198715, 00.165387954371103, 01.254495799595116, 00.444315947531775,
00.656742549085982, 01.094783865442602, 01.451280543224706, 01.852781273500150,
00.329648919441743, -2.486228819823942, 00.539083396938225, 00.661971580335303,
00.780264495057645, 00.198194768427309, 00.217236306069111, -0.585867168640169,
01.403963278880950, 02.053924883121816, 01.069438836743639, 01.564129852304573
],
'lognormvariate': [
4.182149649506653, 6.072683633963685, 1.378906052287930, 1.820308315387552,
0.496143003914426, 1.262823784820494, 0.541232922428520, 0.197427904210337,
1.141079547621132, 1.210524252689620, 1.203885159410419, 1.313149056265397,
0.950794910474111, 0.843500437187102, 0.187032058314804, 0.927866832248184,
0.156570492107917, 0.271953010190621, 0.412265059596156, 0.335500854109232,
1.894937540967587, 0.277297951845252, 0.675726657696681, 2.817759974006620,
2.502088419825137, 0.622939011683825, 2.152480312223666, 0.452792026701372,
0.882873194607882, 1.349444237752270, 0.869975688840489, 2.111560218917180,
0.409725015624480, 1.879234230032750, 2.606256173070972, 6.229504939045913,
0.906235497770029, 0.828016527006029, 0.682915284714766, 1.012706478986566,
0.367098765722139, 0.278910825237462, 0.428440501893463, 1.004907017932176,
0.185565283388187, 1.090536358789101, 0.261012404269340, 0.162505658588734,
1.124937908343475, 2.828519961139446, 0.720248041371186, 17.481108541304152,
2.289963461381308, 1.681143365809301, 0.424940732580820, 1.668947117554719,
2.447378048054388, 2.258635444051391, 0.653795316276048, 0.388972244244800
],
'normalvariate': [
01.430825384561959, 01.803800621694465, 00.321290469070466, 00.599005890800551,
-0.700891079468200, 00.233350312510460, -0.613905552258328, -1.622381803257973,
00.131974785909151, 00.191053532461377, 00.185553959787939, 00.272428112308738,
-0.050456896398401, -0.170194858695000, -1.676475241982295, -0.074867056225568,
-1.854248941609945, -1.302125984262414, -0.886088787958202, -1.092130776793514,
00.639185878079636, -1.282662712448799, -0.391966637231399, 01.035942233839799,
00.917125751078616, -0.473306659555168, 00.766620810808078, -0.792322361123261,
-0.124573696156307, 00.299692831949558, -0.139290011585451, 00.747427114481840,
-0.892269037898180, 00.630864369412055, 00.957914775126571, 01.829296865604353,
-0.098456075401950, -0.188722164643868, -0.381384461196793, 00.012626429070964,
-1.002124350723792, -1.276863171055819, -0.847603402642404, 00.004895017760377,
-1.684348526836401, 00.086669647496766, -1.343187346852594, -1.817042455734101,
00.117727841549639, 01.039753592922873, -0.328159624394767, 02.861120785911318,
00.828535861710167, 00.519474136815591, -0.855805572533170, 00.512192959069697,
00.895017267032754, 00.814760845114164, -0.424960948565885, -0.944247289465118
],
'paretovariate': [
29.809400800717510, 01.7880534401352315, 1.0075480157929015, 11.232921214436221,
16.466054512384030, 02.3936476787662757, 3.0447284136688280, 1.0916294395161450,
04.282305560549401, 01.3102893193840228, 1.0317937139180504, 4.7342369172954900,
01.529260017630925, 02.6545017930933260, 2.6029173687084950, 1.1744734841034520,
01.224126026748782, 01.1291944955974207, 1.0148356597400270, 1.9483740899647228,
28.491295513537708, 01.0690182570835713, 2.1790678917276303, 1.8723035050106858,
02.509180146459839, 01.0976090923717308, 2.3753120677749830, 1.3690856427811218,
02.254448616097609, 02.8140020682287330, 1.9269173113984270, 1.5509626492274813,
01.331827695661731, 15.0410993623286920, 1.8294513029924269, 2.1283895981144103,
01.019679369608408, 02.0329414820037095, 1.0058138428094914, 1.1679083465608209,
01.896910233245502, 01.6060321043903114, 1.0572782597459870, 2.4244099767209266,
01.196176861442035, 02.2590203455962850, 1.1685597434253272, 15.950765651183254,
04.366430336403290, 23.2197004879965800, 1.1644531094097856, 1.4396623990728115,
01.041221570722970, 01.3827121883154585, 5.1682926620010985, 1.2155734928205928,
01.182830759277237, 22.0841500072887450, 1.1828040168323900, 6.0200815768637320
],
'randint2': [
5, 3, 1, 5, 5, 3, 4, 1, 4, 2, 1, 4, 2, 4, 4, 1, 1, 1, 1, 3, 5, 1, 3, 3, 4, 1, 3, 2, 3, 4,
3, 2, 2, 5, 3, 3, 1, 3, 1, 1, 3, 2, 1, 3, 1, 3, 1, 5, 4, 5, 1, 2, 1, 2, 5, 1, 1, 5, 1, 5
],
'randint3': [
4, 1, 1, 1, 5, 1, 1, 1, 3, 2, 1, 1, 1, 3, 5, 4, 5, 4, 2, 1, 2, 1, 1, 5, 4, 2, 1, 5, 4, 1,
5, 1, 5, 5, 1, 3, 5, 1, 3, 4, 3, 3, 2, 2, 4, 5, 1, 5, 1, 1, 3, 2, 4, 4, 2, 1, 1, 5, 4, 2
],
'random': [
0.966453535692138, 0.440732599175352, 0.007491470058587, 0.910975962449124, 0.939268997363764,
0.582227573058949, 0.671563481487985, 0.083938226837084, 0.766480932791796, 0.236809775363117,
0.030814021726610, 0.788772717236283, 0.346088965597123, 0.623281475039168, 0.615815695103615,
0.148554638708287, 0.183090647409931, 0.114412969688687, 0.014618780486909, 0.486751540604758,
0.964901560916215, 0.064562280977186, 0.541088185551130, 0.465898559008309, 0.601463449561051,
0.088928829990662, 0.579002686187366, 0.269585503819448, 0.556432560556215, 0.644634234178282,
0.481036371366518, 0.355239147442983, 0.249152121361209, 0.933515498042346, 0.453388019476493,
0.530161206911590, 0.019299566309716, 0.508101925779792, 0.005780237417743, 0.143768427595595,
0.472826925347406, 0.377347440772596, 0.054175198646142, 0.587528508131069, 0.164003223741961,
0.557330237441468, 0.144245721601908, 0.937307084696224, 0.770979971519774, 0.956933122349405,
0.141227764416499, 0.305392708287698, 0.039589624227961, 0.276783694791692, 0.806512505115603,
0.177343035278254, 0.154570514710789, 0.954718655702394, 0.154551400089043, 0.833889294151231
],
'randrange2': [
18, 14, 10, 18, 18, 14, 16, 10, 16, 12, 10, 16, 12, 16, 16, 10, 10, 10, 10, 14, 18, 10, 14, 14, 16,
10, 14, 12, 14, 16, 14, 12, 12, 18, 14, 14, 10, 14, 10, 10, 14, 12, 10, 14, 10, 14, 10, 18, 16, 18,
10, 12, 10, 12, 18, 10, 10, 18, 10, 18
],
'randrange3': [
16, 10, 10, 10, 18, 10, 10, 10, 14, 12, 10, 10, 10, 14, 18, 16, 18, 16, 12, 10, 12, 10, 10, 18, 16,
12, 10, 18, 16, 10, 18, 10, 18, 18, 10, 14, 18, 10, 14, 16, 14, 14, 12, 12, 16, 18, 10, 18, 10, 10,
14, 12, 16, 16, 12, 10, 10, 18, 16, 12
],
'triangular': [
4.682762875874238, 3.626159769226909, 2.212011368448777, 4.483209798223082,
4.573159271028750, 3.869054691108233, 4.007372398360772, 2.709668486705239,
4.163007048043646, 3.191997756784259, 2.429981546533871, 4.203958639082648,
3.441018318267585, 3.933827513051516, 3.922210750833969, 2.944101600596951,
3.048114442443949, 2.828539569442598, 2.296163270716432, 3.708949748713680,
4.675507600626219, 2.622393513673718, 3.801812729810393, 3.671942389572636,
3.899679103787349, 2.730460799731220, 3.863871271607618, 3.271814854024236,
3.827182356344678, 3.966673690541899, 3.698887350061536, 3.459943452554892,
3.222666237436551, 4.553397821464158, 3.649341722281638, 3.783526630434640,
2.340290167149009, 3.746027363668379, 2.186229494190525, 2.928768305646555,
3.684328219820720, 3.504687557147854, 2.570132609027809, 3.877543887313001,
2.991977490899752, 3.828655633149338, 2.930308728117419, 4.566319534782433,
4.171109123321607, 4.640555104429366, 2.920525168856885, 3.353645540651684,
2.487378441632136, 3.288682338185075, 4.238119113868061, 3.031531973168803,
2.963028082801710, 4.631429744970086, 2.962968535588914, 4.294073574976609
],
'uniform': [
4.899360607076416, 3.322197797526058, 2.022474410175761, 4.732927887347373, 4.817806992091292,
3.746682719176847, 4.014690444463955, 2.251814680511251, 4.299442798375389, 2.710429326089353,
2.092442065179829, 4.366318151708850, 3.038266896791369, 3.869844425117505, 3.847447085310846,
2.445663916124863, 2.549271942229794, 2.343238909066063, 2.043856341460727, 3.460254621814275,
4.894704682748647, 2.193686842931558, 3.623264556653390, 3.397695677024928, 3.804390348683154,
2.266786489971987, 3.737008058562099, 2.808756511458345, 3.669297681668646, 3.933902702534848,
3.443109114099555, 3.065717442328950, 2.747456364083627, 4.800546494127040, 3.360164058429480,
3.590483620734771, 2.057898698929151, 3.524305777339376, 2.017340712253229, 2.431305282786786,
3.418480776042219, 3.132042322317789, 2.162525595938428, 3.762585524393208, 2.492009671225883,
3.671990712324404, 2.432737164805725, 4.811921254088673, 4.312939914559324, 4.870799367048216,
2.423683293249498, 2.916178124863096, 2.118768872683885, 2.830351084375076, 4.419537515346811,
2.532029105834762, 2.463711544132369, 4.864155967107185, 2.463654200267129, 4.501667882453695
],
'vonmisesvariate': [
3.357856054826835, 5.157489756582599, 4.479739182645804, 1.907971117523622,
5.055141604545795, 6.107096891938155, 2.915387721029356, 5.460182454227138,
1.116271426329938, 4.960737570079505, 5.887441356819019, 0.981316492756079,
6.274362166448599, 0.632521528607341, 6.028546694550388, 3.418698935619459,
0.060489178998973, 0.276154477688722, 6.043683015273760, 5.632838807299208,
2.045567387571900, 0.834502386051994, 0.763471240922435, 5.805628236451655,
0.755672107602623, 2.112922851993352, 4.920322771565935, 4.749565803242515,
3.723684053158859, 4.352031602899314, 5.614316866936048, 6.225137698980708,
5.766737554488466, 5.873170966841112, 0.316277234794306, 2.436297088323581,
0.600638552593339, 1.532669183971546, 0.488588701880799, 0.903538039802433,
0.547776055749545, 6.176392770219052, 1.428120214925881, 6.053689328434808,
1.672377583279894, 0.640266865453971, 5.282504365451881, 1.467161497219122,
6.140657913249537, 4.016127529818116, 0.628520977491434, 1.141278277115692,
5.517591788868223, 5.783409984284813, 5.543137983515277, 5.603426529843089,
5.666252112039157, 5.331163516794262, 1.405021421913882, 4.657615032038910
],
'weibullvariate': [
3.394823806874659, 0.581127564523488, 0.007519672058314, 2.418848860875040,
2.801300959473452, 0.872818427870305, 1.113411706270757, 0.087671478631061,
1.454491546927796, 0.270247967319917, 0.031298757464487, 1.554820555740570,
0.424783969801393, 0.976256988533290, 0.956632880871021, 0.160819948534567,
0.202227141819855, 0.121504542781544, 0.014726687798474, 0.666995224752983,
3.349598620046617, 0.066740710553357, 0.778897212763763, 0.627169493658980,
0.919956064910344, 0.093134261821545, 0.865128825812668, 0.314143102991765,
0.812905426912728, 1.034607693670155, 0.655921477976220, 0.4388758021684434,
0.286552206139466, 2.710786411749832, 0.604016087463631, 0.755365636542648,
0.019488234358730, 0.709483750170804, 0.005797007645201, 0.155214410916815,
0.640226369456586, 0.473766605605160, 0.055697926496315, 0.885588186216424,
0.179130522055541, 0.814931243906700, 0.155772001995568, 2.769506831287718,
1.473945818640017, 3.145001077306131, 0.152251542816020, 0.364408641013038,
0.040394611099021, 0.324046924239758, 1.642542394521078, 0.195215975982500,
0.167910514134237, 3.094860156976556, 0.167887905027271, 1.795100810202958
],
'custom': [
-0.987982323681744, -0.403745561951705, 00.551693007468676, 00.999907570085015, 00.528811724075207,
-0.428471182309132, -0.991819659674552, -0.643293715945895, 00.296673503522449, 00.963880472032213,
00.744900179718102, -0.158937902545648, -0.916649210188621, -0.831597461328594, 00.018021158368760,
00.851071208170706, 00.901650314096472, 00.123256279415406, -0.768459010134729, -0.953656629697290,
-0.262066741929040, 00.670466099786067, 00.986575501370702, 00.395631936821145, -0.559053805891616,
-0.999748057676344, -0.521278555807829, 00.436452046271176, 00.992910649810199, 00.636491780955790,
-0.305114695977134, -0.966200128537194, -0.738965618780344, 00.167670472968601, 00.920151105122217,
00.826649054720701, -0.026870324303591, -0.855685251082012, -0.897787104210346, -0.114467634085048,
00.774092850923502, 00.950955938705069, 00.253514521999197, -0.677006977090617, -0.985091383620956,
-0.387487315031860, 00.566370804008219, 00.999510217795927, 00.513704546819593, -0.444398715432779,
-0.993923848165933, -0.629639978609997, 00.313531983546438, 00.968444085957101, 00.732973161947531,
-0.176389906877676, -0.923580908783280, -0.821635882465121, 00.035717385023463, 00.860232253440635
]
}
SEA_ICE_SUMS = [
15.48, 16.15, 17.26, 18.21, 19.38, 18.61, 18.87, 18.80, 20.38, 20.82,
17.96, 15.88, 15.16, 15.89, 17.89, 19.21, 19.47, 18.78, 19.10, 18.84,
21.40, 20.54, 17.67, 15.66, 15.30, 15.80, 17.67, 18.90, 19.79, 19.62,
19.43, 19.41, 21.45, 20.78, 17.66, 15.28, 14.84, 15.86, 17.77, 19.15,
19.48, 18.80, 18.48, 19.14, 21.07, 21.21, 17.90, 16.06, 15.63, 16.60,
18.22, 19.72, 20.25, 19.88, 19.49, 19.60, 21.15, 21.29, 18.38, 16.34,
15.42, 16.02, 17.67, 18.99, 19.23, 18.52, 18.77, 18.80, 19.38, 20.33,
18.05, 15.50, 14.59, 15.39, 17.51, 19.79, 20.78, 20.15, 19.45, 20.03,
21.34, 19.84, 17.00, 14.95, 15.02, 15.47, 17.24, 18.99, 19.26, 19.07,
19.03, 19.33, 20.40, 20.49, 18.03, 15.64, 15.51, 16.14, 17.80, 19.17,
19.36, 18.94, 18.59, 19.00, 21.10, 20.58, 17.46, 15.53, 15.08, 15.98,
18.13, 19.60, 19.92, 19.46, 18.82, 18.75, 20.90, 20.61, 17.55, 15.01,
14.65, 15.70, 17.74, 19.65, 19.94, 19.20, 19.26, 19.29, 20.80, 20.52,
17.67, 15.82, 15.52, 16.87, 18.21, 19.24, 19.48, 18.71, 18.53, 18.82,
19.83, 19.90, 16.65, 15.00, 15.11, 15.65, 16.84, 18.20, 18.73, 18.76,
17.88, 17.98, 19.93, 20.24, 18.05, 16.22, 15.76, 16.52, 17.86, 19.17,
20.12, 19.14, 18.10, 18.06, 19.71, 19.86, 17.77, 15.88, 15.15, 16.03,
17.41, 18.95, 19.98, 19.50, 18.72, 18.86, 20.38, 20.84, 17.67, 14.86,
14.18, 15.33, 17.28, 18.70, 18.94, 18.44, 18.00, 18.79, 19.63, 20.42,
16.63, 14.24, 14.00, 14.38, 16.17, 17.47, 18.83, 18.50, 18.69, 19.06,
20.32, 19.57, 16.38, 14.66, 14.41, 14.97, 16.37, 18.15, 18.54, 17.55,
17.05, 18.07, 18.71, 20.03, 18.39, 15.80, 15.13, 16.79, 18.56, 19.34,
19.78, 18.50, 17.39, 17.20, 19.40, 20.51, 17.75, 15.24, 14.71, 15.98,
18.40, 19.40, 19.83, 18.66, 18.08, 18.34, 19.04, 19.62, 17.04, 14.75,
14.55, 15.69, 17.31, 18.67, 19.49, 18.81, 18.32, 17.26, 19.39, 19.81,
16.69, 14.25, 14.12, 14.58
]
def rng_helper(hs, ticker, ti, tool_name, **kwargs):
    """Run one random-number-generator tool and check it against fixtures.

    Executes the plugin tool named ``tool_name`` into a fresh in-memory
    stream aligned to ``ticker`` over interval ``ti``, then asserts the
    produced values match the precomputed RANDOM_VALUES fixture.

    :param hs: HyperStream instance providing channels and plugin tools.
    :param ticker: clock stream used as the alignment stream.
    :param ti: TimeInterval to execute over.
    :param tool_name: name of the generator tool (and fixture key).
    :param kwargs: passed through to the tool constructor (e.g. seed=1234).
    :return: the populated stream, so callers can combine streams further.
    """
    random = hs.channel_manager.memory.get_or_create_stream(tool_name)
    # Purge so repeated runs within a session start from an empty stream.
    random.purge()
    tool = getattr(hs.plugins.data_generators.tools, tool_name)
    tool(**kwargs).execute(sources=[], sink=random, interval=ti, alignment_stream=ticker)
    values = random.window().values()
    # NOTE(review): leftover debug print — consider removing; it makes the
    # test output noisy.
    print(values)
    # tester.assertListEqual(values, RANDOM_VALUES[tool_name])
    # some values are different between python 2.x and python 3.x
    # Tools with version-dependent output have '<name>2'/'<name>3' fixture
    # keys instead of a plain '<name>' entry.
    if tool_name not in RANDOM_VALUES:
        if sys.version_info[0] < 3:
            rvs = RANDOM_VALUES[tool_name + '2']
        else:
            rvs = RANDOM_VALUES[tool_name + '3']
    else:
        rvs = RANDOM_VALUES[tool_name]
    assert_all_close(values, rvs, 1e-15)
    return random
class TestTools(unittest.TestCase):
    """Tests for the data_generators and data_importers plugin tools."""

    def run(self, result=None):
        # Wrap every test run in a fresh HyperStream resource so individual
        # tests are isolated from each other.
        with resource_manager() as resource:
            self.hs = resource
            super(TestTools, self).run(result)

    def test_data_generators(self):
        """Each RNG tool must reproduce the precomputed fixture values."""
        ti = TimeInterval(t1, t1 + minute)
        # Create a clock stream to align the random numbers to
        ticker = self.hs.channel_manager.memory.get_or_create_stream("ticker")
        # A try/except AttributeError that only re-raised was removed here;
        # it had no effect on behaviour.
        self.hs.tools.clock().execute(sources=[], sink=ticker, interval=ti)
        # Test random number generators
        rng_helper(self.hs, ticker, ti, "betavariate", alpha=1.0, beta=1.0, seed=1234)
        rng_helper(self.hs, ticker, ti, "expovariate", lambd=1.0, seed=1234)
        rng_helper(self.hs, ticker, ti, "gammavariate", alpha=1.0, beta=1.0, seed=1234)
        rng_helper(self.hs, ticker, ti, "gauss", seed=1234)
        rng_helper(self.hs, ticker, ti, "lognormvariate", mu=0.0, sigma=1.0, seed=1234)
        rng_helper(self.hs, ticker, ti, "normalvariate", mu=0.0, sigma=1.0, seed=1234)
        rng_helper(self.hs, ticker, ti, "paretovariate", alpha=1.0, seed=1234)
        rng_helper(self.hs, ticker, ti, "randint", a=1, b=5, seed=1234)
        rng_helper(self.hs, ticker, ti, "random", seed=1234)
        rng_helper(self.hs, ticker, ti, "randrange", start=10, stop=20, step=2, seed=1234)
        rng_helper(self.hs, ticker, ti, "triangular", low=2, high=5, mode=4, seed=1234)
        rng_helper(self.hs, ticker, ti, "uniform", a=2, b=5, seed=1234)
        rng_helper(self.hs, ticker, ti, "vonmisesvariate", mu=0.0, kappa=1.0, seed=1234)
        rng_helper(self.hs, ticker, ti, "weibullvariate", alpha=1.0, beta=1.0, seed=1234)
        # Test custom random function
        import math
        rng_helper(self.hs, ticker, ti, "custom", func=lambda dt: math.sin(datetime2unix(dt)))

    def test_combine_generators(self):
        """aligned_merge + list_sum must equal an element-wise Python sum."""
        with HyperStream(file_logger=False, console_logger=False, mqtt_logger=None) as hs:
            ti = TimeInterval(t1, t1 + minute)
            # NOTE(review): the streams below are created on self.hs (the
            # fixture instance) while the tools execute on the local `hs`
            # instance — this looks unintentional; confirm both resolve to
            # the same channel manager before relying on it.
            # Create a clock stream to align the random numbers to
            ticker = self.hs.channel_manager.memory.get_or_create_stream("ticker")
            hs.tools.clock().execute(sources=[], sink=ticker, interval=ti)
            gauss = rng_helper(hs, ticker, ti, "gauss", seed=1234)
            import math
            custom = rng_helper(hs, ticker, ti, "custom", func=lambda dt: math.sin(datetime2unix(dt)))
            merged = self.hs.channel_manager.memory.get_or_create_stream("merged")
            hs.tools.aligned_merge().execute(sources=[gauss, custom], sink=merged, interval=ti)
            summed = self.hs.channel_manager.memory.get_or_create_stream("summed")
            hs.tools.list_sum().execute(sources=[merged], sink=summed, interval=ti)
            self.assertListEqual(
                summed.window().values(),
                list(map(sum, zip(gauss.window().values(), custom.window().values())))
            )

    def test_data_importers(self):
        """csv_reader must load the sea-ice CSV into a stream end-to-end."""
        with HyperStream(file_logger=False, console_logger=False, mqtt_logger=None) as hs:
            reader = hs.plugins.data_importers.tools.csv_reader('plugins/data_importers/data/sea_ice.csv')
            # noinspection PyTypeChecker
            ti = TimeInterval(datetime(1990, 1, 1).replace(tzinfo=UTC), datetime(2011, 4, 1).replace(tzinfo=UTC))
            # TODO: More complicated tests, including headers, different delimiters, messy data etc etc.
            sea_ice = hs.channel_manager.memory.get_or_create_stream("sea_ice")
            reader.execute(sources=[], sink=sea_ice, interval=ti)
            sea_ice_sums = hs.channel_manager.mongo.get_or_create_stream("sea_ice_sums")
            hs.tools.list_sum().execute(sources=[sea_ice], sink=sea_ice_sums, interval=ti)
            # print(sea_ice_sums.window().values())
            # TODO: the below assertion is causing travis to fail - why?
            # assert_all_close(sea_ice_sums.window().values(), list(map(sum, sea_ice.window().values())), 1e-5)
|
import cv2
class Selector:
    """Interactive rectangle selector attached to an OpenCV window.

    Tracks a left-button drag and keeps ``rect`` as an (x, y, w, h) tuple
    normalised so that w and h are non-negative regardless of drag
    direction.  ``on_selecting(rect)`` fires on every update while the
    button is held; ``on_selected(rect)`` fires once on release.
    """

    # OpenCV mouse-event codes: the numeric values of cv2.EVENT_MOUSEMOVE,
    # cv2.EVENT_LBUTTONDOWN and cv2.EVENT_LBUTTONUP.  Named here to replace
    # the previous magic numbers 0/1/4.
    _EVENT_MOUSEMOVE = 0
    _EVENT_LBUTTONDOWN = 1
    _EVENT_LBUTTONUP = 4

    window_name: str
    dragging: bool

    def __init__(self, window_name) -> None:
        self.window_name = window_name
        self.dragging = False
        # ((x0, y0), (x1, y1)): drag anchor and current corner.
        self.points = ((0, 0), (0, 0))
        # Replaceable callbacks; both receive the current (x, y, w, h).
        self.on_selecting = lambda rect: None
        self.on_selected = lambda rect: None
        self.rect = (0, 0, 0, 0)
        try:
            cv2.setMouseCallback(window_name, self.on_mouse)
        except cv2.error:
            # The window does not exist yet: create it, then retry.
            cv2.namedWindow(window_name)
            cv2.setMouseCallback(window_name, self.on_mouse)

    def on_mouse(self, event: int, x: int, y: int, flags: int, user_data):
        """OpenCV mouse callback implementing the drag state machine.

        :param event: OpenCV event code (see class constants above).
        :param x: cursor x in window coordinates.
        :param y: cursor y in window coordinates.
        :param flags: OpenCV event flags (unused, required by signature).
        :param user_data: opaque callback payload (unused).
        """
        if event == self._EVENT_MOUSEMOVE and self.dragging:
            x0, y0 = self.points[0]
            self.points = ((x0, y0), (x, y))
            # Normalise to top-left corner plus non-negative extents.
            self.rect = (min(x0, x), min(y0, y), abs(x - x0), abs(y - y0))
            self.on_selecting(self.rect)
        elif event == self._EVENT_LBUTTONDOWN:
            self.dragging = True
            self.points = ((x, y), (x, y))
            self.rect = (x, y, 0, 0)
            self.on_selecting(self.rect)
        elif event == self._EVENT_LBUTTONUP:
            self.dragging = False
            self.on_selected(self.rect)
|
import pytest
from sqlalchemy.orm import Session
from app.orm import Server
from app.service import power_off
class MockSession(Session):
    """SQLAlchemy Session stub whose commit() is a no-op.

    Lets service-layer code run in unit tests without ever touching a
    real database transaction.
    """

    def commit(self):
        # Deliberately do nothing: unit tests must not persist anything.
        return None
def test_mock():
    """power_off() should report success and clear the power flag."""
    session = MockSession()
    box = Server()
    box.id = 1
    box.power_on = True
    result = power_off(session, box)
    assert result is True
    assert box.power_on is False
# KMP Algorithm
# Record the wall-clock start time so the script can report its total
# runtime at the end.
import time
start_time = time.time()
def KMPurlMatch(keyword, url):
    """Find every occurrence of ``keyword`` in ``url`` with Knuth-Morris-Pratt.

    Prints "Keyword found at index N" for each match (kept for backward
    compatibility with the original behaviour) and additionally returns the
    list of 0-based start indices so callers can use the result directly.
    Overlapping matches are reported.

    :param keyword: pattern string to search for (may be empty).
    :param url: text to search in.
    :return: list of match start indices in increasing order.
    """
    key_len = len(keyword)
    url_len = len(url)
    matches = []
    # Guard: an empty pattern previously crashed with an IndexError on
    # keyword[0]; by convention it simply has no matches.
    if key_len == 0:
        return matches
    # lps[i] = length of the longest proper prefix of keyword[:i+1] that is
    # also a suffix of it (the KMP failure function), computed inline so
    # this function is self-contained.
    lps = [0] * key_len
    prefix_len = 0
    i = 1
    while i < key_len:
        if keyword[i] == keyword[prefix_len]:
            prefix_len += 1
            lps[i] = prefix_len
            i += 1
        elif prefix_len != 0:
            # Fall back along the failure function; do not advance i.
            prefix_len = lps[prefix_len - 1]
        else:
            lps[i] = 0
            i += 1
    i = 0  # index into url
    j = 0  # index into keyword
    while i < url_len:
        if keyword[j] == url[i]:
            i += 1
            j += 1
            if j == key_len:
                print("Keyword found at index " + str(i - j))
                matches.append(i - j)
                # Continue searching, allowing overlapping matches.
                j = lps[j - 1]
        elif j != 0:
            # Mismatch after j matched characters: reuse the prefix info.
            j = lps[j - 1]
        else:
            i += 1
    return matches
def calculateLPSArray(keyword, Key, lps):
len = 0 # length of the previous longest prefix suffix
lps[0] # lps[0] is always 0
i = 1
# the loop calculates lps[i] for i = 1 to M-1
while i < Key:
if keyword[i]== keyword[len]:
len += 1
lps[i] = len
i += 1
else:
if len != 0:
len = lps[len-1]
# do not increment i here
else:
lps[i] = 0
i += 1
# Demo run: search the sample string for "target" (matches at indices 0
# and 16) and report the elapsed wall-clock time.
url = "targetthisstringtargett"
keyword = "target"
KMPurlMatch(keyword, url)
end_time = time.time()
print (f"runtime: {end_time - start_time} seconds")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import os
import datetime
import codecs
import re
# numpy/pandas are treated as optional at import time; code paths that
# need them will fail with a NameError later if they are absent.
try:
    import numpy as np
    import pandas as pd
except ImportError:
    # Narrowed from a bare ``except`` so that unrelated errors raised
    # while importing (e.g. a broken installation) are not swallowed.
    pass
from .mapping import StationMapping, ParameterMapping
from .gismo import GISMOdata
from .. import utils
from sharkpylib.file.file_handlers import ListDirectory
from .exceptions import *
import logging
logger = logging.getLogger('gismo_session')
# Load the list of available QC1 routines from 'list_qc1_routines.txt'
# once at import time.
list_dir_object = ListDirectory()
QC1_ROUTINE_LIST = list_dir_object.get_file_object('list_qc1_routines.txt').get()
class PluginFactory(object):
    """
    Registry of the sampling-type classes available in this module, plus
    the constructor arguments each one requires.

    To activate a new class, add it to ``self.classes`` and list its
    required ``__init__`` arguments in ``self.required_arguments``.
    """
    def __init__(self):
        # Add key and class to this dict to activate a sampling type.
        self.classes = {
            'Ferrybox CMEMS': CMEMSferrybox,
            'Fixed platforms CMEMS': CMEMSFixedPlatform,
            'PhysicalChemical SHARK': SHARKfilePhysicalChemical,
            'DV CTD': DVStandardFormatCTD,
            'NODC CTD': NODCStandardFormatCTD,
        }
        # ferrybox_requirements = ['data_file_path', 'settings_file_path', 'root_directory']
        base_args = ['data_file_path', 'settings_file_path']
        self.required_arguments = {
            'Ferrybox CMEMS': base_args,
            'Fixed platforms CMEMS': base_args + ['depth'],
            'PhysicalChemical SHARK': base_args,
            'DV CTD': base_args,
            'NODC CTD': base_args,
        }

    def __str__(self):
        available = '\n'.join(sorted(self.classes))
        return '\nSampling type factory. Available sampling types are:\n{}\n'.format(available)

    def get_list(self):
        """Return the registered sampling-type names in alphabetical order."""
        return sorted(self.classes)

    def get_object(self, sampling_type, *args, **kwargs):
        """Instantiate and return the class registered for ``sampling_type``.

        Raises GISMOExceptionInvalidClass for unknown sampling types.
        """
        cls = self.classes.get(sampling_type)
        if not cls:
            raise GISMOExceptionInvalidClass
        kwargs['sampling_type'] = sampling_type
        return cls(*args, **kwargs)

    def get_requirements(self, sampling_type):
        """
        Created 20181005
        Return the argument names required to initialise ``sampling_type``.

        Raises GISMOExceptionInvalidClass for unknown sampling types.
        :param sampling_type:
        :return: list of required argument names
        """
        if not self.classes.get(sampling_type):
            raise GISMOExceptionInvalidClass
        return self.required_arguments.get(sampling_type)
# ==============================================================================
# ==============================================================================
class GISMOfile(GISMOdata):
"""
Updated 20181005
Base class for a GISMO data file.
A GISMO-file only has data from one sampling type.
"""
# ==========================================================================
    def __init__(self, data_file_path=None, settings_file_path=None, root_directory=None, mapping_files=None, **kwargs):
        """Load a GISMO data file together with its settings and mappings.

        :param data_file_path: path to the data file; its basename (without
            extension) becomes ``self.file_id``.
        :param settings_file_path: path to the sampling-type settings file.
        :param root_directory: optional root directory (stored, see TODOs).
        :param mapping_files: mapping-file dict used by station/parameter
            mapping objects.
        :param kwargs: forwarded to GISMOdata; may contain 'sampling_type'.
        """
        GISMOdata.__init__(self, **kwargs)
        # super().__init__()
        self.file_path = data_file_path
        self.file_id, ending = os.path.splitext(os.path.basename(data_file_path))
        self.settings_file_path = settings_file_path
        self.export_df = None
        self.root_directory = root_directory
        self.mapping_files = mapping_files
        self.sampling_type = kwargs.get('sampling_type', '')
        # Order matters: settings must be loaded before the mappings and
        # before reading file properties below.
        self._load_settings_file()
        self._load_station_mapping()
        self._load_parameter_mapping()
        self.comment_id = self.settings.get_data('properties', 'comment_id')
        self.file_encoding = self.settings.get_data('properties', 'encoding')
        self.column_separator = self.settings.get_data('properties', 'column_separator')
        # Settings files spell the tab separator as the literal word 'tab'.
        if self.column_separator == 'tab':
            self.column_separator = '\t'
        self._load_data()
        self._do_import_changes(**kwargs)
        self.parameter_list = []
        # Mandatory columns plus every parameter that has a quality flag.
        self.parameter_list = ['time', 'lat', 'lon', 'depth', 'visit_id', 'visit_depth_id'] + self.qpar_list
        # self.parameter_list = ['time', 'lat', 'lon', 'depth'] + self.qpar_list
        self.filter_data_options = []
        self.flag_data_options = ['flags']
        self.mask_data_options = ['include_flags', 'exclude_flags']
        self.save_data_options = ['file_path', 'overwrite']
        # TODO: self.valid_flags = self.settings.flag_list[:]
        self.valid_flags = self.settings.get_flag_list()
        self.valid_qc_routines = []
@property
def qf_prefix(self):
return self.settings.get_data('parameter_mapping', 'qf_prefix')
@property
def qf_suffix(self):
return self.settings.get_data('parameter_mapping', 'qf_suffix')
# ==========================================================================
    def _load_settings_file(self):
        """Load the sampling-type settings and cache the missing-value marker."""
        # TODO: self.settings = SamplingTypeSettings(self.settings_file_path, root_directory=self.root_directory)
        self.settings = SamplingTypeSettings(self.settings_file_path)
        # TODO: self.missing_value = str(self.settings.info.missing_value)
        # NOTE(review): missing_value is used both as a string (import) and
        # via float() (export) — confirm the settings file always stores a
        # numeric-looking value.
        self.missing_value = self.settings.get_data('properties', 'missing_value')
        # # TODO: nr_decimals = self.settings.info.number_of_decimals_for_float
        # nr_decimals = self.settings.get_data('properties', 'number_of_decimals_for_float')
        # if nr_decimals:
        #     self.nr_decimals = '%s.%sf' % ('%', nr_decimals)
        # else:
        #     self.nr_decimals = None
# def _find_mapping_files(self):
# # Mapping files
# if not os.path.exists(self.mapping_files_directory):
# os.makedirs(self.mapping_files_directory)
# self.mapping_files = {}
# for file_name in os.listdir(self.mapping_files_directory):
# if not file_name.endswith('txt'):
# continue
# self.mapping_files[file_name] = os.path.join(self.mapping_files_directory, file_name)
# ==========================================================================
def _load_station_mapping(self):
self.station_mapping = StationMapping(settings_object=self.settings,
mapping_files=self.mapping_files)
# ==========================================================================
def _load_parameter_mapping(self):
self.parameter_mapping = ParameterMapping(settings_object=self.settings,
mapping_files=self.mapping_files)
# ==========================================================================
    def _load_data(self, **kwargs):
        """
        Updated 20181005
        Read the data file into ``self.df`` (all values kept as str).
        All comment lines are stored in attribute metadata.
        Also derives station names and the internal/external parameter maps.
        :param kwargs:
        :return:
        """
        logger.info(' Loading file {}'.format(self.file_id))
        logger.info(' encoding {}'.format(self.file_encoding))
        logger.info(' comment_id {}'.format(self.comment_id))
        # Looping through the file seems to be faster then pd.read_csv regardless if there are comment lines or not.
        # Note that all values are of type str.
        self.metadata_raw = []
        header = []
        data = []
        with codecs.open(self.file_path, encoding=self.file_encoding) as fid:
            for line in fid:
                if self.comment_id not in [None, '', False] and line.startswith(self.comment_id):
                    # We have comments and need to load all lines in file
                    self.metadata_raw.append(line)
                else:
                    # First non-comment line after comments: build the
                    # metadata object, which may override the separator.
                    # NOTE(review): assumes self.metadata is pre-initialised
                    # (falsy) elsewhere, e.g. by the base class — confirm.
                    if self.metadata_raw and not self.metadata:
                        self.metadata = SHARKmetadataStandardBase(self.metadata_raw, comment_id=self.comment_id)
                        self.column_separator = self.metadata.data_delimiter
                        logger.info('data deliimiter {}'.format(self.column_separator))
                    # The separator is treated as a regex pattern here.
                    split_line = re.split(self.column_separator, line.strip('\n\r'))
                    # split_line = line.strip('\n\r').split(self.column_separator)
                    split_line = [item.strip() for item in split_line]
                    if not header:
                        header = split_line
                    else:
                        data.append(split_line)
        self.original_columns = header[:]
        self.df = pd.DataFrame(data, columns=header)
        # Remove columns with no column name
        try:
            self.df.drop('', axis=1, inplace=True)
            self.original_columns = [col for col in self.original_columns if col]
        except KeyError:
            # No empty-named column present; nothing to drop.
            pass
        self.df.fillna('', inplace=True)
        # Find station id (platform type)
        # TODO: station = self.settings.column.station
        station = self.settings.get_data('mandatory_columns', 'station')
        # Settings may give the station as 'index=<n>' (column position).
        if 'index' in station:
            col = int(station.split('=')[-1].strip())
            self.external_station_name = self.df.columns[col]
            self.internal_station_name = self.station_mapping.get_internal(self.external_station_name)
        else:
            self.external_station_name = 'Unknown'
            self.internal_station_name = 'Unknown'
        # self.platform_type = self.station_mapping.get_platform_type(self.external_station_name)
        # Save parameters
        self.parameters_external = [external for external in self.df.columns if 'Unnamed' not in external]
        self.parameters_internal = [self.parameter_mapping.get_internal(external) for external in
                                    self.parameters_external]
        self.internal_to_external = dict(zip(self.parameters_internal, self.parameters_external))
        self.external_to_internal = dict(zip(self.parameters_external, self.parameters_internal))
        # Parameters that have an associated quality-flag column.
        self.qpar_list = sorted([par for par in self.parameters_external if self.get_qf_par(par) not in [None, False]])
        self.mapped_parameters = [self.parameter_mapping.get_internal(par) for par in self.qpar_list]
# ==========================================================================
def _do_import_changes(self, **kwargs):
    """
    Apply post-import adjustments to self.df.

    Adds the mandatory columns (time, lat, lon, depth, visit ids) via
    self._add_columns and blanks out every occurrence of the file's
    missing-value marker.

    :param kwargs: forwarded to self._add_columns (e.g. depth).
    """
    self._add_columns(**kwargs)
    # Replace the missing-value marker with empty string, in place.
    self.df.replace(self.missing_value, '', inplace=True)
# ==========================================================================
def _prepare_export(self):
# Make a copy to be used for export
self.export_df = self.df[self.original_columns].copy()
try:
self.export_df.replace(np.nan, float(self.missing_value), inplace=True)
except:
pass
def _get_argument_list(self, arg):
"""
Updated 20181004
Returns a list. If type(arg) != list/array/tuple, [arg] is returned
:param arg:
:return: list
"""
if type(arg) in [list, tuple, np.array, np.ndarray]:
return list(arg)
else:
return [arg]
def _get_pandas_series(self, value):
"""
Created 20181005
:param value: boolean or value
:return: a pandas series of length len(self.df) with the given value.
"""
if type(value) == bool:
if value:
return pd.Series(np.ones(len(self.df), dtype=bool))
else:
return pd.Series(np.zeros(len(self.df), dtype=bool))
else:
return pd.Series([value]*len(self.df))
# ==========================================================================
def _add_columns(self, **kwargs):
    """
    Add columns for time, lat, lon and depth.
    Information about parameter name should be in settings.

    Also adds 'visit_id' (lat+lon+time) and 'visit_depth_id'
    (lat+lon+time+depth) identifier columns.

    :param kwargs: may contain 'depth' to set a fixed depth for all rows.
    :raises GISMOExceptionInvalidParameter: if the configured depth
        column is missing from the data.
    """
    # print '='*30
    # for c in sorted(self.df.columns):
    #     print c
    # print '-'*30
    # ----------------------------------------------------------------------
    # Time
    # Candidate datetime formats. NOTE(review): only referenced by the
    # commented-out fallback parsing below.
    time_formats = ['%Y%m%d%H%M',
                    '%Y%m%d%H:%M',
                    '%Y%m%d%H.%M',
                    '%Y-%m-%d%H%M',
                    '%Y-%m-%d%H:%M',
                    '%Y-%m-%d%H.%M']
    self.time_format = None
    datetime_list = []
    # TODO: time_par = self.settings.column.time
    time_par = self.settings.get_data('mandatory_columns', 'time')
    if 'index' in time_par:
        # At this moment mainly for CMEMS-files
        # Setting is on the form 'index=<nr>': pick that column by position.
        time_par = self.df.columns[int(time_par.split('=')[-1].strip())]
        self.df['time'] = pd.to_datetime(self.df[time_par], format=self.time_format)
    else:
        # TODO: time_pars = self.settings.column.get_list('time')
        # TODO: self.df['time'] = self.df[time_pars].apply(apply_datetime_object_to_df, axis=1)
        self.df['time'] = self.df[time_par].apply(apply_datetime_object_to_df, axis=1)
        # print(time_pars)
        # for i in range(len(self.df)):
        #     # First look in settings and combine
        #     value_list = []
        #     for par in time_pars:
        #         value_list.append(self.df.ix[i, par])
        #
        #     value_str = ''.join(value_list)
        #
        #     if not self.time_format:
        #         for tf in time_formats:
        #             try:
        #                 datetime.datetime.strptime(value_str, tf)
        #                 self.time_format = tf
        #                 break
        #             except:
        #                 pass
        #
        #     datetime_list.append(datetime.datetime.strptime(value_str, self.time_format))
        #
        # self.df['time'] = pd.Series(datetime_list)
    # ----------------------------------------------------------------------
    # Position
    # TODO: lat_par = self.parameter_mapping.get_external(self.settings.column.lat)
    # TODO: lon_par = self.parameter_mapping.get_external(self.settings.column.lon)
    lat_par = self.parameter_mapping.get_external(self.settings.get_data('mandatory_columns', 'lat'))
    lon_par = self.parameter_mapping.get_external(self.settings.get_data('mandatory_columns', 'lon'))
    self.df['lat'] = self.df[lat_par]
    self.df['lon'] = self.df[lon_par]
    # ----------------------------------------------------------------------
    # Station ID
    # Concatenated string id for a visit (position + time).
    self.df['visit_id'] = self.df['lat'].astype(str) + self.df['lon'].astype(str) + self.df['time'].astype(str)
    print(kwargs.get('depth'))
    if kwargs.get('depth', None) is not None:
        # Fixed depth supplied by caller (e.g. fixed platforms).
        self.df['depth'] = kwargs.get('depth')
    else:
        # TODO: depth_par = self.parameter_mapping.get_external(self.settings.column.depth)
        depth_par = self.parameter_mapping.get_external(self.settings.get_data('mandatory_columns', 'depth'))
        if 'meter' in depth_par:
            # Setting like '5 meter': use the literal value for all rows.
            self.df['depth'] = depth_par.split()[0].strip()
        else:
            try:
                self.df['depth'] = self.df[depth_par].astype(float)
            except KeyError as e:
                raise GISMOExceptionInvalidParameter(e)
    self.df['visit_depth_id'] = self.df['lat'].astype(str) + self.df['lon'].astype(str) + self.df['time'].astype(
        str) + self.df['depth'].astype(str)
def add_qc_comment(self, comment, **kwargs):
    """
    Add a quality-control comment to the file metadata.

    :param comment: the comment text.
    :param kwargs: forwarded to metadata.add_qc_comment.
    :raises GISMOExceptionNoMetadata: if the file has no metadata.
    :return:
    """
    if self.has_metadata:
        self.metadata.add_qc_comment(comment, **kwargs)
    else:
        raise GISMOExceptionNoMetadata
def flag_data(self, flag, *args, **kwargs):
    """
    Created 20181005
    :param flag: The flag you want to set for the parameter
    :param args: parameters that you want to flag.
    :param kwargs: conditions for flagging. Options are listed in self.flag_data_options.
        The special key 'flags' restricts flagging to rows currently
        carrying one of the given flags.
        NOTE(review): 'flags' is validated against self.flag_data_options
        like any other key -- it must be declared there; confirm.
    :raises GISMOExceptionInvalidFlag: if flag is not in self.valid_flags.
    :raises GISMOExceptionInvalidParameter: if a parameter is not in the data.
    :raises GISMOExceptionMissingQualityParameter: if a parameter has no QF column.
    :return: None
    """
    flag = str(flag)
    if flag not in self.valid_flags:
        raise GISMOExceptionInvalidFlag('"{}", valid flags are "{}"'.format(flag, ', '.join(self.valid_flags)))
    if flag == 'no flag':
        # 'no flag' is stored as the empty string.
        flag = ''
    # Check dependent parameters
    # NOTE(review): assumes get_dependent_parameters returns a list here
    # (it returns None when no settings are loaded) -- confirm.
    all_args = []
    for arg in args:
        all_args.append(arg)
        all_args.extend(self.get_dependent_parameters(arg))
    # Work on external column names
    args = [self.internal_to_external.get(arg, arg) for arg in all_args]
    # args = dict((self.internal_to_external.get(key, key), key) for key in args)
    # if not all([arg in self.df.columns for arg in args]):
    #     raise GISMOExceptionInvalidInputArgument
    # kwargs contains conditions for flagging. Options are listed in self.flag_data_options.
    # Build one row-selection boolean; all conditions are ANDed together.
    boolean = self._get_pandas_series(True)
    for key, value in kwargs.items():
        # Check valid option
        if key not in self.flag_data_options:
            raise GISMOExceptionInvalidOption
        if key == 'time':
            value_list = self._get_argument_list(value)
            boolean = boolean & (self.df.time.isin(value_list))
        elif key == 'time_start':
            boolean = boolean & (self.df.time >= value)
        elif key == 'time_end':
            boolean = boolean & (self.df.time <= value)
        elif key == 'depth':
            value_list = self._get_argument_list(value)
            boolean = boolean & (self.df.depth.isin(value_list))
        elif key == 'depth_min':
            boolean = boolean & (self.df.depth >= value)
        elif key == 'depth_max':
            boolean = boolean & (self.df.depth <= value)
    # Flag data
    for par in args:
        if par not in self.df.columns:
            print('par', par)
            print()
            print('\n'.join(sorted(self.df.columns)))
            raise GISMOExceptionInvalidParameter('Parameter {} not in data'.format(par))
        qf_par = self.get_qf_par(par)
        if not qf_par:
            raise GISMOExceptionMissingQualityParameter('for parameter "{}"'.format(par))
        flag_list = kwargs.get('flags', None)
        if flag_list:
            if type(flag_list) != list:
                flag_list = [flag_list]
            if 'no flag' in flag_list:
                # 'no flag' rows are stored as the empty string.
                flag_list.pop(flag_list.index('no flag'))
                flag_list.append('')
            # Only touch rows whose current flag is in flag_list.
            par_boolean = boolean & (self.df[qf_par].isin(flag_list))
        else:
            par_boolean = boolean.copy(deep=True)
        # print(par, qf_par, flag)
        self.df.loc[par_boolean, qf_par] = flag
# ==========================================================================
def old_get_boolean_for_time_span(self, start_time=None, end_time=None, invert=False):
    """
    Return a boolean numpy array marking the rows of self.df whose time
    lies within the given span (both bounds inclusive).

    :param start_time: lower bound, or None for no lower bound.
    :param end_time: upper bound, or None for no upper bound.
    :param invert: if True the selection is inverted.
    :return: numpy bool array of length len(self.df.time)
    """
    time_series = self.df.time
    selection = np.ones(len(time_series), dtype=bool)
    if start_time:
        selection = selection & np.array(time_series >= start_time)
    if end_time:
        selection = selection & np.array(time_series <= end_time)
    return np.invert(selection) if invert else selection
def get_data(self, *args, **kwargs):
    """
    Created 20181024
    Updated 20181106
    Fetch data for the given parameters, defaulting to float output.

    :param args: parameters that you want to have data for.
    :param kwargs: filter specification forwarded to self._get_data;
        caller-provided options override the float default.
    :return: dict with args as keys and list(s) as values.
    """
    options = dict(type_float=True)
    options.update(kwargs)
    return self._get_data(*args, **options)
# ===========================================================================
def _get_data(self, *args, **kwargs):
    """
    Created 20181004
    Updated 20181024
    :param args: parameters that you want to have data for.
    :param kwargs: specify filter. Recognised keys:
        filter_options: dict of row filters; keys must be in
            self.filter_data_options.
        mask_options: dict controlling masking of flagged values;
            keys must be in self.mask_data_options
            ('include_flags' / 'exclude_flags').
        type_float / type_int: True, or a list of parameter names, to
            convert values to a numeric array ('' becomes NaN,
            unconvertible values are kept as-is).
    :raises GISMOExceptionMissingInputArgument: if no parameters given.
    :raises GISMOExceptionInvalidInputArgument: if a parameter is unknown.
    :raises GISMOExceptionInvalidOption: for unknown filter/mask options.
    :return: dict with args as keys and list(s) as values.
    """
    if not args:
        raise GISMOExceptionMissingInputArgument
    # Work on external column names
    # Maps external name -> name as requested by the caller.
    args = dict((self.internal_to_external.get(key, key), key) for key in args)
    for arg in args:
        if arg not in self.df.columns:
            raise GISMOExceptionInvalidInputArgument(arg)
        elif arg not in self.parameter_list:
            raise GISMOExceptionInvalidInputArgument(arg)
    # Create filter boolean
    # All given filter conditions are ANDed together.
    boolean = self._get_pandas_series(True)
    for key, value in kwargs.get('filter_options', {}).items():
        if value in [None, False]:
            continue
        if key not in self.filter_data_options:
            raise GISMOExceptionInvalidOption('{} not in {}'.format(key, self.filter_data_options))
        if key == 'time':
            value_list = self._get_argument_list(value)
            boolean = boolean & (self.df.time.isin(value_list))
        elif key == 'time_start':
            boolean = boolean & (self.df.time >= value)
        elif key == 'time_end':
            boolean = boolean & (self.df.time <= value)
        elif key == 'visit_depth_id':
            value_list = self._get_argument_list(value)
            boolean = boolean & (self.df.visit_depth_id.isin(value_list))
        elif key == 'visit_id':
            value_list = self._get_argument_list(value)
            boolean = boolean & (self.df.visit_id.isin(value_list))
        elif key == 'depth':
            value_list = self._get_argument_list(value)
            boolean = boolean & (self.df.depth.isin(value_list))
        elif key == 'depth_min':
            boolean = boolean & (self.df.depth >= value)
        elif key == 'depth_max':
            boolean = boolean & (self.df.depth <= value)
    # Extract filtered dataframe
    # filtered_df = self.df.loc[boolean, sorted(args)].copy(deep=True)
    filtered_df = self.df.loc[boolean].copy(deep=True)
    mask_options = kwargs.get('mask_options', {})
    # Create return dict and return
    return_dict = {}
    for par in args:
        par_array = filtered_df[par].values
        # if par == 'time':
        #     par_array = filtered_df[par].values
        # elif
        # try:
        #     par_array = filtered_df[par].astype(float).values
        # except:
        # Check mask options
        for opt, value in mask_options.items():
            if opt not in self.mask_data_options:
                raise GISMOExceptionInvalidOption
            if opt == 'include_flags':
                # Blank out values whose flag is NOT in the given list.
                qf_par = self.get_qf_par(par)
                if not qf_par:
                    continue
                # print('\n'.join(sorted(filtered_df.columns)))
                qf_list = []
                for v in value:
                    if v == 'no flag':
                        v = ''
                    else:
                        v = str(v)
                    qf_list.append(v)
                keep_boolean = filtered_df[qf_par].astype(str).isin(qf_list)
                par_array[~keep_boolean] = ''
            elif opt == 'exclude_flags':
                # Blank out values whose flag IS in the given list.
                qf_par = self.get_qf_par(par)
                if not qf_par:
                    continue
                qf_list = []
                for v in value:
                    if v == 'no flag':
                        v = ''
                    else:
                        v = str(v)
                    qf_list.append(v)
                nan_boolean = filtered_df[qf_par].astype(str).isin(qf_list)
                par_array[nan_boolean] = ''
        # Check output type
        if par == 'time':
            pass
        elif kwargs.get('type_float') is True or par in kwargs.get('type_float', []):
            float_par_list = []
            for value in par_array:
                try:
                    if value:
                        float_par_list.append(float(value))
                    else:
                        float_par_list.append(np.nan)
                except:
                    float_par_list.append(value)
                    # raise ValueError
            par_array = np.array(float_par_list)
        elif kwargs.get('type_int') is True or par in kwargs.get('type_int', []):
            # NOTE(review): this branch is identical to the float branch
            # above (no actual int conversion) -- confirm intended.
            float_par_list = []
            for value in par_array:
                try:
                    if value:
                        float_par_list.append(float(value))
                    else:
                        float_par_list.append(np.nan)
                except:
                    float_par_list.append(value)
                    # raise ValueError
            par_array = np.array(float_par_list)
        # Map to given column name
        return_dict[args[par]] = par_array
    return return_dict
# ==========================================================================
def get_dependent_parameters(self, par):
    """
    Look up the parameters that depend on *par* in the settings.

    :param par: parameter name (internal or external).
    :return: None if no settings are loaded, otherwise the list of
        dependent parameters (default empty list).
    """
    if not self.settings:
        return None
    external_par = self.parameter_mapping.get_external(par)
    # print('FLAG, dependent', external_par, type(external_par))
    return self.settings.get_data('dependent_parameters', external_par, [])
def get_parameter_list(self, **kwargs):
    """
    Return a sorted list of the file's parameters.

    :param kwargs: external=True returns the external (file) names,
        otherwise internal (mapped) names are returned.
    :return: sorted list of parameter names.
    """
    if kwargs.get('external'):
        return sorted(self.parameter_list)
    internal_names = (self.parameter_mapping.get_internal(par) for par in self.parameter_list)
    return sorted(internal_names)
def get_position(self, **kwargs):
    """
    Return [lat, lon] data arrays for the data set.

    :raises GISMOExceptionMethodNotImplemented: if the data has no
        'lat' column.
    """
    position = self.get_data('lat', 'lon')
    if 'lat' not in self.df:
        raise GISMOExceptionMethodNotImplemented
    return [position['lat'], position['lon']]
def get_dict_with_matching_parameters(self, match_parameter):
    """
    Returns a dictionary for the parameters that matches the parameters in matchparameters.
    key is name in self.df, values is name in match_parameter.

    Matching is a case-insensitive substring test against both the
    internal and the external parameter names.

    :param match_parameter: iterable of strings to match against.
    :return: dict {internal parameter name: matched string}
    """
    return_dict = {}
    # Compare against both internal and external names.
    par_list = list(set(self.get_parameter_list(internal=True) + self.get_parameter_list(external=True)))
    for par in par_list:
        for m_par in match_parameter:
            if m_par.lower() in par.lower():
                return_dict[self.get_internal_parameter_name(par)] = m_par
                # NOTE(review): 'continue' only moves on to the next
                # m_par, so a later match can overwrite this entry --
                # possibly 'break' was intended. Kept as-is.
                continue
    return return_dict
def get_internal_parameter_name(self, parameter):
    """
    Translate a parameter name to its internal representation.

    :param parameter:
    :return: internal name of the given parameter.
    """
    mapping = self.parameter_mapping
    return mapping.get_internal(parameter)
def get_external_parameter_name(self, parameter):
    """
    Translate a parameter name to its external (file) representation.

    :param parameter:
    :return: external name of the given parameter.
    """
    mapping = self.parameter_mapping
    return mapping.get_external(parameter)
def get_unit(self, par='', **kwargs):
    """
    Returns the unit of the given parameter if found.

    :param par: parameter name.
    :return: unit string from the parameter mapping.
    """
    mapping = self.parameter_mapping
    return mapping.get_unit(par)
def get_qf_list(self, *args, **kwargs):
    """
    Returns the quality flags for each requested parameter.

    :param args: parameter names (must be columns in self.df).
    :raises GISMOExceptionInvalidParameter: if any parameter is missing
        from the data (all missing names are reported).
    :return: dict {parameter: list of quality flags}; parameters with
        no associated QF column are left out.
    """
    invalid = [par for par in args if par not in self.df.columns]
    if invalid:
        raise GISMOExceptionInvalidParameter('; '.join(invalid))
    result = {}
    for par in args:
        qf_par = self.get_qf_par(par)
        if qf_par:
            result[par] = list(self.df[qf_par])
    return result
def get_qf_par(self, par, internal_name=False):
    """
    Updated 20181004
    Find the quality-flag (QF) column belonging to *par*.

    The QF column is identified via qf_prefix/qf_suffix from the
    settings, using a substring match on the external parameter names.

    :param par: parameter name (internal or external).
    :param internal_name: if True, return the internal name of the QF column.
    :return: name of the QF column, False if none found, or None (bare
        return) when no prefix/suffix is configured.
    """
    # TODO: prefix = self.settings.parameter_mapping.qf_prefix
    # TODO: suffix = self.settings.parameter_mapping.qf_suffix
    prefix = self.settings.get_data('parameter_mapping', 'qf_prefix')
    suffix = self.settings.get_data('parameter_mapping', 'qf_suffix')
    # First check if prefix and/or suffix is given
    if not any([prefix, suffix]):
        print('No prefix or suffix given to this QF parameter')
        return
    # Always match on the external name.
    if par in self.parameters_internal:
        par = self.internal_to_external[par]
    # TODO:
    # if self.settings.parameter_mapping.unit_starts_with:
    #     par = par.split(self.settings.parameter_mapping.unit_starts_with)[0].strip()
    # Strip a trailing unit (e.g. 'TEMP (C)' -> 'TEMP') before matching.
    unit_starts_with = self.settings.get_data('parameter_mapping', 'unit_starts_with')
    if unit_starts_with:
        par = par.split(unit_starts_with)[0].strip()
    # print 'par-', par
    # QF parameter is found whenever prefix or suffix matches the given par.
    # This means that if prefix="QF_" and par="TEMP", not only "QF_TEMP" is recognised but also "QF_TEMP (C)"
    for ext_par in self.parameters_external:
        if par in ext_par and ext_par.startswith(prefix) and ext_par.endswith(suffix):
            if ext_par != par:
                # print 'ext_par', ext_par, par, prefix, suffix
                if internal_name:
                    return self.get_internal_parameter_name(ext_par)
                return ext_par
    return False
def get_metadata_tree(self):
    """
    Return the metadata tree of the file.

    :raises GISMOExceptionMethodNotImplemented: if no metadata is loaded.
    """
    if not self.metadata:
        raise GISMOExceptionMethodNotImplemented
    return self.metadata.get_metadata_tree()
# ==========================================================================
def _get_extended_qf_list(self, qf_list):
"""
The pandas datafram may contain both str and int value in the qf-columns.
This method adds both str and int versions of the given qf_list.
"""
if not type(qf_list) == list:
qf_list = [qf_list]
# Add both int ans str versions of the flags
extended_qf_list = []
for qf in qf_list:
extended_qf_list.append(qf)
if type(qf) == int:
extended_qf_list.append(str(qf))
elif qf.isdigit():
extended_qf_list.append(int(qf))
return extended_qf_list
# ===========================================================================
def save_file(self, **kwargs):
    """
    Write the data (and metadata, if present) back to file.

    :param kwargs:
        file_path: target path (defaults to self.file_path).
        overwrite: must be True to replace an existing file.
    :raises GISMOExceptionFileExcists: if the target exists and
        overwrite is not set.
    """
    file_path = kwargs.get('file_path', None)
    if not file_path:
        file_path = self.file_path
    if os.path.exists(file_path) and not kwargs.get('overwrite', False):
        raise GISMOExceptionFileExcists(file_path)
    # write_kwargs = {'index_label': False,
    #                 'index': False,
    #                 'sep': '\t',
    #                 'float_format': self.nr_decimals,
    #                 'decimal': '.'}
    #
    # write_kwargs.update(kwargs)
    # Build self.export_df with missing values restored.
    self._prepare_export()
    data_dict = self.export_df.to_dict('split')
    with codecs.open(file_path, 'w', encoding=self.file_encoding) as fid:
        if self.metadata and self.metadata.has_data:
            # Metadata block goes first, one row per line.
            fid.write('\n'.join(self.metadata.get_lines()))
            fid.write('\n')
        # Write column header
        fid.write(self.column_separator.join(data_dict['columns']))
        fid.write('\n')
        for line in data_dict['data']:
            fid.write(self.column_separator.join(line))
            fid.write('\n')
class CMEMSferrybox(GISMOfile):
    """
    A GISMO-file only has data from one platform.

    Trajectory (ferrybox) flavour of GISMOfile for CMEMS files. Extends
    the filter/flag options with time-based selection and repairs QF
    columns that were stored as floats.
    """
    # ==========================================================================
    def __init__(self, data_file_path=None, settings_file_path=None, root_directory=None, **kwargs):
        """
        Updated 20181005
        :param data_file_path:
        :param settings_file_path:
        :param root_directory:
        :param kwargs:
        """
        kwargs.update(dict(data_file_path=data_file_path,
                           settings_file_path=settings_file_path,
                           root_directory=root_directory))
        # for key in sorted(kwargs):
        #     print(key, kwargs[key])
        GISMOfile.__init__(self, **kwargs)
        self.data_type = 'trajectory'
        self.filter_data_options = self.filter_data_options + ['time', 'time_start', 'time_end']
        self.flag_data_options = self.flag_data_options + ['time', 'time_start', 'time_end']
        self.mask_data_options = self.mask_data_options + []
        self.valid_qc_routines = ['Mask areas']
        self._check_qf_columns()

    def _check_qf_columns(self):
        """
        Detect QF columns that were stored as floats (e.g. '1.0') and
        truncate them to their first character.
        NOTE(review): assumes the QF column values are strings -- a
        non-string value would raise on the "'.' in value" test; confirm.
        """
        self.error_in_qc_columns = False
        for par in self.qpar_list:
            qpar = self.get_qf_par(par)
            for value in self.df[qpar]:
                if '.' in value:
                    self.df[qpar] = self.df[qpar].str[0]
                    self.error_in_qc_columns = True
                    break
        if self.error_in_qc_columns:
            logger.warning(f'QC columns are float in file: {self.file_id}')
            # raise GISMOExceptionQCfieldError
# ==============================================================================
class CMEMSFixedPlatform(GISMOfile):
    """
    A GISMO-file only has data from one platform.

    Time-series flavour of GISMOfile for CMEMS fixed platforms; the
    (constant) depth can be supplied by the caller.
    """
    # ==========================================================================
    def __init__(self, data_file_path=None, settings_file_path=None, root_directory=None, depth=None, **kwargs):
        """
        Updated 20181022
        :param data_file_path:
        :param settings_file_path:
        :param root_directory:
        :param depth: fixed depth for all rows (forwarded to GISMOfile).
        :param kwargs:
        """
        kwargs.update(dict(data_file_path=data_file_path,
                           settings_file_path=settings_file_path,
                           root_directory=root_directory,
                           depth=depth))
        GISMOfile.__init__(self, **kwargs)
        self.data_type = 'timeseries'
        # self.parameter_list = ['time', 'lat', 'lon', 'depth'] + self.qpar_list
        self.filter_data_options = self.filter_data_options + ['time', 'time_start', 'time_end']
        self.flag_data_options = self.flag_data_options + ['time', 'time_start', 'time_end']
        self.mask_data_options = self.mask_data_options + []
        self.valid_qc_routines = ['QC from ODV Spreadsheet']

    def get_station_name(self, external=False):
        """
        Return the station name taken from the first data column.

        :param external: if True return the external (file) name,
            otherwise the mapped internal name.
        """
        if external:
            return self.df.columns[0]
        else:
            return self.station_mapping.get_internal(self.df.columns[0])
# ==============================================================================
# ==============================================================================
class DVStandardFormatCTD(GISMOfile):
    """
    A DATA-file has data from several platforms. Like SHARKweb Physical/Chemical columns.

    Profile (CTD) flavour of GISMOfile for the DV standard format.
    """
    # ==========================================================================
    def __init__(self, data_file_path=None, settings_file_path=None, root_directory=None, **kwargs):
        """
        Updated 20181005
        :param data_file_path:
        :param settings_file_path:
        :param root_directory:
        :param kwargs:
        """
        kwargs.update(dict(data_file_path=data_file_path,
                           settings_file_path=settings_file_path,
                           root_directory=root_directory))
        GISMOfile.__init__(self, **kwargs)
        self.data_type = 'profile'
        self.filter_data_options = self.filter_data_options + ['depth', 'depth_min', 'depth_max']
        self.flag_data_options = self.flag_data_options + ['depth', 'depth_min', 'depth_max']
        self.mask_data_options = self.mask_data_options + []
        self.valid_qc_routines = ['Profile DV Standard format', 'Profile range simple', 'Profile report']

    def get_position(self, **kwargs):
        """
        Return [lat, lon] of the (single) profile position.

        Bug fix: the signature was ``*kwargs`` (collecting positional
        arguments), so keyword calls like get_position(external=True)
        raised TypeError; now matches the base-class ``**kwargs``
        signature.
        """
        return [float(self.df['lat'].values[0]), float(self.df['lon'].values[0])]

    def get_time(self):
        """ Return a one-element list with the profile time. """
        return [self.df['time'].values[0]]

    def get_station_name(self, **kwargs):
        """
        Station can be found in metadata or self.df['station']
        :return:
        """
        return self.metadata.data['METADATA'].get_statn()
# ==============================================================================
# ==============================================================================
class SHARKmetadataStandardBase(object):
    """
    Created 20180928
    Updated 20181003
    Class holds metadata information of a GISMO file.

    Raw metadata lines (prefixed with the comment id, default '//') are
    parsed into typed sections (METADATA, SENSORINFO, INFORMATION,
    INSTRUMENT_METADATA, COMNT_UNIT, COMNT_QC), each handled by a
    nested helper class.
    """
class MetadataBase(object):
    """
    Base class for the metadata sections.

    Subclasses assign self.metadata_id (which also builds
    self.metadata_string = comment_id + metadata_id via the property
    setter) and implement add/get_rows/set/get_metadata_tree.
    """
    def __init__(self, **kwargs):
        self.comment_id = kwargs.get('comment_id', '//')
        self.delimiter = None
        self.metadata_string = None
        # NOTE: assigning metadata_id triggers the setter below, so
        # metadata_string becomes '<comment_id>None' until a subclass
        # sets a real id.
        self.metadata_id = None
        self.data = None

    @property
    def metadata_id(self):
        return self.__metadata_id

    @metadata_id.setter
    def metadata_id(self, string):
        self.__metadata_id = string
        self.metadata_string = '{}{}'.format(self.comment_id, self.__metadata_id)

    @property
    def has_data(self):
        # len() works for both lists and pandas DataFrames.
        # Bug fix: guard against data never having been set -- some
        # subclasses start with data = None, and len(None) raised
        # TypeError here.
        if self.data is None:
            return False
        return bool(len(self.data))

    def add(self, comment):
        raise GISMOExceptionMethodNotImplemented

    def get_rows(self):
        raise GISMOExceptionMethodNotImplemented

    def set(self):
        raise GISMOExceptionMethodNotImplemented

    def get_metadata_tree(self):
        raise GISMOExceptionMethodNotImplemented
class FileInfo(MetadataBase):
    """ Handles the free-form info lines found at the top of the file. """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.data = []

    def add(self, line):
        """ Store one complete raw line. """
        self.data.append(line)

    def set(self, **kwargs):
        pass

    def get_rows(self):
        """ Return a copy of the stored lines. """
        return list(self.data)

    def get_metadata_tree(self):
        """ Return {key: {'value': value}} parsed from the stored lines. """
        tree = {}
        for raw_line in self.data:
            parts = raw_line.strip().strip(self.comment_id).split(self.delimiter)
            tree[parts[0]] = {'value': parts[1]}
        return tree
class Metadata(MetadataBase):
    """ Handles the METADATA section (ordered key/value pairs). """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.metadata_id = 'METADATA'
        self.data = {}
        self.item_order = []

    def add(self, item_list, **kwargs):
        """
        Store one metadata row.

        :param item_list: [metadata string, key, value]
        :raises GISMOExceptionMetadataError: if the row id does not match.
        """
        if self.metadata_id not in item_list[0]:
            raise GISMOExceptionMetadataError('Non matching metadata string: {}'.format(item_list[0]))
        key, value = item_list[1], item_list[2]
        self.data[key] = value
        self.item_order.append(key)

    def get_rows(self):
        """ Rebuild the raw metadata rows in their original order. """
        rows = []
        for key in self.item_order:
            rows.append(self.delimiter.join([self.metadata_string, key, self.data[key]]))
        return rows

    def get_metadata_tree(self):
        """ Return {key: {'value': value}} for all stored items. """
        return {key: {'value': value} for key, value in self.data.items()}

    def get_statn(self):
        """ Return the station name (STATN) if present. """
        return self.data.get('STATN')
class SensorInfo(MetadataBase):
    """
    Handles the SENSORINFO section. Rows are collected into a pandas
    DataFrame; the first added row becomes the header.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.metadata_id = 'SENSORINFO'
        self.data = None

    def add(self, item_list, **kwargs):
        """
        If self.data is empty item_list is the header to the dataframe.
        """
        if self.metadata_id not in item_list[0]:
            raise GISMOExceptionMetadataError('Non matching metadata string: {}'.format(item_list[0]))
        row_list = item_list[1:]
        if self.data is None:
            self.header = row_list
            self.data = pd.DataFrame([], columns=row_list)
        else:
            df = pd.DataFrame([row_list], columns=self.header)
            # Bug fix: DataFrame.append was removed in pandas 2.0.
            # pd.concat with ignore_index=True reproduces the old
            # append + reset_index + pop('index') sequence.
            self.data = pd.concat([self.data, df], ignore_index=True)

    def set(self, **kwargs):
        pass

    def get_rows(self):
        """ Rebuild the raw SENSORINFO rows (header first). """
        return_list = []
        line_list = [self.metadata_string] + self.header
        return_list.append(self.delimiter.join(line_list))
        for i in self.data.index:
            line_list = [self.metadata_string] + list(self.data.iloc[i].values)
            return_list.append(self.delimiter.join(line_list))
        return return_list

    def get_metadata_tree(self):
        """ Return {PARAM_SIMPLE: {'children': {column: {'value': ...}}}}. """
        par_key = 'PARAM_SIMPLE'
        columns = [item for item in self.data.columns if item != par_key]
        return_dict = {}
        for k in self.data.index:
            line_dict = dict(zip(self.data.columns, self.data.iloc[k].values))
            par = line_dict[par_key]
            return_dict.setdefault(par, {'children': {}})
            for item in columns:
                return_dict[par]['children'][item] = {'value': line_dict[item]}
        return return_dict
class Information(MetadataBase):
    """ Handles the INFORMATION section (free-form comment rows). """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.metadata_id = 'INFORMATION'
        self.data = []

    def add(self, item_list, **kwargs):
        """ Store one information row (everything after the id). """
        if self.metadata_id not in item_list[0]:
            raise GISMOExceptionMetadataError('Non matching metadata string: {}'.format(item_list[0]))
        self.data.append(self.delimiter.join(item_list[1:]))

    def set(self, **kwargs):
        pass

    def get_rows(self):
        """ Rebuild the raw rows. """
        return [self.delimiter.join([self.metadata_string, item]) for item in self.data]

    def get_metadata_tree(self):
        """ Return {'Comment nr N': {'value': row}} for each row. """
        return {'Comment nr {}'.format(nr + 1): {'value': item}
                for nr, item in enumerate(self.data)}
class InstrumentMetadata(MetadataBase):
    """ Handles the INSTRUMENT_METADATA section (verbatim lines). """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.metadata_id = 'INSTRUMENT_METADATA'
        self.data = []

    def add(self, item_list, **kwargs):
        """ Store the original line, without trailing newline characters. """
        if self.metadata_id not in item_list[0]:
            raise GISMOExceptionMetadataError('Non matching metadata string: {}'.format(item_list[0]))
        self.data.append(kwargs.get('original_line').strip('\r\n'))

    def set(self, **kwargs):
        pass

    def get_rows(self):
        """ Return a copy of the stored lines. """
        return list(self.data)

    def get_metadata_tree(self):
        """ Return {'line N': {'value': line}} for each stored line. """
        return {'line {}'.format(nr + 1): {'value': item}
                for nr, item in enumerate(self.data)}
class CommentQC(MetadataBase):
    """ Handles the COMNT_QC section (quality-control comments). """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.metadata_id = 'COMNT_QC'
        self.data = []

    def add(self, comment, **kwargs):
        """
        Add a QC comment. With as_is=True the comment is stored
        unchanged; otherwise it is timestamped and prefixed with the
        metadata string.

        Cleanup: the original repeated the list-join check inside the
        else branch where it could never trigger (comment is already a
        string after the first join); the dead duplicate is removed.
        """
        if type(comment) == list:
            comment = ';'.join(comment)
        if kwargs.get('as_is'):
            self.data.append(comment)
        else:
            # NOTE(review): str.strip removes any of the *characters* in
            # metadata_string from both ends, not the prefix as a whole
            # -- kept as-is to preserve behavior; confirm intent.
            comment = comment.strip(self.metadata_string)
            time_string = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
            self.data.append(self.delimiter.join([self.metadata_string, time_string, comment.strip()]))

    def get_rows(self):
        return self.data[:]

    def get_metadata_tree(self):
        return self.data[:]
class CommentUNIT(MetadataBase):
    """ Handles the COMNT_UNIT section (unit comments). """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.metadata_id = 'COMNT_UNIT'
        self.data = []

    def add(self, comment, **kwargs):
        """
        Add a unit comment. With as_is=True the comment is stored
        unchanged; otherwise it is timestamped and prefixed with the
        metadata string.
        """
        if type(comment) is list:
            comment = ';'.join(comment)
        if kwargs.get('as_is'):
            self.data.append(comment)
            return
        trimmed = comment.strip(self.metadata_string)
        stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
        self.data.append(self.delimiter.join([self.metadata_string, stamp, trimmed.strip()]))

    def get_rows(self):
        return list(self.data)

    def get_metadata_tree(self):
        return list(self.data)
# ==========================================================================
def __init__(self, metadata_raw_lines, **kwargs):
    """
    :param metadata_raw_lines: raw metadata (comment) lines from the file.
    :param kwargs: comment_id (default '//') marks metadata lines.
    """
    self.metadata_raw_lines = metadata_raw_lines
    self.comment_id = kwargs.get('comment_id', '//')
    self.has_data = False
    # Parse the raw lines into the section objects.
    self._load_metadata()

def __str__(self):
    return str(self.get_metadata_tree())
# ==========================================================================
def _load_metadata(self):
    """
    Updated 20181003

    Parse self.metadata_raw_lines into the section objects in
    self.data and record the order sections first appear in
    (self.metadata_order) so the file can be rewritten faithfully.

    :raises GISMOExceptionMetadataError: if a line carries an unknown
        metadata id.
    """
    kw = dict(comment_id=self.comment_id)
    self.file_info = self.FileInfo(**kw)
    self.data = {'METADATA': self.Metadata(**kw),
                 'SENSORINFO': self.SensorInfo(**kw),
                 'INFORMATION': self.Information(**kw),
                 'INSTRUMENT_METADATA': self.InstrumentMetadata(**kw),
                 'COMNT_UNIT': self.CommentUNIT(**kw),
                 'COMNT_QC': self.CommentQC(**kw)}
    self.metadata_order = []
    self.metadata_delimiter = None
    self.data_delimiter = None
    self.file_format = None
    for k, original_line in enumerate(self.metadata_raw_lines):
        line = original_line.strip()
        if line.startswith('{}FORMAT'.format(self.comment_id)):
            # File format declaration, e.g. //FORMAT=...
            self.file_format = line.split('=')[-1].strip()
            self.file_info.add(line)
        elif line.startswith('{}METADATA_DELIMITER'.format(self.comment_id)):
            # Delimiter used within metadata rows; propagate to all sections.
            self.metadata_delimiter = line.split('=')[-1].strip()
            for key, mdata in self.data.items():
                mdata.delimiter = self.metadata_delimiter
            self.file_info.add(line)
        elif line.startswith('{}DATA_DELIMITER'.format(self.comment_id)):
            # Delimiter used in the data table; the literal '\t' means a tab.
            delim = line.split('=')[-1].strip()
            if delim == '\\t':
                delim = '\t'
            self.data_delimiter = delim
            self.file_info.add(line)
        else:
            # Regular metadata row: route it to its section by id.
            metadata_id = line.split(self.metadata_delimiter)[0].strip(self.comment_id)
            if metadata_id in self.data:
                split_line = line.split(self.metadata_delimiter)
                self.data[metadata_id].add(split_line, original_line=original_line, as_is=True)
                if metadata_id not in self.metadata_order:
                    self.metadata_order.append(metadata_id)
            else:
                raise GISMOExceptionMetadataError('New field in metadata is not handled properly: {}'.format(metadata_id))
    self.has_data = True
    # self.metadata_order.append('COMNT_UNIT')
    # self.metadata_order.append('COMNT_QC')
def add_qc_comment(self, comment, **kwargs):
    """ Forward a QC comment to the COMNT_QC section. """
    qc_section = self.data['COMNT_QC']
    qc_section.add(comment, **kwargs)
def get_lines(self):
    """
    Return all metadata rows: file info first, then each section (in
    original file order) that actually holds data.
    """
    lines = list(self.file_info.get_rows())
    for section_id in self.metadata_order:
        section = self.data[section_id]
        if section.has_data:
            # all_lines = all_lines + self.data[metadata_type].get_rows()
            lines.extend(section.get_rows())
    return lines
def get_metadata_tree(self, subcat=False):
    """
    Return the metadata as a nested dict.

    :param subcat: if given, only that section's tree is returned.
    """
    if subcat:
        return self.data[subcat].get_metadata_tree()
    tree = {}
    for section_id in self.metadata_order:
        tree[section_id] = {'children': self.data[section_id].get_metadata_tree()}
    return tree
# ==============================================================================
# ==============================================================================
class old_SamplingTypeSettings(dict):
"""
Reads and stores information from a "GISMO" Settings file.
"""
# ==========================================================================
# ==========================================================================
class MappingObject(dict):
    """
    Parses a tab-separated mapping section into a dict.

    Each line is 'header<TAB>value'; headers are lower-cased with
    spaces replaced by underscores and are also set as attributes on
    the instance. Values containing 'root' are expanded with
    root_directory and checked to exist on disk; values containing ';'
    become lists.
    """
    def __init__(self, data, root_directory=None):
        for line in data:
            split_line = [item.strip() for item in line.split('\t')]
            if len(split_line) == 1:
                # Header without a value.
                header = split_line[0]
                value = ''
            else:
                header, value = split_line[:2]
            header = header.lower().replace(' ', '_')
            if 'root' in value:
                # NOTE(review): substring test, so any value containing
                # 'root' is treated as a path template -- confirm intended.
                if not root_directory:
                    raise GISMOExceptionMissingPath('Must provide root_directory')
                value = value.replace('root', root_directory)
                if not os.path.exists(value):
                    raise GISMOExceptionMissingPath(value)
            if ';' in value:
                # Multi-valued entry.
                value = [item.strip() for item in value.split(';')]
            self[header] = value
            setattr(self, header, value)

    # ======================================================================
    def get_list(self, item):
        """ Return the value for *item*, wrapped in a list if needed. """
        value = getattr(self, item)
        if not isinstance(value, list):
            value = [value]
        return value
# ==========================================================================
def __init__(self, file_path=None, root_directory=None):
    """
    :param file_path: path to the settings file; if given, the file is
        loaded and parsed immediately.
    :param root_directory: used to expand 'root' placeholders in values.
    """
    self.file_path = file_path
    self.root_directory = root_directory
    dict.__init__(self)
    if self.file_path:
        self._load_file()
        self._save_data()
# ==========================================================================
def _load_file(self):
self.data = {}
current_header = None
fid = codecs.open(self.file_path, 'r', encoding='cp1252')
for line in fid:
line = line.strip('\r\n')
# Blank line or comment line
if not line or line.startswith('#'):
continue
# Find header
if line.startswith('='):
current_header = line.strip('= ')
self.data[current_header] = []
else:
self.data[current_header].append(line)
fid.close()
# ==========================================================================
def _save_data(self):
for key in self.data:
# ------------------------------------------------------------------
if key.lower() == 'flags':
self['flags'] = {}
self.description_to_flag = {}
for i, line in enumerate(self.data[key]):
split_line = [item.strip() for item in line.split('\t')]
if i == 0:
header = [item.lower() for item in split_line]
else:
qf = split_line[header.index('qf')]
if qf == '':
qf = 'no flag'
self['flags'][qf] = {}
for par, item in zip(header, split_line):
if par == 'markersize':
item = int(item)
elif par == 'description':
self.description_to_flag[item] = qf
self['flags'][qf][par] = item
self.flag_list = sorted(self['flags'])
# ------------------------------------------------------------------
elif key.lower() == 'dependent parameters':
self['dependencies'] = {}
for line in self.data[key]:
split_line = [item.strip() for item in line.split(';')]
primary_parameter = split_line[0]
dependent_parameters = []
for item in split_line[1:]:
if ':' in item:
# Range of integers
from_par, to_par = [int(par.strip()) for par in item.split(':')]
dependent_parameters.extend(list(map(str, range(from_par, to_par+1))))
else:
dependent_parameters.append(item)
# # Map parameters
# split_line = [CMEMSparameters().get_smhi_code(par) for par in split_line]
# print(primary_parameter, type(primary_parameter))
# print('dependent_parameters'.upper(), dependent_parameters)
self['dependencies'][primary_parameter] = dependent_parameters
# ------------------------------------------------------------------
elif key.lower() == 'ranges':
self['ranges'] = {}
for i, line in enumerate(self.data[key]):
split_line = [item.strip() for item in line.split(u'\t')]
if i == 0:
header = [item.lower() for item in split_line]
else:
limit_dict = dict(zip(header, split_line))
par = split_line[header.index('parameter')]
self['ranges'][par] = {}
# print header
for limit in [item for item in header if item != 'parameter']:
if limit_dict[limit]:
value = float(limit_dict[limit])
self['ranges'][par][limit] = value
# ------------------------------------------------------------------
elif key.lower() == 'parameter mapping':
self.parameter_mapping = self.MappingObject(self.data[key], self.root_directory)
# ------------------------------------------------------------------
elif key.lower() == 'station mapping':
self.station_mapping = self.MappingObject(self.data[key], self.root_directory)
# ------------------------------------------------------------------
elif key.lower() == 'info':
self.info = self.MappingObject(self.data[key], self.root_directory)
# ------------------------------------------------------------------
elif key.lower() == 'column':
self.column = self.MappingObject(self.data[key], self.root_directory)
# ------------------------------------------------------------------
elif key.lower() == 'matching criteria':
self.matching_criteria = self.MappingObject(self.data[key], self.root_directory)
# ------------------------------------------------------------------
elif key.lower() == 'map':
self.map = self.MappingObject(self.data[key], self.root_directory)
# ==================================================================
def get_flag_list(self):
return self.flag_list
# ==================================================================
def get_flag_description(self, flag):
return self['flags'][flag]['description']
# ==================================================================
def get_flag_description_list(self):
return [self.get_flag_description(flag) for flag in self.flag_list]
# # ==================================================================
# def get_flag_color(self, flag):
# return self['flags'][flag]['color']
#
# # ==================================================================
# def get_flag_color_list(self):
# return [self.get_flag_color(flag) for flag in self.flag_list]
#
# # ==================================================================
# def get_flag_markersize(self, flag):
# return self['flags'][flag]['markersize']
#
# # ==================================================================
# def get_flag_markersize_list(self):
# return [self.get_flag_markersize(flag) for flag in self.flag_list]
#
# # ==================================================================
# def get_flag_marker(self, flag):
# return self['flags'][flag]['marker']
#
# # ==================================================================
# def get_flag_marker_list(self):
# return [self.get_flag_marker(flag) for flag in self.flag_list]
# ==================================================================
def get_flag_from_description(self, description):
return self.description_to_flag[description]
# ==================================================================
def get_flag_prop_dict(self, flag):
flag = str(flag)
if self:
dont_include = ['qf', 'description']
# print('='*50)
# print(self['flags'][flag])
# print('=' * 50)
return {par: item for par, item in self['flags'][flag].items() if par not in dont_include}
else:
return {}
# ==================================================================
def _get_default_dict(self):
pass
class SamplingTypeSettings(object):
    """
    JSON-backed settings for a sampling type.

    ``self.data`` maps an "info type" (e.g. 'flags') to a dict holding the
    actual key/value pairs under its 'data' key.
    """

    def __init__(self, file_name, directory=None, data=None):
        """
        :param file_name: settings file name ('.json' appended if missing);
            may also be a (file_name, directory) sequence (see the setter).
        :param directory: directory of the settings file; defaults to the
            bundled 'settings_files' directory.
        :param data: optional pre-loaded settings dict; when empty/None the
            settings are loaded from file.
        """
        self.file_name = file_name
        self.directory = directory
        # BUG FIX: the default used to be the mutable ``data={}`` which is
        # shared between all calls; use None as the sentinel instead.
        self.data = {} if data is None else data
        self._add_suffix()
        if not self.directory:
            self.directory = self._get_settings_files_directory()
        self.file_path = os.path.join(self.directory, self.file_name)
        self._save_file_id()
        if not self.data:
            self._load(True)

    def __str__(self):
        return f'Settings file: {self.file_name}'

    def __repr__(self):
        # BUG FIX: the closing parenthesis was missing and the two arguments
        # were formatted as a single tuple.
        return f'{self.__class__.__name__}({self.file_name!r}, {self.directory!r})'

    def _add_suffix(self):
        # Make sure the stored file name carries the .json extension.
        if not self.file_name.endswith('.json'):
            self.__file_name = self.file_name + '.json'

    def _save_file_id(self):
        # The file id is the file name without its extension.
        self.file_id = os.path.basename(self.file_path).split('.')[0]

    @property
    def file_name(self):
        return self.__file_name

    @file_name.setter
    def file_name(self, items):
        # Accept either a plain name or a (file_name, directory) sequence.
        if isinstance(items, (list, tuple)):
            if len(items) == 2:
                # Second item is directory
                file_name, directory = items
            else:
                file_name = items[0]
                directory = self._get_settings_files_directory()
        else:
            file_name = items
            directory = self._get_settings_files_directory()
        self.__file_name = file_name
        self._add_suffix()
        self.directory = directory
        self.file_path = os.path.join(self.directory, self.file_name)

    def _get_settings_files_directory(self):
        # Default directory shipped with the package.
        return os.path.join(os.path.dirname(__file__), 'settings_files')

    def _save(self):
        # The bundled default settings must never be overwritten.
        # (Removed leftover debug prints.)
        if self.directory == self._get_settings_files_directory():
            raise GISMOExceptionSaveNotAllowed(self.file_path)
        utils.save_json(self.data, self.file_path, encoding='cp1252')

    def _load(self, create_if_missing=False):
        self.data = utils.load_json(self.file_path, create_if_missing=create_if_missing, encoding='cp1252')

    def get_all_data(self):
        """Return a shallow copy of the full settings dict."""
        return self.data.copy()

    def save(self):
        """Write the settings to file (not allowed for the bundled defaults)."""
        self._save()

    def _check_info_type(self, info_type):
        # Raise if the info type is unknown.
        if self.data.get(info_type, None) is None:
            raise GISMOExceptionInvalidOption(f'info_type: {info_type}')

    def get_data(self, info_type, key=None, default=None):
        """
        Returns data from the info_type from the given key.

        :param info_type: section of the settings (e.g. 'flags')
        :param key: optional key within the section
        :param default: value returned when *key* is missing; if None a
            missing key raises GISMOExceptionInvalidOption
        :return: the value, or the whole section dict when *key* is None
        """
        self._check_info_type(info_type)
        if key:
            value = self.data.get(info_type).get('data').get(key, None)
            if value is None:
                if default is None:
                    raise GISMOExceptionInvalidOption(f'key: {key}')
                value = default
            return value
        return self.data.get(info_type).get('data')

    def set_data(self, info_type, key, value):
        """Update an existing entry; the value type must stay the same."""
        type_value = type(self.get_data(info_type, key))
        if type_value != type(value):
            raise TypeError(f'{value} must be of type {type_value} not {type(value)}')
        self.data[info_type]['data'][key] = value

    def add_data(self, info_type, key, value):
        """Add a new entry; existing entries are left untouched."""
        self._check_info_type(info_type)
        # BUG FIX: the membership test looked in self.data[info_type]
        # (whose keys are e.g. 'data'), so existing entries were overwritten.
        if key not in self.data[info_type]['data']:
            self.data[info_type]['data'][key] = value

    def remove_data(self, info_type, key):
        """Remove *key* from *info_type* if present."""
        self._check_info_type(info_type)
        if key in self.data[info_type]['data']:
            self.data[info_type]['data'].pop(key)

    def get_flag_list(self):
        """Return all flag codes."""
        return list(self.data.get('flags').get('data').keys())

    def get_flag_description(self, flag):
        """Return the description of *flag* (None if unknown)."""
        return self.data.get('flags').get('data').get(flag)

    def get_flag_from_description(self, description):
        """Reverse lookup: flag code for *description* (None if not found)."""
        for key, value in self.data.get('flags').get('data').items():
            if value == description:
                return key
class SHARKfilePhysicalChemical(GISMOfile):
    """
    Class to hold data from SHARK (Svenskt HAvsaRKiv).
    """

    # ==========================================================================
    def __init__(self, data_file_path=None, settings_file_path=None, root_directory=None, **kwargs):
        """
        Updated 20181005

        :param data_file_path: path to the data file
        :param settings_file_path: path to the settings file
        :param root_directory: root directory used by the settings
        :param kwargs: forwarded to GISMOfile
        """
        kwargs['data_file_path'] = data_file_path
        kwargs['settings_file_path'] = settings_file_path
        kwargs['root_directory'] = root_directory
        GISMOfile.__init__(self, **kwargs)

        # SHARK files support extra filter options on top of the base ones.
        self.filter_data_options = self.filter_data_options + ['time', 'time_start', 'time_end',
                                                               'visit_id', 'visit_depth_id']
        self.flag_data_options = []
        self.mask_data_options = self.mask_data_options + []
class NODCStandardFormatCTD(DVStandardFormatCTD):
    """
    The standard format for NODC includes 3 qc columns for each parameter.

    For a parameter <par> the columns are:
      QC0_<par> - automatic QC flags (one character per check)
      QC1_<par> - one character per QC routine; the last character is manual QC
      Q_<par>   - combined flag, kept in sync via _sync_qc_columns()
    """

    def __init__(self, **kwargs):
        # BUG FIX: was ``super().__init__(self, **kwargs)`` which passed
        # *self* twice (implicitly and as the first positional argument).
        super().__init__(**kwargs)
        self.filter_data_options = self.filter_data_options + []
        self.flag_data_options = self.flag_data_options + ['qc_routine']
        self.flag_data_options_mandatory = ['qc_routine']
        self.mask_data_options = self.mask_data_options + []

        self.valid_qc_routines = ['Manual', 'Profile range simple', 'Profile report']

        # Mappings between the DV and CMEMS quality-flag conventions.
        self.mapping_qf_dv_to_cmems = self.mapping_files.get_file_object('mapping_quality_flags_dv_to_cmems.txt')
        self.mapping_qf_cmems_to_dv = self.mapping_files.get_file_object('mapping_quality_flags_cmems_to_dv.txt')

        # Accept flags given in either convention.
        self.valid_flags = list(set(self.valid_flags + [self.mapping_qf_dv_to_cmems.get(item)
                                                        for item in self.valid_flags]))

    def __str__(self):
        return f'Standard format NODC file: {self.file_id}'

    def __repr__(self):
        # BUG FIX: the closing parenthesis was missing in the f-string.
        return f'{self.__class__.__name__}({self.file_id})'

    @staticmethod
    def _flag_values_as_strings(values):
        # 'no flag' is stored as the empty string; everything else as str.
        return ['' if v == 'no flag' else str(v) for v in values]

    @staticmethod
    def _par_in_type_spec(par, spec):
        # spec is True (convert all parameters), falsy (none) or an
        # iterable of parameter names.
        # BUG FIX: ``par in kwargs.get('type_float', [])`` raised TypeError
        # when the option was explicitly False or None.
        if spec is True:
            return True
        if not spec:
            return False
        return par in spec

    @staticmethod
    def _to_float_array(par_array):
        # Convert values to float; empty values become NaN and values that
        # cannot be converted are kept unchanged.
        converted = []
        for value in par_array:
            try:
                converted.append(float(value) if value else np.nan)
            except (ValueError, TypeError):
                converted.append(value)
        return np.array(converted)

    def _get_data(self, *args, **kwargs):
        """
        Created 20191203

        :param args: parameters that you want to have data for.
        :param kwargs: ``filter_options``, ``mask_options`` and optionally
            ``type_float`` / ``type_int`` (True or a list of parameters).
        :return: dict with args as keys and numpy arrays as values.
        """
        if not args:
            raise GISMOExceptionMissingInputArgument

        # Work on external column names; remember the name the caller used.
        args = dict((self.internal_to_external.get(key, key), key) for key in args)
        for arg in args:
            if arg not in self.df.columns:
                raise GISMOExceptionInvalidInputArgument(arg)
            elif arg not in self.parameter_list:
                raise GISMOExceptionInvalidInputArgument(arg)

        # Build a row filter from the given filter options.
        boolean = self._get_pandas_series(True)
        for key, value in kwargs.get('filter_options', {}).items():
            if value in [None, False]:
                continue
            if key not in self.filter_data_options:
                raise GISMOExceptionInvalidOption('{} not in {}'.format(key, self.filter_data_options))
            if key == 'time':
                value_list = self._get_argument_list(value)
                boolean = boolean & (self.df.time.isin(value_list))
            elif key == 'time_start':
                boolean = boolean & (self.df.time >= value)
            elif key == 'time_end':
                boolean = boolean & (self.df.time <= value)
            elif key == 'visit_depth_id':
                value_list = self._get_argument_list(value)
                boolean = boolean & (self.df.visit_depth_id.isin(value_list))
            elif key == 'visit_id':
                value_list = self._get_argument_list(value)
                boolean = boolean & (self.df.visit_id.isin(value_list))
            elif key == 'depth':
                value_list = self._get_argument_list(value)
                boolean = boolean & (self.df.depth.isin(value_list))
            elif key == 'depth_min':
                boolean = boolean & (self.df.depth >= value)
            elif key == 'depth_max':
                boolean = boolean & (self.df.depth <= value)

        # Extract the filtered dataframe once for all parameters.
        filtered_df = self.df.loc[boolean].copy(deep=True)
        mask_options = kwargs.get('mask_options', {})

        return_dict = {}
        for par in args:
            par_array = filtered_df[par].values

            # Apply mask options (values of masked rows become '').
            qf_par = self.get_qf_par(par)
            for opt, value in mask_options.items():
                if opt not in self.mask_data_options:
                    raise GISMOExceptionInvalidOption
                if opt == 'include_flags':
                    if not qf_par:
                        continue
                    qf_list = self._flag_values_as_strings(value)
                    keep_boolean = filtered_df[qf_par].astype(str).isin(qf_list)
                    par_array[~keep_boolean] = ''
                elif opt == 'exclude_flags':
                    if not qf_par:
                        continue
                    qf_list = self._flag_values_as_strings(value)
                    nan_boolean = filtered_df[qf_par].astype(str).isin(qf_list)
                    par_array[nan_boolean] = ''

            # Check requested output type.
            if par == 'time':
                pass
            elif self._par_in_type_spec(par, kwargs.get('type_float')):
                par_array = self._to_float_array(par_array)
            elif self._par_in_type_spec(par, kwargs.get('type_int')):
                # NOTE(review): identical to the float branch — presumably an
                # int conversion was intended; kept as-is to preserve behavior.
                par_array = self._to_float_array(par_array)

            # Map back to the column name given by the caller.
            return_dict[args[par]] = par_array
        return return_dict

    def flag_data(self, flag, *args, **kwargs):
        """
        Created 20181005

        :param flag: The flag you want to set for the parameter
        :param args: parameters that you want to flag.
        :param kwargs: conditions for flagging. Options are listed in
            self.flag_data_options; must include ``qc_routine``.
        :return: None
        """
        flag = self.mapping_qf_dv_to_cmems.get(flag)
        qc_routine = kwargs.pop('qc_routine', None)
        if qc_routine is None:
            raise GISMOExceptionMissingInputArgument('qc_routine')
        if qc_routine not in QC1_ROUTINE_LIST:
            raise GISMOExceptionInvalidOption(f'{qc_routine} is not a valid qc_routine!')
        # Level is used from end in list ie. [-qc_level]
        qc_level = QC1_ROUTINE_LIST.index(qc_routine) + 1

        flag = str(flag)
        if flag not in self.valid_flags:
            raise GISMOExceptionInvalidFlag('"{}", valid flags are "{}"'.format(flag, ', '.join(self.valid_flags)))
        if flag == 'no flag':
            flag = ''

        # Include dependent parameters.
        all_args = []
        for arg in args:
            all_args.append(arg)
            all_args.extend(self.get_dependent_parameters(arg))

        # Work on external column names
        args = [self.internal_to_external.get(arg, arg) for arg in all_args]

        # Build a row filter from the flagging conditions.
        boolean = self._get_pandas_series(True)
        for key, value in kwargs.items():
            # Check valid option
            if key not in self.flag_data_options:
                raise GISMOExceptionInvalidOption(key)
            # BUG FIX: a leftover ``boolean = boolean & (self.df.time <= value)``
            # was applied unconditionally for *every* option (including 'flags');
            # the time options are now handled explicitly like the depth options.
            if key == 'time':
                value_list = self._get_argument_list(value)
                boolean = boolean & (self.df.time.isin(value_list))
            elif key == 'time_start':
                boolean = boolean & (self.df.time >= value)
            elif key == 'time_end':
                boolean = boolean & (self.df.time <= value)
            elif key == 'depth':
                value_list = self._get_argument_list(value)
                boolean = boolean & (self.df.depth.isin(value_list))
            elif key == 'depth_min':
                boolean = boolean & (self.df.depth >= value)
            elif key == 'depth_max':
                boolean = boolean & (self.df.depth <= value)

        # Flag data
        for par in args:
            if par not in self.df.columns:
                raise GISMOExceptionInvalidParameter('Parameter {} not in data'.format(par))
            qf_par = f'QC1_{par.split("[")[0].strip()}'
            if qf_par not in self.df.columns:
                raise GISMOExceptionMissingQualityParameter('for parameter "{}"'.format(par))
            flag_list = kwargs.get('flags', None)
            if flag_list:
                if not isinstance(flag_list, list):
                    flag_list = [flag_list]
                else:
                    # Work on a copy so the caller's list is not mutated.
                    flag_list = list(flag_list)
                if 'no flag' in flag_list:
                    flag_list.remove('no flag')
                    flag_list.append('')
                par_boolean = boolean & (self.df[qf_par].isin(flag_list))
            else:
                par_boolean = boolean.copy(deep=True)

            updated_qf_list = []
            for item, b in zip(self.df[qf_par], par_boolean):
                # Pad so the routine's position exists, then set position -qc_level.
                item = item.zfill(qc_level)
                if b:
                    item_list = list(item)
                    item_list[-qc_level] = flag
                    item = ''.join(item_list)
                updated_qf_list.append(item)
            self.df[qf_par] = updated_qf_list
            self._sync_qc_columns(par)

    def _sync_qc_columns(self, par):
        """
        Looks in columns QC0_* and QC1_* to find correct flag for column Q_*

        The last character of QC1 is the manual QC flag and takes precedence;
        otherwise '4' (bad) wins, otherwise the highest flag value is used.

        :param par: str, parameter to check
        :return:
        """
        def _sync(row):
            qc0_list = list(row[qc0_par])
            qc1_list = list(row[qc1_par])
            tot_list = qc0_list + qc1_list
            man_qf = qc1_list[-1]
            if man_qf != '0':
                # Manual QC takes precedence.
                qf = man_qf
            elif '4' in tot_list:
                qf = '4'
            else:
                qf = str(int(max([q for q in tot_list])))
            return self.mapping_qf_cmems_to_dv.get(qf)

        par = par.split("[")[0].strip()
        q_par = f'Q_{par}'
        qc0_par = f'QC0_{par}'
        qc1_par = f'QC1_{par}'
        self.df[q_par] = self.df[[qc0_par, qc1_par]].apply(_sync, axis=1)
def latlon_distance_array(lat_point, lon_point, lat_array, lon_array):
    '''
    Calculate the great circle distance (km) between one point and arrays
    of points on the earth (all coordinates in decimal degrees).
    '''
    # Convert everything to radians up front.
    lat0 = np.radians(lat_point)
    lon0 = np.radians(lon_point)
    lats = np.radians(lat_array)
    lons = np.radians(lon_array)

    # Haversine formula.
    dlat = lats - lat0
    dlon = lons - lon0
    a = np.sin(dlat / 2.) ** 2 + np.cos(lat0) * np.cos(lats) * np.sin(dlon / 2.) ** 2
    central_angle = 2 * np.arcsin(np.sqrt(a))
    # Earth radius at around 57 degrees North (mean value would be ~6367).
    return 6363 * central_angle
# ==============================================================================
# ==============================================================================
def latlon_distance(origin, destination):
    '''
    Calculate the great circle distance (km) between two points on the
    earth, each given as a (lat, lon) pair in decimal degrees.
    '''
    from math import radians, cos, sin, asin, sqrt

    lat1, lon1 = (radians(v) for v in origin)
    lat2, lon2 = (radians(v) for v in destination)

    # Haversine formula.
    a = sin((lat2 - lat1) / 2.) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2.) ** 2
    central_angle = 2 * asin(sqrt(a))
    # Earth radius at around 57 degrees North (mean value would be ~6367).
    return 6363 * central_angle
# ==============================================================================
# ==============================================================================
def old_get_matching_sample_index(sample_object=None,
                                  gismo_object=None,
                                  modulus=None,
                                  diffs=None):
    """
    Legacy matching of sample rows to gismo rows within time/distance/depth limits.

    NOTE: a second function with the same name is defined later in this
    module, so this version is shadowed at import time.

    :param sample_object: object with a ``df`` (time, lat, lon, depth columns)
    :param gismo_object: object with a ``df`` (time, lat, lon, depth columns)
    :param modulus: only check every ``modulus``:th gismo row (speed-up)
    :param diffs: dict with max allowed 'time' (minutes), 'dist' (m) and
        'depth' differences
    :return: sorted list of matching sample_object.df index values
        (None if either object is missing)
    """
    if not all([sample_object, gismo_object]):
        return

    time_diff = diffs['time']
    dist_diff = diffs['dist']
    depth_diff = diffs['depth']

    all_index = []
    # Loop gismo rows (optionally thinned by modulus).
    for i, [t, la, lo, d] in enumerate(zip(gismo_object.df.time,
                                           gismo_object.df.lat,
                                           gismo_object.df.lon,
                                           gismo_object.df.depth)):
        if modulus and i % modulus:
            continue
        # Depth: reduce the candidate sample rows first.
        # BUG FIX: DataFrame.ix was removed in modern pandas; use .loc.
        df = sample_object.df.loc[(sample_object.df.depth >= d - depth_diff) &
                                  (sample_object.df.depth <= d + depth_diff), :]
        # Loop remaining candidates and save matching index values.
        for index in df.index:
            if index in all_index:
                continue
            time = df.loc[index, 'time']
            lat = df.loc[index, 'lat']
            lon = df.loc[index, 'lon']
            if abs((time - t).total_seconds() / 60) > time_diff:
                # Continue if no match for time
                continue
            if (latlon_distance([la, lo], [lat, lon]) * 1000) > dist_diff:
                # Continue if no match for distance
                continue
            # If this line is reached we have a match. Add it to all_index.
            print('Match for index:', index)
            all_index.append(index)
    return sorted(all_index)
# ==============================================================================
# ==============================================================================
def old_get_matching_sample_index(sample_object=None,
                                  gismo_object=None,
                                  diffs=None):
    """
    Match sample rows to gismo rows within time/distance/depth limits.

    Works per unique sample position so time/distance only have to be
    checked once per station.

    :param sample_object: object with a ``df`` (time, lat, lon, depth columns)
    :param gismo_object: object with a ``df`` (time, lat, lon, depth columns)
    :param diffs: dict with max allowed 'time' (minutes), 'dist' (m) and
        'depth' differences
    :return: sorted list of matching sample_object.df index values
        (None if either object is missing)
    """
    if not all([sample_object, gismo_object]):
        return

    time_diff = diffs['time']
    dist_diff = diffs['dist']
    depth_diff = diffs['depth']

    # --------------------------------------------------------------------------
    # First reduce the sample dataframe: group rows by unique position string.
    df = sample_object.df
    df['pos_str'] = df['lat'].map(str) + df['lon'].map(str)
    pos_list = list(set(df['pos_str']))

    all_index = []
    for pos in pos_list:
        # BUG FIX: DataFrame.ix was removed in modern pandas; use .loc.
        pos_df = df.loc[df.pos_str == pos, :]
        pos_index = pos_df.index[0]
        la = pos_df.loc[pos_index, 'lat']
        lo = pos_df.loc[pos_index, 'lon']
        t = pos_df.loc[pos_index, 'time']

        # Distance (m) from this position to every point in gismo_object.df.
        distance = latlon_distance_array(la, lo, gismo_object.df.lat, gismo_object.df.lon) * 1000
        boolean_distance = distance <= dist_diff
        if not np.any(boolean_distance):
            continue

        ### Getting this far guarantees that the station is within distance.
        # Time difference (minutes) to every point in gismo_object.df.
        # BUG FIX: np.array(map(...)) produced a useless 0-d object array on
        # Python 3; build the list explicitly instead.
        time_delta = np.array([td.total_seconds()
                               for td in np.abs(gismo_object.df.time - t)]) / 60
        boolean_time = time_delta <= time_diff
        if not np.any(boolean_time):
            continue

        ### Time and distance must match for the *same* gismo point.
        boolean_dist_time = boolean_distance & boolean_time
        if not np.any(boolean_dist_time):
            continue

        ### We have a match for both time and distance; now check depth
        ### for every sample row at this position.
        # BUG FIX: Series.iteritems() was removed in modern pandas; use .items().
        for i, d in pos_df.depth.items():
            depth_difference = abs(gismo_object.df.depth - d)
            boolean_depth = depth_difference <= depth_diff
            # Save index if any match for depth
            if np.any(boolean_depth):
                if i not in all_index:
                    all_index.append(i)
    return sorted(all_index)
def apply_datetime_object_to_df(x, **kwargs):
    """
    Convert a time string (or sequence of string parts) to a datetime object.

    Used to apply datetime objects to a pandas dataframe.

    :param x: time string, or a sequence whose parts are concatenated
    :param kwargs: unused; accepted for pandas ``apply`` compatibility
    :return: datetime.datetime
    :raises GISMOExceptionInvalidTimeFormat: if no known format matches
    """
    time_formats = ['%Y%m%d%H%M%S',
                    '%Y%m%d%H%M',
                    '%Y%m%d%H:%M',
                    '%Y%m%d%H.%M',
                    '%Y-%m-%d%H%M',
                    '%Y-%m-%d%H:%M',
                    '%Y-%m-%d%H.%M',
                    '%Y%m%d',
                    '%Y-%m-%d',
                    '%Y-%m-%d %H:%M:%S']
    if isinstance(x, str):
        x = [x]
    time_string = ''.join(str(item) for item in x)
    for tf in time_formats:
        try:
            return datetime.datetime.strptime(time_string, tf)
        except ValueError:
            # BUG FIX: was a bare ``except``; strptime only raises ValueError
            # for a non-matching format, so try the next candidate.
            continue
    raise GISMOExceptionInvalidTimeFormat('Could not find matching time format for "{}"'.format(time_string))
import libtcodpy as libtcod
# Heightmap dimensions in cells.
SCREEN_WIDTH = 16
SCREEN_HEIGHT = 16

# NOTE(review): noise_new(1) creates 1-dimensional noise but it is sampled
# below with 2-D coordinates — presumably noise_new(2) was intended; confirm.
noise2d = libtcod.noise_new(1)
m = libtcod.heightmap_new(SCREEN_WIDTH, SCREEN_HEIGHT)

# Fill the heightmap with noise sampled over [-1, 1] x [-1, 1],
# rescaling each sample from [-1, 1] to [0, 1].
for y in range(SCREEN_HEIGHT):
    for x in range(SCREEN_WIDTH):
        n = libtcod.noise_get(noise2d, [2.0 * x / SCREEN_WIDTH - 1.0, 2.0 * y / SCREEN_HEIGHT - 1.0])
        n = (n + 1.0) / 2.0
        libtcod.heightmap_set_value(m, x, y, n)

# Print every height value, one per line, row by row.
for y in range(SCREEN_HEIGHT):
    for x in range(SCREEN_WIDTH):
        v = libtcod.heightmap_get_value(m, x, y)
        print(v)
|
from django.db import models
from django.urls import reverse
class Book(models.Model):
    """A book available in the store."""

    title = models.CharField(max_length=50, verbose_name="Название книги")
    description = models.TextField(verbose_name="Описание книги")
    left = models.PositiveIntegerField(verbose_name="Книг на складе")
    image = models.ImageField(verbose_name="Обложка книги")

    def get_absolute_url(self):
        """Return the canonical URL of this book's detail page."""
        return reverse("main_page:book", kwargs={'idx': self.pk})

    # BUG FIX: the method was misspelled "get_abolute_url", so Django (and
    # callers using the conventional name) could not find it. The old name
    # is kept as an alias for backward compatibility.
    get_abolute_url = get_absolute_url
|
# PYTHON - MIT - UNICAMP
# =============================================================================
# Created By : Matheus Percário Bruder
# Created Date : February 11th, 2021
# ============================================================================
def print_grid(grid):
    """Print each row of *grid* on its own line."""
    for row in grid:
        print(row)
class Ant:
    """Langton's ant: a grid position plus a facing direction (0=N, 1=E, 2=S, 3=W)."""

    def __init__(self, lin, col, direction):
        self.lin = lin
        self.col = col
        self.direction = direction
        self.coords = [lin, col]
        self.steps = 0

    def move(self):
        """Take one step forward in the current facing direction."""
        # (dlin, dcol) per direction: NORTE, LESTE, SUL, OESTE.
        deltas = {0: (-1, 0), 1: (0, 1), 2: (1, 0), 3: (0, -1)}
        dlin, dcol = deltas.get(self.direction, (0, 0))
        self.lin += dlin
        self.col += dcol
        self.coords = [self.lin, self.col]
        self.steps += 1

    def turn(self, rule):
        """Rotate left for rule 'L', otherwise right (4 possible directions)."""
        step = -1 if rule == 'L' else 1
        self.direction = (self.direction + step) % 4

    def __str__(self):
        return f"-------------------\nANT\ncoords ={self.coords}\ndirection ={self.direction}\n"
def run_langton(rules, size):
    """
    Run Langton's ant on a *size* x *size* grid until the ant leaves the grid.

    :param rules: string of turn rules ('L'/'R'), one per cell colour.
    :param size: side length of the square grid.
    :return: (number of steps taken, final grid)
    """
    # Grid of colour indices, all starting at colour 0.
    grid = [[0] * size for _ in range(size)]
    # One colour per rule character (colour == rule index).
    colors = list(range(len(rules)))
    # The ant starts at the centre, facing north.
    ant = Ant(size // 2, size // 2, 0)

    while True:
        lin, col = ant.coords
        if lin < 0 or lin >= size or col < 0 or col >= size:
            break
        # Cycle the colour of the current cell, then step forward.
        grid[lin][col] = (grid[lin][col] + 1) % len(colors)
        ant.move()
        lin, col = ant.coords
        if lin < 0 or lin >= size or col < 0 or col >= size:
            break
        # Turn according to the rule of the colour on the new cell.
        # (renamed from ``dir`` which shadowed the builtin)
        turn_rule = rules[grid[lin][col]]  # rules[0] -> 'R' or rules[1] -> 'L' ...
        ant.turn(turn_rule)

    print_grid(grid)
    print(ant)
    return ant.steps, grid
# Início programa ---------------------------------
# Demo run: five colour rules on a 151 x 151 grid.
_rules = 'RLRLL'
_size = 151
steps, grid = run_langton(_rules, _size)
# print(steps)
|
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Post(models.Model):
    """A short user post."""

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    body = models.TextField(max_length=500)
    slug = models.SlugField(max_length=200)
    # BUG FIX: auto_now=True updates the timestamp on *every* save; for a
    # creation timestamp the correct option is auto_now_add=True.
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        """Readable summary: author plus the first 30 characters of the body."""
        return f'{self.user}, {self.body[:30]}'

    def get_absolute_url(self):
        """Return the canonical URL of this post's detail page."""
        return reverse('posts:post-detail', kwargs={'pk': self.id})
|
from trello import TrelloClient
import json
from config.config import path_credentials_directory
def get_client():
    """
    Authorize in Trello.

    :return: Authenticated client object.
    """
    credentials_path = path_credentials_directory / "trello_credentials.json"
    with open(credentials_path, "r") as fh:
        credentials = json.load(fh)
    return TrelloClient(
        api_key=credentials["key"],
        api_secret=credentials["token"]
    )
def get_boards_and_users(
        client: TrelloClient
):
    """
    Get list of all boards and users subscribed to them.

    :param client: A Trello client object
    :return: list of dicts with 'name', 'description' and 'users' keys
    """
    # BUG FIX: the function built an empty list, printed debug info for the
    # first board only and returned None; it now returns the documented
    # result (the implementation was present but commented out).
    boards = []
    for board in client.list_boards():
        boards.append(
            {
                "name": board.name,
                "description": board.description,
                "users": board.all_members()
            }
        )
    return boards
|
import unittest
from genomicinfo.entity_extraction.bow.bowdictionary import BOWdictionary
class TestBOWdictionary(unittest.TestCase):
    """Smoke tests for the BOWdictionary entity extractor."""

    def setUp(self) -> None:
        # Fresh extractor instance for every test.
        self.extractor = BOWdictionary()

    def test_extract(self):
        # Bogus sentence for testing
        # The sentence contains a mutation mention ("D233G"); extract() is
        # expected to return a truthy (non-empty) result for it.
        ext = self.extractor.extract('Upon reanalyzing the che-1(ot73) deletion we found the downstream substitution to be a D233G change, rather than an earlier frameshift, as previ- ously reported (C hang et al .')
        self.assertTrue(ext)
|
import json
import codecs
import pandas as pd
import numpy as np
# Convert the one-column tweet CSV into a JSON file shaped as
# {"konomania-tweets": [{"tweet": ...}, ...]}.
csv_path = "./fixed_tweets.csv"
save_path = "./fixed_tweets.json"

# header=None: the source CSV has no header row; name the single column.
df = pd.read_csv(csv_path, header=None, encoding="utf-8")
df.columns = ["tweet"]

df_json = {"konomania-tweets": df.to_dict(orient='records')}

# Builtin open() with an encoding replaces the legacy codecs.open();
# ensure_ascii=False keeps the non-ASCII text readable in the output file.
with open(save_path, 'w', encoding='utf-8') as f:
    json.dump(df_json, f, indent=4, ensure_ascii=False)
|
# Title: 레드 블루 스패닝 트리
# Link: https://www.acmicpc.net/problem/4792
import sys
# The union-find recursion can get deep for large inputs.
sys.setrecursionlimit(10 ** 6)


# PEP 8 (E731): use def instead of assigning lambdas to names.
def read_list_int():
    """Read one stdin line as a list of ints."""
    return list(map(int, sys.stdin.readline().strip().split(' ')))


def read_list_words():
    """Read one stdin line as a list of words."""
    return sys.stdin.readline().strip().split(' ')
class UnionFind:
    """Disjoint-set with union by size and path compression.

    ``p[x]`` holds the parent of ``x``, or ``-(set size)`` if ``x`` is a root.
    """

    def __init__(self, max_count):
        # Every element starts as the root of a singleton set.
        self.p = [-1] * max_count

    def find(self, a: int):
        """Return the root of *a*, compressing the path along the way."""
        if self.p[a] < 0:
            return a
        root = self.find(self.p[a])
        self.p[a] = root
        return root

    def union(self, a: int, b: int):
        """Merge the sets of *a* and *b*; return False if already joined."""
        root_a, root_b = self.find(a), self.find(b)
        if root_a == root_b:
            return False
        if self.p[root_a] < self.p[root_b]:
            # root_a is the larger set (sizes are stored negated).
            self.p[root_a] += self.p[root_b]
            self.p[root_b] = root_a
        else:
            self.p[root_b] += self.p[root_a]
            self.p[root_a] = root_b
        return True

    def size(self, a: int):
        """Return the size of the set containing *a*."""
        return -self.p[self.find(a)]
def solution(n: int, m: int, k: int, edges: list):
    """
    Decide whether a spanning tree with exactly *k* blue edges exists.

    Kruskal-style greedy: processing edges sorted by colour ('B' sorts
    before 'R') maximises the number of blue tree edges, while processing
    them in reverse minimises it. Any count between the two extremes is
    achievable.

    :param n: number of vertices (1-based)
    :param m: number of edges (unused; kept for the caller's signature)
    :param k: required number of blue edges
    :param edges: list of (color, a, b) tuples
    :return: 1 if such a spanning tree exists, else 0
    """
    edges = sorted(edges, key=lambda edge: edge[0])

    # Maximum number of blue edges: blue edges get priority.
    blue_max = 0
    uf_max = UnionFind(n + 1)
    for color, a, b in edges:
        if uf_max.find(a) != uf_max.find(b):
            if color == 'B':
                blue_max += 1
            uf_max.union(a, b)

    # Minimum number of blue edges: red edges get priority.
    blue_min = 0
    uf_min = UnionFind(n + 1)
    for color, a, b in reversed(edges):
        if uf_min.find(a) != uf_min.find(b):
            if color == 'B':
                blue_min += 1
            uf_min.union(a, b)

    return 1 if blue_min <= k <= blue_max else 0
def main():
    """Read and solve test cases from stdin until the '0 0 0' terminator."""
    while True:
        n, m, k = read_list_int()
        if n == m == k == 0:
            break
        edge_list = []
        for _ in range(m):
            color, a, b = read_list_words()
            edge_list.append((color, int(a), int(b)))
        print(solution(n, m, k, edge_list))


if __name__ == '__main__':
    main()
# Copyright 2022 Fuzz Introspector Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Upstream repository and a base URL for linking to files on the main branch.
GIT_REPO = "https://github.com/ossf/fuzz-introspector"
GIT_BRANCH_URL = f"{GIT_REPO}/tree/main/"
# Well-known file names consumed/produced by the tool.
ENGINE_INPUT_FILE = "fuzz-introspector-engine-input.json"
SUMMARY_FILE = "summary.json"
# Process exit codes.
APP_EXIT_ERROR = 1
APP_EXIT_SUCCESS = 0
INPUT_BUG_FILE = "input_bugs.json"
|
import os
import urllib.request
import requests
from PIL import Image
from data_type_class import Data_type
from locale_class import Locale
def get_image_from_api(source: str, source_type: str, lol_version: str) -> Image:
    """
    Return the image for *source*, downloading it from Data Dragon on a cache miss.

    :param source: image file name without extension
    :param source_type: Data Dragon image category (e.g. 'champion', 'item')
    :param lol_version: LoL patch version used in the CDN URL
    :return: PIL Image opened from the local cache
    """
    cached_path = f'./temp/{source_type}/{source}.png'
    if os.path.isfile(cached_path):
        return Image.open(cached_path)
    # BUG FIX: os.mkdir fails when './temp' itself is missing;
    # makedirs(..., exist_ok=True) creates the whole path.
    os.makedirs(f'./temp/{source_type}', exist_ok=True)
    urllib.request.urlretrieve(f'http://ddragon.leagueoflegends.com/cdn/{lol_version}/img/{source_type}/{source}.png',
                               cached_path)
    return Image.open(cached_path)
def get_rune_image_from_api(name: str, source: str, size: tuple, background_color: tuple) -> Image:
    """
    Return the rune image *name*, downloading and converting it on a cache miss.

    :param name: local cache file name (without extension)
    :param source: image path on the Data Dragon CDN
    :param size: (width, height) the image is resized to
    :param background_color: RGBA background used when flattening to JPEG
    :return: PIL Image
    """
    if os.path.isfile(f'./temp/runes/{name}.jpg'):
        return Image.open(f'./temp/runes/{name}.jpg')
    # BUG FIX: os.mkdir fails when './temp' itself is missing;
    # makedirs(..., exist_ok=True) creates the whole path.
    os.makedirs('./temp/runes', exist_ok=True)
    urllib.request.urlretrieve(f'https://ddragon.canisback.com/img/{source}', f'./temp/runes/{name}.png')
    rune_image = Image.open(f'./temp/runes/{name}.png')
    rune_image = rune_image.resize(size)
    return convert_png_to_jpg(name, rune_image, size, background_color)
def convert_png_to_jpg(image_name: str, png_image: Image, size: tuple, background_color: tuple) -> Image:
    """
    Flatten a (possibly transparent) PNG image onto a solid background and
    save the result as './temp/runes/<image_name>.jpg'.

    NOTE(review): the returned image is the RGBA composite, not the RGB
    JPEG that was saved — confirm that callers expect this.
    """
    new_image = Image.new("RGBA", size, background_color)
    # Use the PNG's own alpha channel as the paste mask.
    new_image.paste(png_image, (0, 0), png_image)
    new_image.convert('RGB').save(f'./temp/runes/{image_name}.jpg')
    return new_image
def get_json_from_api(lol_version: str, data_type: Data_type, locale: Locale):
    """Fetch a Data Dragon JSON document for the given patch, locale and type.

    Args:
        lol_version: Data Dragon patch version used in the CDN path.
        data_type: Which document to fetch (its `.value` names the JSON file).
        locale: Locale of the document (its `.value` names the CDN folder).

    Returns:
        The parsed JSON. Rune documents are served as a bare list; every
        other document nests its payload under the 'data' key.

    Raises:
        requests.exceptions.RequestException: on connection failures or
            non-2xx HTTP responses (via raise_for_status).
    """
    # The previous try/except here only re-raised the caught exceptions,
    # which is a no-op; exceptions now propagate directly.
    response = requests.get(
        f"http://ddragon.leagueoflegends.com/cdn/{lol_version}/data/{locale.value}/{data_type.value}.json")
    response.raise_for_status()
    return response.json() if data_type == Data_type.rune else response.json()['data']
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate tensorflow.org style API Reference docs for a Python module."""
import collections
import os
import pathlib
import shutil
import tempfile
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
from tensorflow_docs.api_generator import config
from tensorflow_docs.api_generator import doc_generator_visitor
from tensorflow_docs.api_generator import parser
from tensorflow_docs.api_generator import public_api
from tensorflow_docs.api_generator import reference_resolver as reference_resolver_lib
from tensorflow_docs.api_generator import toc as toc_lib
from tensorflow_docs.api_generator import traverse
from tensorflow_docs.api_generator.pretty_docs import docs_for_object
from tensorflow_docs.api_generator.report import utils
import yaml
# Used to add a collections.OrderedDict representer to yaml so that the
# dump doesn't contain !!OrderedDict yaml tags.
# Reference: https://stackoverflow.com/a/21048064
# Using a normal dict doesn't preserve the order of the input dictionary.
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
  # Serialize an OrderedDict as a plain yaml mapping (no !!OrderedDict tag).
  return dumper.represent_dict(data.items())
def dict_constructor(loader, node):
  # Load yaml mappings back as OrderedDict, preserving document key order.
  return collections.OrderedDict(loader.construct_pairs(node))
# Register both globally at import time: dumping writes ordered mappings and
# loading reconstructs them in order.
yaml.add_representer(collections.OrderedDict, dict_representer)
yaml.add_constructor(_mapping_tag, dict_constructor)
def write_docs(
    *,
    output_dir: Union[str, pathlib.Path],
    parser_config: config.ParserConfig,
    yaml_toc: Union[bool, Type[toc_lib.TocBuilder]],
    root_module_name: str,
    root_title: str = 'TensorFlow',
    search_hints: bool = True,
    site_path: str = 'api_docs/python',
    gen_redirects: bool = True,
    gen_report: bool = True,
    extra_docs: Optional[Dict[int, str]] = None,
    page_builder_classes: Optional[docs_for_object.PageBuilderDict] = None,
):
  """Write previously extracted docs to disk.

  Write a docs page for each symbol included in the indices of parser_config to
  a tree of docs at `output_dir`.

  Symbols with multiple aliases will have only one page written about
  them, which is referenced for all aliases.

  Args:
    output_dir: Directory to write documentation markdown files to. Will be
      created if it doesn't exist.
    parser_config: A `config.ParserConfig` object, containing all the necessary
      indices.
    yaml_toc: Set to `True` to generate a "_toc.yaml" file.
    root_module_name: (str) the name of the root module (`tf` for tensorflow).
    root_title: The title name for the root level index.md.
    search_hints: (bool) include meta-data search hints at the top of each
      output file.
    site_path: The output path relative to the site root. Used in the
      `_toc.yaml` and `_redirects.yaml` files.
    gen_redirects: Bool which decides whether to generate _redirects.yaml file
      or not.
    gen_report: If True, a report for the library is generated by linting the
      docstrings of its public API symbols.
    extra_docs: To add docs for a particular object instance set it's __doc__
      attribute. For some classes (list, tuple, etc) __doc__ is not writable.
      Pass those docs like: `extra_docs={id(obj): "docs"}`
    page_builder_classes: A optional dict of `{ObjectType:Type[PageInfo]}` for
      overriding the default page builder classes.

  Raises:
    ValueError: if `output_dir` is not an absolute path
  """
  output_dir = pathlib.Path(output_dir)
  site_path = pathlib.Path('/', site_path)
  # Make output_dir.
  if not output_dir.is_absolute():
    raise ValueError("'output_dir' must be an absolute path.\n"
                     f" output_dir='{output_dir}'")
  output_dir.mkdir(parents=True, exist_ok=True)
  # Collect redirects for an api _redirects.yaml file.
  redirects = []
  api_report = None
  if gen_report:
    api_report = utils.ApiReport()
  # Parse and write Markdown pages, resolving cross-links (`tf.symbol`).
  num_docs_output = 0
  for api_node in parser_config.api_tree.iter_nodes():
    full_name = api_node.full_name
    # FRAGMENT nodes are documented on a parent's page, so they get no page
    # of their own.
    if api_node.output_type() is api_node.OutputType.FRAGMENT:
      continue
    # Generate docs for `py_object`, resolving references.
    try:
      page_info = docs_for_object.docs_for_object(
          api_node=api_node,
          parser_config=parser_config,
          extra_docs=extra_docs,
          search_hints=search_hints,
          page_builder_classes=page_builder_classes)
      # Lint metrics skip compat/alias namespaces so the report reflects the
      # primary API surface only.
      if api_report is not None and not full_name.startswith(
          ('tf.compat.v', 'tf.keras.backend', 'tf.numpy',
           'tf.experimental.numpy')):
        api_report.fill_metrics(page_info)
    except Exception as e:
      raise ValueError(
          f'Failed to generate docs for symbol: `{full_name}`') from e
    path = output_dir / parser.documentation_path(full_name)
    try:
      path.parent.mkdir(exist_ok=True, parents=True)
      path.write_text(page_info.page_text, encoding='utf-8')
      num_docs_output += 1
    except OSError as e:
      raise OSError('Cannot write documentation for '
                    f'{full_name} to {path.parent}') from e
    duplicates = parser_config.duplicates.get(full_name, [])
    if not duplicates:
      continue
    # Aliases other than the canonical name become redirect entries.
    duplicates = [item for item in duplicates if item != full_name]
    if gen_redirects:
      for dup in duplicates:
        from_path = site_path / dup.replace('.', '/')
        to_path = site_path / full_name.replace('.', '/')
        redirects.append({'from': str(from_path), 'to': str(to_path)})
  if api_report is not None:
    api_report.write(output_dir / root_module_name / 'api_report.pb')
  # A single output page means only the root module page was written, i.e.
  # nothing under it survived filtering — treat as a configuration error.
  if num_docs_output <= 1:
    raise ValueError('The `DocGenerator` failed to generate any docs. Verify '
                     'your arguments (`base_dir` and `callbacks`). '
                     'Everything you want documented should be within '
                     '`base_dir`.')
  if yaml_toc:
    # `yaml_toc=True` selects the default TOC builder class.
    if isinstance(yaml_toc, bool):
      yaml_toc = toc_lib.FlatModulesTocBuilder
    toc = yaml_toc(site_path).build(parser_config.api_tree)
    toc_path = output_dir / root_module_name / '_toc.yaml'
    toc.write(toc_path)
  if redirects and gen_redirects:
    redirects_dict = {
        'redirects': sorted(redirects, key=lambda redirect: redirect['from'])
    }
    api_redirects_path = output_dir / root_module_name / '_redirects.yaml'
    with open(api_redirects_path, 'w') as redirect_file:
      yaml.dump(redirects_dict, redirect_file, default_flow_style=False)
  # Write a global index containing all full names with links.
  with open(output_dir / root_module_name / 'all_symbols.md', 'w') as f:
    global_index = parser.generate_global_index(
        root_title, parser_config.index, parser_config.reference_resolver)
    if not search_hints:
      global_index = 'robots: noindex\n' + global_index
    f.write(global_index)
def add_dict_to_dict(add_from, add_to):
  """Merge `add_from` into `add_to` in place, extending lists on key collision.

  Keys new to `add_to` are bound to `add_from`'s value object itself (no
  copy); existing keys have their list extended with the incoming items.
  """
  for key, values in add_from.items():
    try:
      add_to[key].extend(values)
    except KeyError:
      add_to[key] = values
def extract(
    py_modules,
    base_dir,
    private_map: Dict[str, Any],
    visitor_cls: Type[
        doc_generator_visitor.DocGeneratorVisitor] = doc_generator_visitor
    .DocGeneratorVisitor,
    callbacks: Optional[public_api.ApiFilter] = None,
    include_default_callbacks=True):
  """Traverses the module contents and returns the accumulator visitor.

  The module is walked through a pipeline of filter callbacks; each filter
  receives the children found so far and returns the children to pass on.
  The accumulator (an instance of `visitor_cls`) sits at the end of the
  pipeline and records everything that survives filtering.

  Args:
    py_modules: A list containing a single (short_name, module_object) pair,
      like `[('tf', tf)]`.
    base_dir: The package root directory. Nothing defined outside of this
      directory is documented.
    private_map: A {'path': ["name"]} dictionary listing particular object
      locations that should be ignored in the doc generator.
    visitor_cls: Class of the accumulator, typically a subclass of
      `doc_generator_visitor.DocGeneratorVisitor`.
    callbacks: Additional filters executed between the standard chain and the
      accumulator (see: `public_api.local_definitions_filter`).
    include_default_callbacks: When False, the standard filter chain is
      skipped and only `callbacks` is used.

  Returns:
    The accumulator (`DocGeneratorVisitor`).

  Raises:
    ValueError: If `py_modules` does not contain exactly one pair.
  """
  if len(py_modules) != 1:
    raise ValueError("only pass one [('name',module)] pair in py_modules")
  short_name, py_module = py_modules[0]

  user_callbacks = list(callbacks) if callbacks else []

  # Standard filter chain. Order matters: each filter sees only the children
  # the previous one let through.
  if not include_default_callbacks:
    default_filters = []
  else:
    default_filters = [
        public_api.FailIfNestedTooDeep(10),
        public_api.filter_module_all,
        public_api.add_proto_fields,
        public_api.filter_builtin_modules,
        public_api.filter_private_symbols,
        public_api.FilterBaseDirs(base_dir),
        public_api.FilterPrivateMap(private_map),
        public_api.filter_doc_controls_skip,
        public_api.ignore_typing,
    ]

  accumulator = visitor_cls()
  traverse.traverse(
      py_module,
      default_filters + user_callbacks,
      accumulator,
      root_name=short_name)
  accumulator.build()
  return accumulator
# Files skipped during doc generation; set literal replaces set([...]) (C405).
EXCLUDED = {'__init__.py', 'OWNERS', 'README.txt'}
class DocGenerator:
  """Main entry point for generating docs.

  Typical use: construct with the module(s) to document, then call `build`
  with an output directory.
  """

  def __init__(
      self,
      root_title: str,
      py_modules: Sequence[Tuple[str, Any]],
      base_dir: Optional[Sequence[Union[str, pathlib.Path]]] = None,
      code_url_prefix: Union[Optional[str], Sequence[Optional[str]]] = (),
      search_hints: bool = True,
      site_path: str = 'api_docs/python',
      private_map: Optional[Dict[str, str]] = None,
      visitor_cls: Type[doc_generator_visitor.DocGeneratorVisitor] = (
          doc_generator_visitor.DocGeneratorVisitor),
      api_cache: bool = True,
      callbacks: Optional[List[public_api.ApiFilter]] = None,
      yaml_toc: Union[bool, Type[toc_lib.TocBuilder]] = True,
      gen_redirects: bool = True,
      gen_report: bool = True,
      extra_docs: Optional[Dict[int, str]] = None,
      page_builder_classes: Optional[docs_for_object.PageBuilderDict] = None,
  ):
    """Creates a doc-generator.

    Args:
      root_title: A string. The main title for the project. Like "TensorFlow"
      py_modules: The python module to document.
      base_dir: String or tuple of strings. Directories that "Defined in" links
        are generated relative to. **Modules outside one of these directories
        are not documented**. No `base_dir` should be inside another.
      code_url_prefix: String or tuple of strings. The prefix to add to "Defined
        in" paths. These are zipped with `base-dir`, to set the `defined_in`
        path for each file. The defined in link for `{base_dir}/path/to/file` is
        set to `{code_url_prefix}/path/to/file`.
      search_hints: Bool. Include metadata search hints at the top of each file.
      site_path: Path prefix in the "_toc.yaml"
      private_map: DEPRECATED. Use `api_generator.doc_controls`, or pass a
        filter to the `callbacks` argument. A
        `{"module.path.to.object": ["names"]}` dictionary. Specific
        aliases that should not be shown in the resulting docs.
      visitor_cls: An option to override the default visitor class
        `doc_generator_visitor.DocGeneratorVisitor`.
      api_cache: Bool. Generate an api_cache file. This is used to easily add
        api links for backticked symbols (like `tf.add`) in other docs.
      callbacks: Additional callbacks passed to `traverse`. Executed between the
        `PublicApiFilter` and the accumulator (`DocGeneratorVisitor`). The
        primary use case for these is to filter the list of children (see:
        `public_api.ApiFilter` for the required signature)
      yaml_toc: Bool which decides whether to generate _toc.yaml file or not.
      gen_redirects: Bool which decides whether to generate _redirects.yaml file
        or not.
      gen_report: If True, a report for the library is generated by linting the
        docstrings of its public API symbols.
      extra_docs: To add docs for a particular object instance set it's __doc__
        attribute. For some classes (list, tuple, etc) __doc__ is not writable.
        Pass those docs like: `extra_docs={id(obj): "docs"}`
      page_builder_classes: An optional dict of `{ObjectType:Type[PageInfo]}`
        for overriding the default page builder classes.

    Raises:
      ValueError: If `base_dir` or `code_url_prefix` is empty, or if their
        lengths differ.
    """
    self._root_title = root_title
    self._py_modules = py_modules
    # Convenience unpacking of the single (short_name, module) pair.
    self._short_name = py_modules[0][0]
    self._py_module = py_modules[0][1]
    if base_dir is None:
      # Determine the base_dir for the module
      base_dir = public_api.get_module_base_dirs(self._py_module)
    else:
      # Normalize a single path into a one-element tuple of pathlib.Paths.
      if isinstance(base_dir, (str, pathlib.Path)):
        base_dir = (base_dir,)
      base_dir = tuple(pathlib.Path(d) for d in base_dir)
    self._base_dir = base_dir
    if not self._base_dir:
      raise ValueError('`base_dir` cannot be empty')
    # A lone prefix (or None) becomes a one-element tuple so it zips with
    # `base_dir` below.
    if isinstance(code_url_prefix, str) or code_url_prefix is None:
      code_url_prefix = (code_url_prefix,)
    self._code_url_prefix = tuple(code_url_prefix)
    if not self._code_url_prefix:
      raise ValueError('`code_url_prefix` cannot be empty')
    if len(self._code_url_prefix) != len(base_dir):
      raise ValueError('The `base_dir` list should have the same number of '
                       'elements as the `code_url_prefix` list (they get '
                       'zipped together).')
    self._search_hints = search_hints
    self._site_path = site_path
    self._private_map = private_map or {}
    self._visitor_cls = visitor_cls
    self.api_cache = api_cache
    if callbacks is None:
      callbacks = []
    self._callbacks = callbacks
    self._yaml_toc = yaml_toc
    self._gen_redirects = gen_redirects
    self._gen_report = gen_report
    self._extra_docs = extra_docs
    self._page_builder_classes = page_builder_classes

  def make_reference_resolver(self, visitor):
    """Builds a `ReferenceResolver` from the visitor's collected symbols."""
    return reference_resolver_lib.ReferenceResolver.from_visitor(
        visitor, py_module_names=[self._short_name])

  def make_parser_config(self,
                         visitor: doc_generator_visitor.DocGeneratorVisitor):
    """Bundles the visitor's indices into a `config.ParserConfig`."""
    reference_resolver = self.make_reference_resolver(visitor)
    return config.ParserConfig(
        reference_resolver=reference_resolver,
        duplicates=visitor.duplicates,
        duplicate_of=visitor.duplicate_of,
        tree=visitor.tree,
        index=visitor.index,
        reverse_index=visitor.reverse_index,
        path_tree=visitor.path_tree,
        api_tree=visitor.api_tree,
        base_dir=self._base_dir,
        code_url_prefix=self._code_url_prefix)

  def run_extraction(self):
    """Walks the module contents and builds the parser configuration.

    Traversal is done with an instance of `self._visitor_cls`, usually
    `doc_generator_visitor.DocGeneratorVisitor`.

    Returns:
      A `config.ParserConfig` built from the traversal's accumulated indices.
    """
    visitor = extract(
        py_modules=self._py_modules,
        base_dir=self._base_dir,
        private_map=self._private_map,
        visitor_cls=self._visitor_cls,
        callbacks=self._callbacks)
    # Write the api docs.
    parser_config = self.make_parser_config(visitor)
    return parser_config

  def build(self, output_dir):
    """Build all the docs.

    This produces python api docs:

    * generated from `py_module`.
    * written to '{output_dir}/api_docs/python/'

    Args:
      output_dir: Where to write the resulting docs.
    """
    # Docs are generated into a scratch directory first, then copied over,
    # so a failed run doesn't leave a half-written output_dir.
    workdir = pathlib.Path(tempfile.mkdtemp())
    # Extract the python api from the _py_modules
    parser_config = self.run_extraction()
    work_py_dir = workdir / 'api_docs/python'
    write_docs(
        output_dir=str(work_py_dir),
        parser_config=parser_config,
        yaml_toc=self._yaml_toc,
        root_title=self._root_title,
        root_module_name=self._short_name.replace('.', '/'),
        search_hints=self._search_hints,
        site_path=self._site_path,
        gen_redirects=self._gen_redirects,
        gen_report=self._gen_report,
        extra_docs=self._extra_docs,
        page_builder_classes=self._page_builder_classes,
    )
    if self.api_cache:
      parser_config.reference_resolver.to_json_file(
          str(work_py_dir / self._short_name.replace('.', '/') /
              '_api_cache.json'))
    os.makedirs(output_dir, exist_ok=True)
    # Typical results are something like:
    #
    #    out_dir/
    #       {short_name}/
    #       _redirects.yaml
    #       _toc.yaml
    #       api_report.pb
    #       index.md
    #       {short_name}.md
    #
    # Copy the top level files to the `{output_dir}/`, delete and replace the
    # `{output_dir}/{short_name}/` directory.
    for work_path in work_py_dir.glob('*'):
      out_path = pathlib.Path(output_dir) / work_path.name
      out_path.parent.mkdir(exist_ok=True, parents=True)
      if work_path.is_file():
        shutil.copy2(work_path, out_path)
      elif work_path.is_dir():
        shutil.rmtree(out_path, ignore_errors=True)
        shutil.copytree(work_path, out_path)
|
import numpy as np
import random
def activate(weights, inputs):
    """Weighted sum of `inputs` plus a bias term.

    Args:
        weights: Sequence whose last element is the bias; the rest pair with
            `inputs` position by position.
        inputs: Input values for this neuron.

    Returns:
        bias + sum(w_i * x_i), accumulated left to right (same float
        association as the original index loop).
    """
    activation = weights[-1]
    # zip replaces the index loop; accumulation order is unchanged so float
    # results are bit-identical.
    for weight, value in zip(weights[:-1], inputs):
        activation += weight * value
    return activation
def transfer(activation):
    """Logistic sigmoid of `activation`: 1 / (1 + e^-x)."""
    exp_neg = np.exp(-activation)
    return 1.0 / (1.0 + exp_neg)
def forward_propagate(network, row):
    """Feed `row` forward through every layer of `network`.

    Each neuron's activation is computed from the previous layer's outputs,
    passed through the sigmoid, and cached on the neuron under "output".

    Returns:
        The list of outputs of the final layer.
    """
    signal = row
    for layer in network:
        layer_outputs = []
        for neuron in layer:
            neuron["output"] = transfer(activate(neuron["weights"], signal))
            layer_outputs.append(neuron["output"])
        signal = layer_outputs
    return signal
def predictPresent(network, row):
    """Index of the strongest output neuron for input `row` (first on ties)."""
    outputs = forward_propagate(network, row)
    # max over indices keyed by output value == outputs.index(max(outputs)),
    # including the first-wins tie-breaking.
    return max(range(len(outputs)), key=outputs.__getitem__)
# Frozen weights of a pre-trained two-layer feed-forward network used by
# `strategy`: 20 hidden neurons over an 11-weight input (last weight is the
# bias consumed by `activate`), then 2 output neurons with 21 weights each.
# "output" and "delta" are values cached from the last training pass.
net = [
    [
        {
            "weights": [
                -1.6093878688145231,
                0.1990195635919374,
                2.296555103549103,
                -0.962835234505048,
                0.44050483763696485,
                3.472454702749179,
                2.9440384821229295,
                2.8445612762417802,
                2.594842645630752,
                -1.1223742145599604,
                -0.0982872830421154,
            ],
            "output": 0.9995217494992951,
            "delta": -1.4681027476114932e-09,
        },
        {
            "weights": [
                1.9766241154186381,
                1.1480625356718765,
                -3.095090412396784,
                3.566790392682447,
                1.5016801353414624,
                0.0654386827982249,
                3.6732957331814076,
                -3.7308978944343956,
                -2.028644515466221,
                0.755763147899167,
                -1.2301048127431682,
            ],
            "output": 0.9827052455256156,
            "delta": -9.74737363803606e-10,
        },
        {
            "weights": [
                1.0786441689392376,
                -0.3061504015906138,
                0.6776818658150896,
                0.6798797902093786,
                0.020604448654367134,
                0.7618893247469398,
                0.02878254404059247,
                -0.17247725975988792,
                0.4832291656707709,
                0.40795721213088754,
                0.6924487143523802,
            ],
            "output": 0.9504475212988029,
            "delta": 4.5292583651555806e-10,
        },
        {
            "weights": [
                -0.28932649412496014,
                0.2882478401067099,
                -1.195496623762568,
                0.3846003906591432,
                1.600660490867579,
                3.4056693072449202,
                2.098069570194462,
                1.6425387038602324,
                1.3146293185576237,
                0.36049032284218996,
                -0.944545632969982,
            ],
            "output": 0.9930585466838056,
            "delta": -7.615993894629358e-10,
        },
        {
            "weights": [
                2.0881326080260694,
                1.7908525116657255,
                -2.868116592049132,
                0.07095825208493492,
                1.8774879399554651,
                4.277571568375474,
                1.2089532993140977,
                0.8289950828755157,
                0.7217672891773552,
                -0.38319104283657124,
                -2.250627639852151,
            ],
            "output": 0.9978156575349624,
            "delta": -4.2980943965333116e-10,
        },
        {
            "weights": [
                0.7829194591028171,
                1.2524187677527756,
                0.1474388952392732,
                -0.8954007546681998,
                0.004648925988106471,
                -0.1968203479377116,
                1.3693702986859395,
                -0.32206211708273674,
                0.14997046472294295,
                0.8393025823587495,
                0.4122601617135446,
            ],
            "output": 0.9775165592501492,
            "delta": 7.866039827662531e-10,
        },
        {
            "weights": [
                -0.5461824207196235,
                0.6566856800355586,
                0.3203073849390045,
                -0.16940263237640316,
                0.9111704830164779,
                0.6132755605376318,
                -0.39497173801018576,
                0.9382347205999444,
                0.3620403053789543,
                0.236490189937608,
                -0.8556943847442923,
            ],
            "output": 0.6692047725479826,
            "delta": 1.7621825294030313e-10,
        },
        {
            "weights": [
                0.1847898255711901,
                0.384049216059238,
                -1.2762337053108284,
                1.5835673411964841,
                0.5046553562754501,
                1.4206571619803305,
                1.910252069612844,
                -0.22123514458955923,
                -0.9645638494274146,
                -0.3508545124701723,
                -0.5612958050574427,
            ],
            "output": 0.9286989860739141,
            "delta": -2.4845992919359195e-10,
        },
        {
            "weights": [
                -0.1568689134168308,
                0.9944921409787328,
                0.1829413990943324,
                0.2906914564072817,
                0.1699701372278749,
                0.44186888022973886,
                0.43711027538023894,
                -0.2868543936705529,
                0.3029006254994629,
                0.43156238061834196,
                0.0547003689933311,
            ],
            "output": 0.8932344985438483,
            "delta": 3.693751171407687e-10,
        },
        {
            "weights": [
                0.7651931545277842,
                -1.3528971813048682,
                3.1773876177423435,
                2.3817951529372676,
                -1.4989782627127715,
                3.0204956853695166,
                -0.4974839015671708,
                -0.265471787430324,
                -0.7336118932082898,
                -0.11006068198401087,
                0.9396249079265786,
            ],
            "output": 0.9895778183472065,
            "delta": 1.272500501934427e-09,
        },
        {
            "weights": [
                0.76703314534274,
                -1.8116509042177331,
                1.276661501591155,
                2.5688446280982236,
                -2.1337481050217035,
                2.4161539038443913,
                0.23835893541316686,
                0.9584450180734164,
                1.1474632180960005,
                0.8276228374430421,
                0.44978773619121315,
            ],
            "output": 0.7689863133702648,
            "delta": 1.346298459919606e-09,
        },
        {
            "weights": [
                -0.36999009470164695,
                2.6336294413294183,
                -1.6927945308752197,
                -1.6275886940294262,
                1.8538390305052979,
                -2.0058080077041134,
                -0.6871650473180408,
                0.9885222670744113,
                -0.3430409352949199,
                0.862038327198487,
                0.38369059732352967,
            ],
            "output": 0.5288183718941926,
            "delta": 1.1035542960748484e-09,
        },
        {
            "weights": [
                1.4408984492362598,
                -1.3902728536162343,
                1.731315387101993,
                1.6151349758942497,
                -1.2559396412264678,
                2.1840422082104163,
                -0.3455596407445834,
                0.2866380252734186,
                0.590357955210133,
                0.3297858758845848,
                1.0254103653209474,
            ],
            "output": 0.9673872092966047,
            "delta": 8.45287043959855e-10,
        },
        {
            "weights": [
                -0.015494900941334272,
                1.2510407516421538,
                0.6120783342096026,
                -0.15481422697536693,
                0.45198084025686935,
                -0.0583946473995489,
                0.09534161051228719,
                0.554986705397533,
                0.2998139044960483,
                0.607565618114633,
                -0.8299558166591398,
            ],
            "output": 0.8185562117128263,
            "delta": 2.909964743750628e-10,
        },
        {
            "weights": [
                1.138556926633264,
                0.11936754428188064,
                0.8204848105362982,
                0.09515061815332554,
                -0.14063908595635105,
                0.36982629714541654,
                0.791764194194902,
                -0.27607816759621095,
                0.8713069626660157,
                0.8256625831973631,
                1.1294618915179846,
            ],
            "output": 0.9856396881264141,
            "delta": 6.855857814844127e-10,
        },
        {
            "weights": [
                -2.492174997748589,
                0.22213935162857706,
                7.0169835462354655,
                3.722898357516809,
                3.656050625612946,
                -3.8515420201964896,
                0.34447426119532515,
                -0.40760239219582256,
                6.743081249820877,
                -1.5804376361088401,
                -1.8632870373834427,
            ],
            "output": 0.9540272642110039,
            "delta": -1.4363046230146337e-09,
        },
        {
            "weights": [
                6.6876373228205255,
                -2.462579379061716,
                -3.074640439570742,
                5.976349507007451,
                -0.1371580890239501,
                1.3815853215713778,
                5.513278973879676,
                -5.959290372907175,
                -3.0481717404969886,
                0.3978012936650178,
                -0.552877236635955,
            ],
            "output": 0.9993611785091161,
            "delta": -2.9918511703490497e-09,
        },
        {
            "weights": [
                -1.2683347676462815,
                0.1187752271787765,
                1.7557733849751345,
                -0.4153408826819294,
                0.7170083483730006,
                2.539442231060569,
                2.239623041007771,
                2.180449993269922,
                2.2580491531675793,
                -0.7291985298912645,
                -0.11385144811557141,
            ],
            "output": 0.9974986894410769,
            "delta": -8.013535450281793e-10,
        },
        {
            "weights": [
                -0.8605491462949854,
                0.010684676651869575,
                0.6636391462890293,
                -0.20448863985000484,
                0.24657806254855882,
                0.25580755473766664,
                0.011587503996244706,
                1.1339753090995666,
                1.0837357693389422,
                -0.7055644609357806,
                -1.0620124452827775,
            ],
            "output": 0.32425957846552056,
            "delta": -1.1811550199820174e-10,
        },
        {
            "weights": [
                1.0823492148407028,
                -1.0157612093804615,
                1.510223014342567,
                1.1031094778829047,
                -0.7187852754124545,
                1.226179065641211,
                0.7056274114969076,
                -0.6688684851732033,
                0.4664574516589172,
                1.1116980167928525,
                0.9616727303554568,
            ],
            "output": 0.9770563911291313,
            "delta": 9.031077643121872e-10,
        },
    ],
    [
        {
            "weights": [
                -3.5811771920292523,
                -3.4590030475115,
                1.7672855044778508,
                -2.479682631906018,
                -3.254975359731665,
                1.8997094877411984,
                0.5451442063915155,
                -0.833759321272434,
                1.138251891453143,
                3.900805516010275,
                3.790985686137729,
                2.9476289701507152,
                2.5568451631864395,
                0.7543584721238334,
                2.5635676165823837,
                -7.994338468469285,
                -8.261501905125296,
                -2.2305536081595894,
                -0.20126797030218685,
                2.8593857572174017,
                3.424305611941282,
            ],
            "output": 0.001430342776152367,
            "delta": 7.653011171484693e-10,
        },
        {
            "weights": [
                3.955879334635359,
                3.674708935347267,
                -0.8615952569168914,
                2.3642915048729125,
                3.1365284024915723,
                -2.2988404410453667,
                -0.5341324614310031,
                0.5484009768785507,
                -0.7639082376028289,
                -4.166379332850712,
                -3.471773898141836,
                -2.920905918578443,
                -3.0101628858765213,
                -1.0045782883110748,
                -2.2031383403202205,
                7.839280031512421,
                8.277779448521951,
                1.8972163620113258,
                0.5842816332845902,
                -2.9245174619084486,
                -3.805708131015972,
            ],
            "output": 0.9982962236610851,
            "delta": -7.952532633524987e-10,
        },
    ],
]
def strategy(history, memory):
    """Decide the next move from the opponent/game history.

    Parameters
    ----------
    history : numpy.ndarray
        2-D array of past moves; row 1 is read as the relevant move
        sequence (0/1 values).  Shape is assumed to be (rows, turns)
        — TODO confirm against the caller.
    memory : object
        Unused; accepted and returned as ``None`` to satisfy the
        strategy-callback interface.

    Returns
    -------
    tuple[int, None]
        ``(choice, None)`` where ``choice`` is 0 or 1.
    """
    # NOTE: the original code carried a hard-coded `rand = False` flag whose
    # else-branch (always choose 0) was unreachable dead code; it has been
    # removed without changing behavior.
    if history.shape[1] >= 10:
        # Feed the last 9 observed moves plus a trailing 0 placeholder
        # into the network-based predictor defined elsewhere in this file.
        choice = predictPresent(net, list(history[1][-10:][:9]) + [0])
    else:
        # Not enough history yet: default to cooperating (1).
        choice = 1

    # Retaliate immediately: if the opponent's last move was 0, play 0.
    if history.shape[1] >= 1 and history[1, -1] == 0:
        choice = 0

    return choice, None
|
"""
if not os.path.exists(self.history_file):
open(self.history_file, 'a+').close()
readline.read_history_file(self.history_file)
readline.set_history_length(self.history_length)
atexit.register(readline.write_history_file, self.history_file)
readline.parse_and_bind('set enable-keypad on')
readline.set_completer(self.complete)
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
'description': 'Exploits ZTE F460 and F660 backdoor vulnerability that allows executing commands on operating system level.',
routersploit/modules/exploits/routers/zte/f460_f660_backdoor.py:55: print_info(self.execute(cmd))
routersploit/modules/exploits/routers/zte/f460_f660_backdoor.py:57: def execute(self, cmd):
routersploit/modules/exploits/routers/zte/f460_f660_backdoor.py:85: response = self.execute(cmd)
routersploit/modules/exploits/cameras/multi/P2P_wificam_rce.py:22: 'name': 'P2P wificam remote code execution',
routersploit/modules/exploits/cameras/multi/P2P_wificam_rce.py:25: unauthenticated remote code execution.""",
routersploit/modules/exploits/cameras/multi/P2P_wificam_rce.py:1303: def execute(self, cmd):
routersploit/modules/exploits/misc/asus/b1m_projector_rce.py:15: Exploit implementation for Asus B1M Projector Remote Code Execution vulnerability.
routersploit/modules/exploits/misc/asus/b1m_projector_rce.py:16: If the target is vulnerable, command loop is invoked that allows executing commands with root privileges.
routersploit/modules/exploits/misc/asus/b1m_projector_rce.py:20: 'description': 'Module exploits Asus B1M Projector Remote Code Execution vulnerability which '
routersploit/modules/exploits/misc/asus/b1m_projector_rce.py:21: 'allows executing command on operating system level with root privileges.',
routersploit/modules/exploits/misc/asus/b1m_projector_rce.py:45: def execute(self, cmd):
routersploit/modules/exploits/misc/asus/b1m_projector_rce.py:58: response_text = self.execute(cmd)
routersploit/modules/exploits/misc/wepresent/wipg1000_rce.py:16: If the target is vulnerable, it is possible to execute commands on operating system level.
routersploit/modules/exploits/misc/wepresent/wipg1000_rce.py:21: 'executing commands on operating system level.',
routersploit/modules/exploits/misc/wepresent/wipg1000_rce.py:48: def execute(self, cmd):
routersploit/modules/payloads/mipsbe/bind_tcp.py:82: # execve("//bin/sh", ["//bin/sh"], [/* 0 vars */]) = 0
routersploit/modules/payloads/mipsbe/bind_tcp.py:95: "\x24\x02\x0f\xab" + # li v0,4011 ( __NR_execve )
routersploit/modules/payloads/mipsle/bind_tcp.py:87: "\xab\x0f\x02\x24" + # li v0,4011 ( __NR_execve )
routersploit/modules/scanners/autopwn.py:142: with threads.ThreadPoolExecutor(self.threads) as executor:
routersploit/modules/scanners/autopwn.py:145: executor.submit(self.target_function, exploit)
Binary file routersploit/modules/scanners/autopwn.pyc matches
routersploit/modules/creds/http_basic_bruteforce.py:17:from routersploit.exceptions import StopThreadPoolExecutor
routersploit/modules/creds/http_basic_bruteforce.py:78: with threads.ThreadPoolExecutor(self.threads) as executor:
routersploit/modules/creds/http_basic_bruteforce.py:80: executor.submit(self.target_function, url, record)
routersploit/modules/creds/http_basic_bruteforce.py:101: raise StopThreadPoolExecutor
routersploit/modules/creds/http_digest_default.py:15:from routersploit.exceptions import StopThreadPoolExecutor
routersploit/modules/creds/http_digest_default.py:16:from routersploit.threads import ThreadPoolExecutor
routersploit/modules/creds/http_digest_default.py:72: with ThreadPoolExecutor(self.threads) as executor:
routersploit/modules/creds/http_digest_default.py:75: executor.submit(self.target_function, url, username, password)
routersploit/modules/creds/http_digest_default.py:98: raise StopThreadPoolExecutor
routersploit/modules/creds/http_digest_bruteforce.py:17:from routersploit.exceptions import StopThreadPoolExecutor
routersploit/modules/creds/http_digest_bruteforce.py:80: with threads.ThreadPoolExecutor(self.threads) as executor:
routersploit/modules/creds/http_digest_bruteforce.py:82: executor.submit(self.target_function, url, record)
routersploit/modules/creds/http_digest_bruteforce.py:103: raise StopThreadPoolExecutor
routersploit/modules/creds/http_basic_default.py:15:from routersploit.exceptions import StopThreadPoolExecutor
routersploit/modules/creds/http_basic_default.py:16:from routersploit.threads import ThreadPoolExecutor
routersploit/modules/creds/http_basic_default.py:70: with ThreadPoolExecutor(self.threads) as executor:
routersploit/modules/creds/http_basic_default.py:73: executor.submit(self.target_function, url, username, password)
routersploit/modules/creds/http_basic_default.py:96: raise StopThreadPoolExecutor
routersploit/threads.py:12:from .exceptions import StopThreadPoolExecutor
routersploit/threads.py:34: except StopThreadPoolExecutor:
routersploit/threads.py:42:class ThreadPoolExecutor(object):
Binary file routersploit/exceptions.pyc matches
Binary file routersploit/shell.pyc matches
routersploit/utils/__init__.py:113: before executing command specific to modules (ex. 'run').
routersploit/utils/__init__.py:179: multiple targets definition. Decorated function will be executed
Binary file routersploit/utils/__init__.pyc matches
routersploit/shell.py:33: print_success("Welcome to cmd. Commands are sent to the target via the execute method.")
routersploit/shell.py:117: params['exec_binary'] = data
routersploit/shell.py:129: print_status("Executing '{}' on the device...".format(cmd))
routersploit/shell.py:130: print_info(exploit.execute(cmd))
routersploit/shell.py:155: def __init__(self, exploit, payload, options, location="", wget_options={}, echo_options={}, exec_binary=None):
routersploit/shell.py:167: # process of executing payload
routersploit/shell.py:168: self.exec_binary = exec_binary
routersploit/shell.py:221: self.exploit.execute(cmd)
routersploit/shell.py:260: self.exploit.execute(cmd)
routersploit/shell.py:280: # set of instructions to execute payload on the device
routersploit/shell.py:281: if isinstance(self.exec_binary, list) or isinstance(self.exec_binary, tuple):
routersploit/shell.py:282: for item_exec_binary in self.exec_binary:
routersploit/shell.py:283: if isinstance(item_exec_binary, str):
routersploit/shell.py:285: commands.append(item_exec_binary.format(path))
routersploit/shell.py:287: commands.append(item_exec_binary)
routersploit/shell.py:288: elif callable(item_exec_binary):
routersploit/shell.py:289: commands.append(item_exec_binary(path))
routersploit/shell.py:291: # instruction to execute generic payload e.g. netcat / awk
routersploit/shell.py:292: elif isinstance(self.exec_binary, str):
routersploit/shell.py:294: commands.append(self.exec_binary.format(path))
routersploit/shell.py:296: commands.append(self.exec_binary)
routersploit/shell.py:298: # default way of executing payload
routersploit/shell.py:300: exec_binary_str = "chmod 777 {0}; {0}; rm {0}".format(path)
routersploit/shell.py:301: commands.append(exec_binary_str)
routersploit/shell.py:311: # execute binary
routersploit/shell.py:314: print_status("Executing payload on the device")
routersploit/shell.py:318: self.exploit.execute(command)
routersploit/shell.py:320: # asynchronous last command to execute binary & rm binary
routersploit/shell.py:321: thread = threading.Thread(target=self.exploit.execute, args=(commands[-1],))
routersploit/shell.py:336: # execute binary
routersploit/shell.py:341: self.exploit.execute(command)
routersploit/shell.py:343: # asynchronous last command to execute binary & rm binary
routersploit/shell.py:344: thread = threading.Thread(target=self.exploit.execute, args=(commands[-1],))
tests/test_completer.py:38: 'exec exit help search show use \r\n',
tests/test_completer.py:97: 'back exec help search setg use \r\n'
tests/test_completer.py:201: "back exec help search setg use \r\n"
tests/test_completer.py:214: 'back exec help search setg unsetg \r\n'
tests/test_interpreter.py:215: def test_command_run_exception_during_exploit_execution(self,
tests/test_interpreter.py:298: ['back', 'check', 'exec ', 'exit', 'help', 'run', 'search ',
tests/test_interpreter.py:307: ['back', 'check', 'exec ', 'exit', 'help', 'run', 'search ',
tests/test_interpreter.py:317: ['exec ', 'exit', 'help', 'search ', 'show ', 'use ']
tests/test_interpreter.py:677: def test_command_exec(self, mock_system):
tests/test_interpreter.py:678: self.interpreter.command_exec("foo -bar")
root@CRACK_COCAINE:~/Documents/routersploit# egrep -ix "show" * --color
grep: routersploit: Is a directory
grep: tests: Is a directory
root@CRACK_COCAINE:~/Documents/routersploit# egrep -irx "show" * --color
root@CRACK_COCAINE:~/Documents/routersploit# egrep -irx "show" * --color
root@CRACK_COCAINE:~/Documents/routersploit# egrep -irn "show" * --color
CONTRIBUTING.md:23:3. If exploit does not work but it should, check "show info" for more information. References should provide you with links to proof of concept exploits.
README.md:103: rsf (D-LINK DIR-300 & DIR-600 RCE) > show options
README.md:137: rsf (D-LINK DIR-300 & DIR-600 RCE) > show info
README.md:189: rsf (SSH Default Creds) > show options
README.md:250: rsf (D-Link Scanner) > show options
routersploit/interpreter.py:174: show [info|options|devices] Print information, options, or target devices for a module
routersploit/interpreter.py:185: self.show_sub_commands = (
routersploit/interpreter.py:191: ['use ', 'exec ', 'help', 'exit', 'show ', 'search ']
routersploit/interpreter.py:394: def _show_info(self, *args, **kwargs):
routersploit/interpreter.py:402: def _show_options(self, *args, **kwargs):
routersploit/interpreter.py:417: def _show_devices(self, *args, **kwargs): # TODO: cover with tests
routersploit/interpreter.py:433: def __show_modules(self, root=''):
routersploit/interpreter.py:437: def _show_all(self, *args, **kwargs):
routersploit/interpreter.py:438: self.__show_modules()
routersploit/interpreter.py:440: def _show_scanners(self, *args, **kwargs):
routersploit/interpreter.py:441: self.__show_modules('scanners')
routersploit/interpreter.py:443: def _show_exploits(self, *args, **kwargs):
routersploit/interpreter.py:444: self.__show_modules('exploits')
routersploit/interpreter.py:446: def _show_creds(self, *args, **kwargs):
routersploit/interpreter.py:447: self.__show_modules('creds')
routersploit/interpreter.py:449: def command_show(self, *args, **kwargs):
routersploit/interpreter.py:452: getattr(self, "_show_{}".format(sub_command))(*args, **kwargs)
routersploit/interpreter.py:454: utils.print_error("Unknown 'show' sub-command '{}'. "
routersploit/interpreter.py:455: "What do you want to show?\n"
routersploit/interpreter.py:456: "Possible choices are: {}".format(sub_command, self.show_sub_commands))
routersploit/interpreter.py:459: def complete_show(self, text, *args, **kwargs):
routersploit/interpreter.py:461: return [command for command in self.show_sub_commands if command.startswith(text)]
routersploit/interpreter.py:463: return self.show_sub_commands
Binary file routersploit/interpreter.pyc matches
routersploit/modules/exploits/routers/cisco/ios_http_authorization_bypass.py:19: Example: http://10.0.0.1/level/99/exec/show/startup/config
routersploit/modules/exploits/routers/cisco/ios_http_authorization_bypass.py:39: show_command = exploits.Option('show startup-config', 'Command to be executed e.g show startup-config')
routersploit/modules/exploits/routers/cisco/ios_http_authorization_bypass.py:45: url = "{}:{}/level/{}/exec/-/{}".format(self.target, self.port, self.access_level, self.show_command)
routersploit/modules/exploits/routers/cisco/ios_http_authorization_bypass.py:59: url = "{}:{}/level/{}/exec/-/{}".format(self.target, self.port, num, self.show_command)
routersploit/modules/exploits/routers/cisco/ios_http_authorization_bypass.py:64: if response.status_code == 200 and "Command was: {}".format(self.show_command) in response.text:
routersploit/modules/exploits/routers/cisco/catalyst_2960_rocem.py:44: device = exploits.Option(-1, 'Target device - use "show devices"', validators=validators.integer)
routersploit/modules/exploits/routers/cisco/catalyst_2960_rocem.py:55: # next bytes are shown as offsets from r1
routersploit/modules/exploits/routers/cisco/catalyst_2960_rocem.py:118: # next bytes are shown as offsets from r1
routersploit/modules/exploits/routers/cisco/catalyst_2960_rocem.py:176: print_error("Set target device - use \"show devices\" and \"set device <id>\"")
routersploit/modules/exploits/routers/multi/misfortune_cookie.py:129: device = exploits.Option('', 'Target device (show devices)') # target firmware
routersploit/modules/exploits/routers/multi/rom0.py:30: 'http://www.osvdb.org/show/osvdb/102668',
routersploit/modules/exploits/routers/2wire/4011g_5012nv_path_traversal.py:48: data = {"__ENH_SHOW_REDIRECT_PATH__": "/pages/C_4_0.asp/../../..{}".format(self.filename),
routersploit/modules/exploits/routers/2wire/4011g_5012nv_path_traversal.py:49: "__ENH_SUBMIT_VALUE_SHOW__": "Acceder",
routersploit/modules/exploits/routers/2wire/4011g_5012nv_path_traversal.py:68: data = {"__ENH_SHOW_REDIRECT_PATH__": "/pages/C_4_0.asp/../../../etc/passwd",
routersploit/modules/exploits/routers/2wire/4011g_5012nv_path_traversal.py:69: "__ENH_SUBMIT_VALUE_SHOW__": "Acceder",
routersploit/modules/exploits/routers/dlink/dir_300_320_600_615_info_disclosure.py:43: url = "{}:{}/model/__show_info.php?REQUIRE_FILE=/var/etc/httpasswd".format(self.target, self.port)
routersploit/modules/exploits/routers/dlink/dir_300_320_600_615_info_disclosure.py:60: url = "{}:{}/model/__show_info.php?REQUIRE_FILE=/var/etc/httpasswd".format(self.target, self.port)
Binary file routersploit/shell.pyc matches
routersploit/utils/__init__.py:369: Pretty printing dictionary in specific order. (as in 'show info' command)
Binary file routersploit/utils/__init__.pyc matches
routersploit/shell.py:35: print_status("For further exploitation use 'show payloads' and 'set payload <payload>' commands.")
routersploit/shell.py:52: elif cmd == "show payloads":
routersploit/shell.py:79: if cmd == "show options":
tests/test_completer.py:38: 'exec exit help search show use \r\n',
tests/test_completer.py:98: 'check exit run set show \r\n',
tests/test_completer.py:138: 'search set setg show \r\n',
tests/test_completer.py:202: "check exit run set show \r\n",
tests/test_completer.py:215: 'check exit run set show use \r\n',
tests/test_completer.py:244: def test_complete_show_raw(self):
tests/test_completer.py:249: 'show ',
tests/test_completer.py:252: def test_complete_show(self):
tests/test_completer.py:254: self.rsf.send("show \t\t")
tests/test_completer.py:261: def test_complete_show_info(self):
tests/test_completer.py:263: self.rsf.send("show i\t\t")
tests/test_completer.py:266: 'show info'
tests/test_completer.py:269: def test_complete_show_options(self):
tests/test_completer.py:271: self.rsf.send("show o\t\t")
tests/test_completer.py:274: 'show options'
tests/test_interpreter.py:299: 'set ', 'setg ', 'show ', 'use ']
tests/test_interpreter.py:308: 'set ', 'setg ', 'show ', 'unsetg ', 'use ']
tests/test_interpreter.py:317: ['exec ', 'exit', 'help', 'search ', 'show ', 'use ']
tests/test_interpreter.py:418: def test_show_info(self, mock_print):
tests/test_interpreter.py:430: self.interpreter._show_info()
tests/test_interpreter.py:449: def test_command_show_info_module_with_no_metadata(self, mock_print):
tests/test_interpreter.py:455: self.interpreter._show_info()
tests/test_interpreter.py:462: def test_show_options(self, mock_print):
tests/test_interpreter.py:481: self.interpreter._show_options()
tests/test_interpreter.py:509: def test_command_show_options_when_there_is_no_module_opts(self,
tests/test_interpreter.py:522: self.interpreter._show_options()
tests/test_interpreter.py:541: def test_command_show(self):
tests/test_interpreter.py:543: "_show_options") as mock_show_options:
tests/test_interpreter.py:544: self.interpreter.command_show("options")
tests/test_interpreter.py:545: mock_show_options.assert_called_once_with("options")
tests/test_interpreter.py:548: def test_command_show_unknown_sub_command(self, mock_print_error):
tests/test_interpreter.py:549: self.interpreter.command_show('unknown_sub_command')
tests/test_interpreter.py:551: "Unknown 'show' sub-command 'unknown_sub_command'. "
tests/test_interpreter.py:552: "What do you want to show?\n"
tests/test_interpreter.py:554: self.interpreter.show_sub_commands))
tests/test_interpreter.py:557: def test_show_all(self, mock_print):
tests/test_interpreter.py:567: self.interpreter._show_all()
tests/test_interpreter.py:581: def test_show_scanners(self, mock_print):
tests/test_interpreter.py:591: self.interpreter._show_scanners()
tests/test_interpreter.py:598: def test_show_exploits(self, mock_print):
tests/test_interpreter.py:608: self.interpreter._show_exploits()
tests/test_interpreter.py:615: def test_show_creds(self, mock_print):
tests/test_interpreter.py:625: self.interpreter._show_creds()
tests/test_interpreter.py:643: def test_if_command_show_info_has_module_required_decorator(self):
tests/test_interpreter.py:645: self.interpreter._show_info,
tests/test_interpreter.py:649: def test_if_command_show_options_has_module_required_decorator(self):
tests/test_interpreter.py:651: self.interpreter._show_options,
tests/test_interpreter.py:655: def test_if_command_show_devices_has_module_required_decorator(self):
tests/test_interpreter.py:657: self.interpreter._show_devices,
tests/test_interpreter.py:672: cmd, args = self.interpreter.parse_line("show options")
tests/test_interpreter.py:673: self.assertEqual(cmd, "show")
root@CRACK_COCAINE:~/Documents/routersploit# egrep -irn "cmd ==" * --color
routersploit/interpreter.py:117: if cmd == '':
routersploit/shell.py:52: elif cmd == "show payloads":
routersploit/shell.py:79: if cmd == "show options":
routersploit/shell.py:102: elif cmd == "run":
routersploit/shell.py:125: elif cmd == "back":
root@CRACK_COCAINE:~/Documents/routersploit# ls
CONTRIBUTING.md LICENSE README.md requirements.txt routersploit.log tests
Dockerfile Makefile requirements-dev.txt routersploit rsf.py
root@CRACK_COCAINE:~/Documents/routersploit# ls */*.log
ls: cannot access '*/*.log': No such file or directory
root@CRACK_COCAINE:~/Documents/routersploit# ls */*.log
ls: cannot access '*/*.log': No such file or directory
root@CRACK_COCAINE:~/Documents/routersploit# ls *.log/*.log
ls: cannot access '*.log/*.log': No such file or directory
root@CRACK_COCAINE:~/Documents/routersploit# ls
CONTRIBUTING.md LICENSE README.md requirements.txt routersploit.log tests
Dockerfile Makefile requirements-dev.txt routersploit rsf.py
root@CRACK_COCAINE:~/Documents/routersploit# cd routersploit
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# ls
exceptions.py exploits.pyc interpreter.py payloads.py printer.pyc templates utils wordlists
exceptions.pyc __init__.py interpreter.pyc payloads.pyc shell.py threads.py validators.py
exploits.py __init__.pyc modules printer.py shell.pyc threads.pyc validators.pyc
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# ls *.log
ls: cannot access '*.log': No such file or directory
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# ls -la
total 164
drwxr-xr-x 6 root root 4096 Dec 13 10:58 .
drwxr-xr-x 6 root root 4096 Dec 13 10:58 ..
-rw-r--r-- 1 root root 343 Dec 13 10:15 exceptions.py
-rw-r--r-- 1 root root 945 Dec 13 10:31 exceptions.pyc
-rw-r--r-- 1 root root 4078 Dec 13 10:15 exploits.py
-rw-r--r-- 1 root root 5152 Dec 13 10:31 exploits.pyc
-rw-r--r-- 1 root root 460 Dec 13 10:15 __init__.py
-rw-r--r-- 1 root root 797 Dec 13 10:31 __init__.pyc
-rw-r--r-- 1 root root 18793 Dec 13 10:15 interpreter.py
-rw-r--r-- 1 root root 21898 Dec 13 10:58 interpreter.pyc
drwxr-xr-x 6 root root 4096 Dec 13 10:31 modules
-rw-r--r-- 1 root root 5804 Dec 13 10:15 payloads.py
-rw-r--r-- 1 root root 5797 Dec 13 10:31 payloads.pyc
-rw-r--r-- 1 root root 618 Dec 13 10:15 printer.py
-rw-r--r-- 1 root root 1172 Dec 13 10:31 printer.pyc
-rw-r--r-- 1 root root 12397 Dec 13 10:15 shell.py
-rw-r--r-- 1 root root 10669 Dec 13 10:31 shell.pyc
drwxr-xr-x 2 root root 4096 Dec 13 10:15 templates
-rw-r--r-- 1 root root 2457 Dec 13 10:15 threads.py
-rw-r--r-- 1 root root 3417 Dec 13 10:58 threads.pyc
drwxr-xr-x 2 root root 4096 Dec 13 10:31 utils
-rw-r--r-- 1 root root 2725 Dec 13 10:15 validators.py
-rw-r--r-- 1 root root 3328 Dec 13 10:31 validators.pyc
drwxr-xr-x 2 root root 4096 Dec 13 10:31 wordlists
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# atom shell.py
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# ls
exceptions.py exploits.pyc interpreter.py payloads.py printer.pyc templates utils wordlists
exceptions.pyc __init__.py interpreter.pyc payloads.pyc shell.py threads.py validators.py
exploits.py __init__.pyc modules printer.py shell.pyc threads.pyc validators.pyc
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# python shell.py
Traceback (most recent call last):
File "shell.py", line 12, in <module>
from routersploit import validators
ImportError: No module named routersploit
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# python shell.py
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# ps aux
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 1 0.0 0.0 220016 8588 ? Ss 05:49 0:04 /sbin/init
root 2 0.0 0.0 0 0 ? S 05:49 0:00 [kthreadd]
root 3 0.0 0.0 0 0 ? S 05:49 0:00 [ksoftirqd/0]
root 5 0.0 0.0 0 0 ? S< 05:49 0:00 [kworker/0:0H]
root 7 0.0 0.0 0 0 ? S 05:49 0:02 [rcu_sched]
root 8 0.0 0.0 0 0 ? S 05:49 0:00 [rcu_bh]
root 9 0.0 0.0 0 0 ? S 05:49 0:00 [migration/0]
root 10 0.0 0.0 0 0 ? S< 05:49 0:00 [lru-add-drain]
root 11 0.0 0.0 0 0 ? S 05:49 0:00 [watchdog/0]
root 12 0.0 0.0 0 0 ? S 05:49 0:00 [cpuhp/0]
root 13 0.0 0.0 0 0 ? S 05:49 0:00 [cpuhp/1]
root 14 0.0 0.0 0 0 ? S 05:49 0:00 [watchdog/1]
root 15 0.0 0.0 0 0 ? S 05:49 0:00 [migration/1]
root 16 0.0 0.0 0 0 ? S 05:49 0:00 [ksoftirqd/1]
root 18 0.0 0.0 0 0 ? S< 05:49 0:00 [kworker/1:0H]
root 19 0.0 0.0 0 0 ? S 05:49 0:00 [cpuhp/2]
root 20 0.0 0.0 0 0 ? S 05:49 0:00 [watchdog/2]
root 21 0.0 0.0 0 0 ? S 05:49 0:00 [migration/2]
root 22 0.0 0.0 0 0 ? S 05:49 0:00 [ksoftirqd/2]
root 24 0.0 0.0 0 0 ? S< 05:49 0:00 [kworker/2:0H]
root 25 0.0 0.0 0 0 ? S 05:49 0:00 [cpuhp/3]
root 26 0.0 0.0 0 0 ? S 05:49 0:00 [watchdog/3]
root 27 0.0 0.0 0 0 ? S 05:49 0:00 [migration/3]
root 28 0.0 0.0 0 0 ? S 05:49 0:00 [ksoftirqd/3]
root 30 0.0 0.0 0 0 ? S< 05:49 0:00 [kworker/3:0H]
root 31 0.0 0.0 0 0 ? S 05:49 0:00 [kdevtmpfs]
root 32 0.0 0.0 0 0 ? S< 05:49 0:00 [netns]
root 33 0.0 0.0 0 0 ? S 05:49 0:00 [khungtaskd]
root 34 0.0 0.0 0 0 ? S 05:49 0:00 [oom_reaper]
root 35 0.0 0.0 0 0 ? S< 05:49 0:00 [writeback]
root 36 0.0 0.0 0 0 ? S 05:49 0:00 [kcompactd0]
root 38 0.0 0.0 0 0 ? SN 05:49 0:00 [ksmd]
root 39 0.0 0.0 0 0 ? SN 05:49 0:00 [khugepaged]
root 40 0.0 0.0 0 0 ? S< 05:49 0:00 [crypto]
root 41 0.0 0.0 0 0 ? S< 05:49 0:00 [kintegrityd]
root 42 0.0 0.0 0 0 ? S< 05:49 0:00 [bioset]
root 43 0.0 0.0 0 0 ? S< 05:49 0:00 [kblockd]
root 47 0.0 0.0 0 0 ? S< 05:49 0:00 [devfreq_wq]
root 48 0.0 0.0 0 0 ? S< 05:49 0:00 [watchdogd]
root 49 0.0 0.0 0 0 ? S 05:49 0:00 [kswapd0]
root 50 0.0 0.0 0 0 ? S< 05:49 0:00 [vmstat]
root 62 0.0 0.0 0 0 ? S< 05:49 0:00 [kthrotld]
root 63 0.0 0.0 0 0 ? S< 05:49 0:00 [ipv6_addrconf]
root 109 0.0 0.0 0 0 ? S< 05:49 0:00 [acpi_thermal_pm]
root 111 0.0 0.0 0 0 ? S< 05:49 0:00 [ata_sff]
root 152 0.0 0.0 0 0 ? S 05:49 0:00 [scsi_eh_0]
root 153 0.0 0.0 0 0 ? S< 05:49 0:00 [scsi_tmf_0]
root 154 0.0 0.0 0 0 ? S 05:49 0:00 [scsi_eh_1]
root 155 0.0 0.0 0 0 ? S< 05:49 0:00 [scsi_tmf_1]
root 156 0.0 0.0 0 0 ? S 05:49 0:00 [scsi_eh_2]
root 157 0.0 0.0 0 0 ? S< 05:49 0:00 [scsi_tmf_2]
root 158 0.0 0.0 0 0 ? S 05:49 0:00 [scsi_eh_3]
root 159 0.0 0.0 0 0 ? S< 05:49 0:00 [scsi_tmf_3]
root 160 0.0 0.0 0 0 ? S 05:49 0:00 [scsi_eh_4]
root 161 0.0 0.0 0 0 ? S< 05:49 0:00 [scsi_tmf_4]
root 162 0.0 0.0 0 0 ? S 05:49 0:00 [scsi_eh_5]
root 163 0.0 0.0 0 0 ? S< 05:49 0:00 [scsi_tmf_5]
root 171 0.0 0.0 0 0 ? S< 05:49 0:00 [bioset]
root 172 0.0 0.0 0 0 ? S< 05:49 0:00 [bioset]
root 173 0.0 0.0 0 0 ? S< 05:49 0:00 [bioset]
root 174 0.0 0.0 0 0 ? S< 05:49 0:00 [bioset]
root 176 0.0 0.0 0 0 ? S< 05:49 0:00 [kworker/0:1H]
root 180 0.0 0.0 0 0 ? S< 05:49 0:00 [kworker/2:1H]
root 181 0.0 0.0 0 0 ? S< 05:49 0:00 [kworker/3:1H]
root 182 0.0 0.0 0 0 ? S< 05:49 0:00 [kworker/1:1H]
root 187 0.0 0.0 0 0 ? S< 05:49 0:00 [md]
root 209 0.0 0.0 0 0 ? S< 05:49 0:00 [raid5wq]
root 226 0.0 0.0 0 0 ? S< 05:49 0:00 [bioset]
root 265 0.0 0.0 0 0 ? S< 05:49 0:00 [ext4-rsv-conver]
root 312 0.0 0.0 81684 10600 ? Ss 05:49 0:00 /lib/systemd/systemd-journald
root 317 0.0 0.0 0 0 ? S 05:49 0:00 [kauditd]
root 357 0.0 0.0 0 0 ? S 05:49 0:00 [nvidia-modeset]
root 360 0.0 0.0 46012 5316 ? Ss 05:49 0:00 /lib/systemd/systemd-udevd
root 394 0.0 0.0 0 0 ? S< 05:49 0:00 [edac-poller]
root 401 0.0 0.0 0 0 ? S 05:49 0:00 [irq/31-mei_me]
systemd+ 444 0.0 0.0 147096 5156 ? Ssl 05:49 0:00 /lib/systemd/systemd-timesyncd
root 578 0.0 0.0 0 0 ? S 05:49 0:00 [scsi_eh_6]
root 579 0.0 0.0 0 0 ? S< 05:49 0:00 [scsi_tmf_6]
root 580 0.0 0.0 0 0 ? S 05:49 0:00 [usb-storage]
root 599 0.0 0.0 0 0 ? S< 05:49 0:00 [bioset]
root 645 0.0 0.0 29624 2928 ? Ss 05:49 0:00 /usr/sbin/cron -f
root 646 0.0 0.0 427140 9064 ? Ssl 05:49 0:00 /usr/sbin/ModemManager
root 649 0.0 0.0 25336 4608 ? Ss 05:49 0:00 /usr/sbin/smartd -n
root 654 0.0 0.0 116156 3388 ? Ssl 05:49 0:00 /usr/sbin/irqbalance --foreground
root 655 0.0 0.0 275496 4004 ? Ssl 05:49 0:00 /usr/sbin/rsyslogd -n
message+ 656 0.0 0.0 48772 5356 ? Ss 05:49 0:03 /usr/bin/dbus-daemon --system --address=systemd: --nofork --no
root 662 0.0 0.0 50912 2976 ? S 05:49 0:00 /usr/sbin/CRON -f
root 667 0.0 0.0 50912 2960 ? S 05:49 0:00 /usr/sbin/CRON -f
root 679 0.0 0.1 454332 15496 ? Ssl 05:49 0:00 /usr/sbin/NetworkManager --no-daemon
root 681 0.0 0.0 65236 5724 ? Ss 05:49 0:00 /lib/systemd/systemd-logind
root 682 0.0 0.0 285712 6608 ? Ssl 05:49 0:00 /usr/lib/accountsservice/accounts-daemon
rtkit 684 0.0 0.0 187832 3024 ? SNsl 05:49 0:00 /usr/lib/rtkit/rtkit-daemon
root 699 0.0 0.0 290228 8484 ? Ssl 05:49 0:00 /usr/lib/policykit-1/polkitd --no-debug
root 712 0.0 0.0 4312 760 ? Ss 05:49 0:00 /bin/sh -c python /root/ArmsCommander/passwordattacks/autostar
root 725 0.0 0.0 4312 740 ? Ss 05:49 0:00 /bin/sh -c /bin/sh /usr/local/bin/IDS.sh
root 726 0.0 0.0 32088 9892 ? S 05:49 0:00 python /root/ArmsCommander/passwordattacks/autostart_password_
root 732 0.0 0.0 4312 772 ? S 05:49 0:00 /bin/sh /usr/local/bin/IDS.sh
root 775 0.0 0.0 8500 1564 ? Ss 05:49 0:00 nvidia-persistenced --persistence-mode
root 778 0.7 0.0 0 0 ? S 05:49 3:16 [irq/33-nvidia]
root 779 0.0 0.0 0 0 ? S 05:49 0:00 [nvidia]
pulse 825 0.0 0.0 357068 11264 ? S<l 05:49 0:00 pulseaudio -D --system
root 836 0.0 0.0 19872 9060 ? Ss 05:49 0:00 /sbin/mount.ntfs /dev/sda2 /mnt/Data2 -o rw
root 840 0.0 0.1 190896 14508 ? S 05:49 0:06 /usr/bin/python -O /usr/share/wicd/daemon/wicd-daemon.py --kee
root 842 0.0 0.0 13300 2648 ? Ss 05:49 0:00 /sbin/mount.ntfs /dev/sdb2 /mnt/Data3 -o rw
root 858 0.0 0.1 111348 17848 ? S 05:49 0:02 /usr/bin/python -O /usr/share/wicd/daemon/monitor.py
root 900 0.0 0.0 13052 2388 ? Ss 05:49 0:00 /sbin/mount.ntfs /dev/sdc1 /mnt/Data4 -o rw
root 909 0.0 0.0 0 0 ? S< 05:49 0:00 [iprt-VBoxWQueue]
root 921 0.0 0.0 0 0 ? S 05:49 0:00 [iprt-VBoxTscThr]
root 942 0.0 0.0 20484 1040 ? Ss 05:49 0:00 dhclient eth0
root 951 0.0 0.0 71996 5564 ? Ss 05:49 0:00 /usr/sbin/sshd -D
root 962 0.0 0.0 371556 7600 ? Ssl 05:49 0:00 /usr/sbin/gdm3
root 984 0.0 0.0 243928 7564 ? Sl 05:49 0:00 gdm-session-worker [pam/gdm-launch-environment]
Debian-+ 1019 0.0 0.0 80020 7988 ? Ss 05:49 0:00 /lib/systemd/systemd --user
Debian-+ 1020 0.0 0.0 102172 2384 ? S 05:49 0:00 (sd-pam)
root 1039 0.0 0.0 0 0 ? S 05:49 0:00 [UVM global queu]
root 1041 0.0 0.0 0 0 ? S 05:49 0:00 [UVM Tools Event]
Debian-+ 1062 0.0 0.0 203236 5324 tty1 Ssl+ 05:49 0:00 /usr/lib/gdm3/gdm-x-session gnome-session --autostart /usr/sha
root 1073 0.0 0.4 267228 49324 tty1 Sl+ 05:49 0:01 /usr/lib/xorg/Xorg vt1 -displayfd 3 -auth /run/user/132/gdm/Xa
Debian-+ 1175 0.0 0.0 47336 4032 ? Ss 05:49 0:00 /usr/bin/dbus-daemon --session --address=systemd: --nofork --n
Debian-+ 1177 0.0 0.1 553208 12440 tty1 Sl+ 05:49 0:00 /usr/lib/gnome-session/gnome-session-binary --autostart /usr/s
Debian-+ 1183 0.0 0.0 355200 6228 ? Ssl 05:49 0:00 /usr/lib/at-spi2-core/at-spi-bus-launcher
Debian-+ 1188 0.0 0.0 47116 3488 ? S 05:49 0:00 /usr/bin/dbus-daemon --config-file=/usr/share/defaults/at-spi2
Debian-+ 1191 0.0 0.0 222348 5312 ? Sl 05:49 0:00 /usr/lib/at-spi2-core/at-spi2-registryd --use-gnome-session
Debian-+ 1200 0.0 1.3 2274720 159764 tty1 Sl+ 05:49 0:03 /usr/bin/gnome-shell
root 1204 0.0 0.0 313532 8516 ? Ssl 05:49 0:00 /usr/lib/upower/upowerd
Debian-+ 1236 0.0 0.0 1229632 11912 ? Ssl 05:49 0:00 /usr/bin/pulseaudio --daemonize=no
root 1248 0.0 0.2 444476 31020 ? Ssl 05:50 0:02 /usr/lib/packagekit/packagekitd
Debian-+ 1249 0.0 0.2 1025836 29760 tty1 Sl+ 05:50 0:00 /usr/lib/gnome-settings-daemon/gnome-settings-daemon
root 1263 0.0 0.0 48328 5132 ? Ss 05:50 0:00 /sbin/wpa_supplicant -u -s -O /run/wpa_supplicant
colord 1269 0.0 0.1 317520 13508 ? Ssl 05:50 0:00 /usr/lib/colord/colord
root 1400 0.0 0.0 4312 1648 ? S 05:54 0:00 /bin/sh /root/Desktop/external_scans_njp_dnac/edit_2__bash_scr
root 1401 0.0 0.0 4312 1632 ? S 05:54 0:00 /bin/sh /root/Desktop/external_scans_njp_dnac/nmap_ssl_detecti
root 1434 0.0 1.4 594104 179572 ? Sl 05:54 0:03 snort -q -A full -c /etc/snort/snort.conf
postgres 1508 0.0 0.1 276848 24164 ? S 05:54 0:00 /usr/lib/postgresql/9.5/bin/postgres -D /var/lib/postgresql/9.
postgres 1511 0.0 0.2 293676 25356 ? S 05:54 0:00 /usr/lib/postgresql/9.6/bin/postgres -D /var/lib/postgresql/9.
postgres 1523 0.0 0.0 293676 3976 ? Ss 05:54 0:00 postgres: 9.6/main: checkpointer process
postgres 1524 0.0 0.0 293676 3976 ? Ss 05:54 0:00 postgres: 9.6/main: writer process
postgres 1525 0.0 0.0 293676 3976 ? Ss 05:54 0:00 postgres: 9.6/main: wal writer process
postgres 1526 0.0 0.0 294104 6396 ? Ss 05:54 0:00 postgres: 9.6/main: autovacuum launcher process
postgres 1527 0.0 0.0 148676 3172 ? Ss 05:54 0:00 postgres: 9.6/main: stats collector process
postgres 1534 0.0 0.0 276948 10940 ? Ss 05:54 0:00 postgres: 9.5/main: checkpointer process
postgres 1535 0.0 0.0 276848 6196 ? Ss 05:54 0:00 postgres: 9.5/main: writer process
postgres 1536 0.0 0.0 276848 9544 ? Ss 05:54 0:00 postgres: 9.5/main: wal writer process
postgres 1537 0.0 0.0 277280 6780 ? Ss 05:54 0:00 postgres: 9.5/main: autovacuum launcher process
postgres 1538 0.0 0.0 132004 4860 ? Ss 05:54 0:00 postgres: 9.5/main: stats collector process
root 4164 0.0 0.0 248052 7768 ? Sl 05:59 0:00 gdm-session-worker [pam/gdm-password]
root 4169 0.0 0.0 71576 7748 ? Ss 05:59 0:00 /lib/systemd/systemd --user
root 4170 0.0 0.0 249636 2424 ? S 05:59 0:00 (sd-pam)
root 4177 0.0 0.0 287764 7876 ? Sl 05:59 0:00 /usr/bin/gnome-keyring-daemon --daemonize --login
root 4181 0.0 0.0 203236 5464 tty2 Ssl+ 05:59 0:00 /usr/lib/gdm3/gdm-x-session --run-script default
root 4183 0.6 0.6 315908 73964 tty2 Sl+ 05:59 2:49 /usr/lib/xorg/Xorg vt2 -displayfd 3 -auth /run/user/0/gdm/Xaut
root 4187 0.0 0.0 48112 5004 ? Ss 05:59 0:00 /usr/bin/dbus-daemon --session --address=systemd: --nofork --n
root 4189 0.0 0.1 774704 13264 tty2 Sl+ 05:59 0:00 /usr/lib/gnome-session/gnome-session-binary
root 4244 0.0 0.0 11100 332 ? Ss 05:59 0:00 /usr/bin/ssh-agent x-session-manager
root 4251 0.0 0.0 355208 6404 ? Ssl 05:59 0:00 /usr/lib/at-spi2-core/at-spi-bus-launcher
root 4256 0.0 0.0 47240 3908 ? S 05:59 0:00 /usr/bin/dbus-daemon --config-file=/usr/share/defaults/at-spi2
root 4259 0.0 0.0 222348 6844 ? Sl 05:59 0:02 /usr/lib/at-spi2-core/at-spi2-registryd --use-gnome-session
root 4276 1.2 4.2 3012136 523172 tty2 Sl+ 05:59 5:04 /usr/bin/gnome-shell
root 4279 0.0 0.0 285212 6796 ? Ssl 05:59 0:00 /usr/lib/gvfs/gvfsd
root 4284 0.0 0.0 417780 5472 ? Sl 05:59 0:00 /usr/lib/gvfs/gvfsd-fuse /run/user/0/gvfs -f -o big_writes
root 4294 0.0 0.1 2284756 13428 ? S<sl 05:59 0:01 /usr/bin/pulseaudio --daemonize=no
root 4301 0.0 0.1 615016 15260 ? Sl 05:59 0:00 /usr/lib/gnome-shell/gnome-shell-calendar-server
root 4312 0.0 0.1 1229040 21172 ? Ssl 05:59 0:00 /usr/lib/evolution/evolution-source-registry
root 4317 0.0 0.0 468280 11700 ? Ssl 05:59 0:00 /usr/lib/telepathy/mission-control-5
root 4322 0.0 0.2 769136 33164 ? Sl 05:59 0:00 /usr/lib/gnome-online-accounts/goa-daemon
root 4324 0.0 0.0 359628 11140 ? Ssl 05:59 0:00 /usr/lib/gvfs/gvfs-udisks2-volume-monitor
root 4328 0.0 0.0 380244 8420 ? Ssl 05:59 0:06 /usr/lib/udisks2/udisksd --no-debug
root 4335 0.0 0.0 269568 5936 ? Ssl 05:59 0:00 /usr/lib/gvfs/gvfs-goa-volume-monitor
root 4348 0.0 0.0 370416 7452 ? Sl 05:59 0:00 /usr/lib/gnome-online-accounts/goa-identity-service
root 4357 0.0 0.0 271368 5204 ? Ssl 05:59 0:00 /usr/lib/gvfs/gvfs-mtp-volume-monitor
root 4361 0.0 0.0 374324 7344 ? Ssl 05:59 0:00 /usr/lib/gvfs/gvfs-afc-volume-monitor
root 4366 0.0 0.0 283740 6240 ? Ssl 05:59 0:00 /usr/lib/gvfs/gvfs-gphoto2-volume-monitor
root 4372 0.0 0.3 1501824 40612 tty2 Sl+ 05:59 0:01 /usr/lib/gnome-settings-daemon/gnome-settings-daemon
root 4383 0.0 0.0 187504 4868 ? Sl 05:59 0:00 /usr/lib/dconf/dconf-service
root 4402 0.0 0.0 442468 11988 tty2 SNl+ 05:59 0:00 /usr/lib/tracker/tracker-miner-apps
root 4406 0.0 0.0 503548 11800 tty2 Sl+ 05:59 0:00 /usr/lib/gnome-settings-daemon/gsd-printer
root 4407 0.0 0.2 896700 25800 tty2 SNl+ 05:59 0:01 /usr/lib/tracker/tracker-extract
root 4413 0.0 0.2 697632 34676 tty2 Sl+ 05:59 0:08 psensor
root 4420 0.0 0.0 426144 8036 ? Ssl 05:59 0:00 /usr/bin/zeitgeist-daemon
root 4428 0.0 0.1 441384 18492 ? Sl 05:59 0:00 zeitgeist-datahub
root 4432 0.0 0.0 341280 12172 tty2 SNl+ 05:59 0:00 /usr/lib/tracker/tracker-miner-user-guides
root 4433 0.0 0.8 936708 109568 tty2 Sl+ 05:59 0:02 /usr/bin/gnome-software --gapplication-service
root 4434 0.0 0.3 398160 38368 tty2 Sl+ 05:59 0:00 /usr/bin/python -O /usr/share/wicd/gtk/wicd-client.py --tray
root 4435 0.0 0.1 329128 16240 ? Ssl 05:59 0:00 /usr/lib/zeitgeist/zeitgeist/zeitgeist-fts
root 4448 0.0 0.3 1252228 41124 tty2 Sl+ 05:59 0:01 nautilus-desktop
root 4455 0.0 0.2 838396 32404 ? Ssl 05:59 0:00 /usr/lib/evolution/evolution-calendar-factory
root 4469 0.0 0.5 472696 62988 ? Ssl 05:59 0:00 /usr/lib/tracker/tracker-store
root 4490 0.0 0.0 361372 6760 ? Sl 06:00 0:00 /usr/lib/gvfs/gvfsd-trash --spawner :1.17 /org/gtk/gvfs/exec_s
root 4528 0.0 0.1 874164 24140 ? Sl 06:00 0:00 /usr/lib/evolution/evolution-calendar-factory-subprocess --fac
root 4538 0.0 0.1 711224 20136 ? Sl 06:00 0:00 /usr/lib/evolution/evolution-calendar-factory-subprocess --fac
root 4543 0.0 0.1 707516 22508 ? Ssl 06:00 0:00 /usr/lib/evolution/evolution-addressbook-factory
root 4556 0.0 0.1 847816 22520 ? Sl 06:00 0:00 /usr/lib/evolution/evolution-addressbook-factory-subprocess --
root 4592 0.0 0.0 195860 5904 ? Ssl 06:00 0:00 /usr/lib/gvfs/gvfsd-metadata
root 4706 0.0 0.0 73248 5680 ? S 06:00 0:00 /usr/lib/x86_64-linux-gnu/gconf/gconfd-2
root 12661 0.0 0.4 97744 50384 ? S 09:21 0:03 nmap -sV --version-all -sS -sU -T4 -A -v -PE -PP -PS80,443 -PA
root 14894 0.0 0.0 4312 756 ? S 10:05 0:00 /bin/sh /root/Desktop/external_scans_njp_dnac/edit_2__bash_scr
root 14895 0.0 0.0 4312 760 ? S 10:05 0:00 /bin/sh /root/Desktop/external_scans_njp_dnac/nmap_ssl_detecti
root 14925 0.0 0.0 0 0 ? S 10:05 0:00 [kworker/0:1]
debian-+ 16497 0.0 0.3 95956 46616 ? Ss 10:06 0:03 /usr/bin/tor --defaults-torrc /usr/share/tor/tor-service-defau
root 17557 0.0 0.4 98292 51092 ? S 10:11 0:03 nmap -sV --version-all -sS -sU -T4 -A -v -PE -PP -PS80,443 -PA
root 17606 0.0 0.0 0 0 ? S 10:12 0:00 [kworker/2:1]
root 18365 0.0 0.0 20484 4344 ? S 10:14 0:00 /sbin/dhclient -d -q -sf /usr/lib/NetworkManager/nm-dhcp-helpe
root 18366 0.0 0.0 20488 4492 ? S 10:14 0:00 /sbin/dhclient -d -q -6 -N -sf /usr/lib/NetworkManager/nm-dhcp
root 18649 0.0 0.4 98528 51184 ? S 10:16 0:02 nmap -sV --version-all -sS -sU -T4 -A -v -PE -PP -PS80,443 -PA
root 18927 0.0 0.0 0 0 ? S 10:21 0:00 [kworker/2:0]
root 20617 0.0 0.0 355524 10588 ? Sl 10:58 0:00 /usr/lib/gvfs/gvfsd-http --spawner :1.17 /org/gtk/gvfs/exec_sp
root 21636 0.1 0.3 674912 45324 ? Rsl 11:16 0:06 /usr/lib/gnome-terminal/gnome-terminal-server
root 21685 0.0 0.0 19904 3748 pts/0 Ss 11:16 0:00 bash
root 21731 0.0 0.0 11196 1852 ? S 11:17 0:00 /bin/bash /usr/bin/atom automated_routersploit.py
root 21733 0.7 1.8 1701044 229200 ? Sl 11:17 0:46 /usr/share/atom/atom --executed-from=/root/Desktop/projects --
root 21735 0.0 0.2 386384 31508 ? S 11:17 0:00 /usr/share/atom/atom --type=zygote --no-sandbox
root 21752 0.7 1.2 618452 157900 ? Sl 11:17 0:45 /usr/share/atom/atom --type=gpu-process --channel=21733.0.1796
root 21766 4.5 2.9 2677712 358404 ? SLl 11:17 4:26 /usr/share/atom/atom --type=renderer --no-sandbox --primordial
root 21798 0.0 1.4 1037400 175488 ? Sl 11:17 0:00 /usr/share/atom/atom --eval CompileCache = require('/usr/share
root 21860 0.0 1.5 1125292 191380 ? Sl 11:17 0:01 /usr/share/atom/atom --type=renderer --no-sandbox --primordial
root 22657 0.0 0.4 98168 51076 ? S 11:38 0:02 nmap -sV --version-all -sS -sU -T4 -A -v -PE -PP -PS80,443 -PA
root 22727 0.9 1.3 1300788 170308 tty2 SLl+ 11:38 0:45 /usr/lib/x86_64-linux-gnu/opera/opera
root 22732 0.0 0.0 6372 764 tty2 S+ 11:38 0:00 /usr/lib/x86_64-linux-gnu/opera/opera_sandbox /usr/lib/x86_64-
root 22733 0.0 0.2 414796 28468 tty2 S+ 11:38 0:00 /usr/lib/x86_64-linux-gnu/opera/opera --type=zygote
root 22735 0.0 0.0 414796 6836 tty2 S+ 11:38 0:00 /usr/lib/x86_64-linux-gnu/opera/opera --type=zygote
root 22770 0.3 1.5 631396 184436 tty2 Sl+ 11:38 0:14 /usr/lib/x86_64-linux-gnu/opera/opera --type=gpu-process --fie
root 22815 0.0 0.1 449624 16872 tty2 S+ 11:38 0:00 /usr/lib/x86_64-linux-gnu/opera/opera --type=gpu-broker
root 22837 0.0 0.8 903308 99284 tty2 Sl+ 11:38 0:02 /usr/lib/x86_64-linux-gnu/opera/opera --type=renderer --field-
root 22869 0.4 1.5 1030196 188436 tty2 Sl+ 11:38 0:22 /usr/lib/x86_64-linux-gnu/opera/opera --type=renderer --field-
root 22874 0.0 0.8 867616 108284 tty2 Sl+ 11:38 0:01 /usr/lib/x86_64-linux-gnu/opera/opera --type=renderer --field-
root 22958 0.3 0.5 793020 70780 tty2 Sl+ 11:39 0:14 /usr/lib/x86_64-linux-gnu/opera/opera --type=renderer --field-
root 22981 0.0 0.8 897684 106020 tty2 Sl+ 11:39 0:01 /usr/lib/x86_64-linux-gnu/opera/opera --type=renderer --field-
root 23709 0.0 0.0 0 0 ? S 12:04 0:00 [kworker/0:2]
root 23710 0.0 0.0 0 0 ? S 12:04 0:01 [kworker/1:1]
root 23801 0.0 0.0 0 0 ? S 12:04 0:00 [kworker/3:1]
root 25827 0.0 0.2 152444 30960 pts/0 Sl+ 12:30 0:00 python rsf.py
root 25835 0.0 0.0 19904 3744 pts/2 Ss 12:30 0:00 bash
root 25838 0.1 0.2 78772 31192 pts/2 T 12:30 0:02 python
root 25883 0.3 0.0 0 0 ? S 12:32 0:04 [kworker/1:3]
root 26038 0.0 0.0 0 0 ? S 12:35 0:00 [kworker/u8:2]
root 26117 2.5 0.2 78772 31284 pts/2 T 12:38 0:24 python
root 26142 0.0 0.9 850144 121776 tty2 Sl+ 12:38 0:00 /usr/lib/x86_64-linux-gnu/opera/opera --type=renderer --field-
root 26188 0.0 0.0 19908 3740 pts/3 Ss 12:40 0:00 bash
root 26262 0.0 0.0 0 0 ? S 12:43 0:00 [kworker/u8:0]
root 26321 0.0 0.0 4312 772 ? S 12:45 0:00 sh -c /usr/local/bin/pp64.bin /root/Documents/wifi_cracking_wo
root 26322 0.5 1.5 189684 186780 ? S 12:45 0:02 /usr/local/bin/pp64.bin /root/Documents/wifi_cracking_wordlist
root 26323 39.5 2.1 14749888 266600 ? Sl 12:45 3:36 hashcat -a 0 -w 4 -m 2500 /root/ArmsCommander/logs/HashCat/has
root 26324 0.0 0.0 0 0 ? S 12:45 0:00 [UVM GPU1 BH]
root 26361 0.0 0.0 0 0 ? S 12:46 0:00 [kworker/3:0]
root 26427 0.2 0.9 913408 111736 tty2 Sl+ 12:48 0:00 /usr/lib/x86_64-linux-gnu/opera/opera --type=renderer --field-
root 26509 0.0 0.2 78516 30724 pts/2 S+ 12:49 0:00 python
root 26631 0.0 0.0 0 0 ? S 12:51 0:00 [kworker/3:2]
root 26773 0.0 0.0 40320 3264 pts/3 R+ 12:54 0:00 ps aux
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# cd /root/Documents/routersploit/
root@CRACK_COCAINE:~/Documents/routersploit# ls
CONTRIBUTING.md LICENSE README.md requirements.txt routersploit.log tests
Dockerfile Makefile requirements-dev.txt routersploit rsf.py
root@CRACK_COCAINE:~/Documents/routersploit# cat routersploit.log
2017-12-13 10:58:29,120 ERROR routersploit.exceptions Error during loading 'routersploit/modules/scanners/autopwn'
Error: 'module' object has no attribute 'optionsParser'
It should be valid path to the module. Use <tab> key multiple times for completion.
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/utils/__init__.py", line 66, in import_exploit
module = importlib.import_module(path)
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/root/Documents/routersploit/routersploit/modules/scanners/autopwn.py", line 103, in <module>
main()
File "/root/Documents/routersploit/routersploit/modules/scanners/autopwn.py", line 92, in main
parser = optparse.optionsParser('usage %parser -r <manual control> -a full auto')
AttributeError: 'module' object has no attribute 'optionsParser'
2017-12-13 10:59:03,109 ERROR routersploit.exceptions Unknown command: 'options'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_options'
2017-12-13 10:59:45,547 ERROR routersploit.exceptions Unknown command: 'shpw'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_shpw'
2017-12-13 11:07:26,917 ERROR routersploit.exceptions Unknown command: 'jobs'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_jobs'
2017-12-13 12:43:21,542 ERROR routersploit.exceptions Unknown command: 'hrelep'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_hrelep'
2017-12-13 12:54:46,998 ERROR routersploit.exceptions Unknown command: 'shell'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_shell'
2017-12-13 12:54:51,933 ERROR routersploit.exceptions Unknown command: 'shell('')'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_shell('')'
2017-12-13 12:55:16,471 ERROR routersploit.exceptions Unknown command: '-v'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_-v'
2017-12-13 12:55:18,348 ERROR routersploit.exceptions Unknown command: 'version'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_version'
root@CRACK_COCAINE:~/Documents/routersploit# cat routersploit.log
2017-12-13 10:58:29,120 ERROR routersploit.exceptions Error during loading 'routersploit/modules/scanners/autopwn'
Error: 'module' object has no attribute 'optionsParser'
It should be valid path to the module. Use <tab> key multiple times for completion.
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/utils/__init__.py", line 66, in import_exploit
module = importlib.import_module(path)
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/root/Documents/routersploit/routersploit/modules/scanners/autopwn.py", line 103, in <module>
main()
File "/root/Documents/routersploit/routersploit/modules/scanners/autopwn.py", line 92, in main
parser = optparse.optionsParser('usage %parser -r <manual control> -a full auto')
AttributeError: 'module' object has no attribute 'optionsParser'
2017-12-13 10:59:03,109 ERROR routersploit.exceptions Unknown command: 'options'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_options'
2017-12-13 10:59:45,547 ERROR routersploit.exceptions Unknown command: 'shpw'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_shpw'
2017-12-13 11:07:26,917 ERROR routersploit.exceptions Unknown command: 'jobs'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_jobs'
2017-12-13 12:43:21,542 ERROR routersploit.exceptions Unknown command: 'hrelep'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_hrelep'
2017-12-13 12:54:46,998 ERROR routersploit.exceptions Unknown command: 'shell'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_shell'
2017-12-13 12:54:51,933 ERROR routersploit.exceptions Unknown command: 'shell('')'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_shell('')'
2017-12-13 12:55:16,471 ERROR routersploit.exceptions Unknown command: '-v'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_-v'
2017-12-13 12:55:18,348 ERROR routersploit.exceptions Unknown command: 'version'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_version'
root@CRACK_COCAINE:~/Documents/routersploit# cat routersploit.log
2017-12-13 10:58:29,120 ERROR routersploit.exceptions Error during loading 'routersploit/modules/scanners/autopwn'
Error: 'module' object has no attribute 'optionsParser'
It should be valid path to the module. Use <tab> key multiple times for completion.
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/utils/__init__.py", line 66, in import_exploit
module = importlib.import_module(path)
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/root/Documents/routersploit/routersploit/modules/scanners/autopwn.py", line 103, in <module>
main()
File "/root/Documents/routersploit/routersploit/modules/scanners/autopwn.py", line 92, in main
parser = optparse.optionsParser('usage %parser -r <manual control> -a full auto')
AttributeError: 'module' object has no attribute 'optionsParser'
2017-12-13 10:59:03,109 ERROR routersploit.exceptions Unknown command: 'options'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_options'
2017-12-13 10:59:45,547 ERROR routersploit.exceptions Unknown command: 'shpw'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_shpw'
2017-12-13 11:07:26,917 ERROR routersploit.exceptions Unknown command: 'jobs'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_jobs'
2017-12-13 12:43:21,542 ERROR routersploit.exceptions Unknown command: 'hrelep'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_hrelep'
2017-12-13 12:54:46,998 ERROR routersploit.exceptions Unknown command: 'shell'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_shell'
2017-12-13 12:54:51,933 ERROR routersploit.exceptions Unknown command: 'shell('')'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_shell('')'
2017-12-13 12:55:16,471 ERROR routersploit.exceptions Unknown command: '-v'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_-v'
2017-12-13 12:55:18,348 ERROR routersploit.exceptions Unknown command: 'version'
Traceback (most recent call last):
File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
command_handler = getattr(self, "command_{}".format(command))
AttributeError: 'RoutersploitInterpreter' object has no attribute 'command_version'
root@CRACK_COCAINE:~/Documents/routersploit# cat routersploit.log grep^C
root@CRACK_COCAINE:~/Documents/routersploit# egrep -irn command_handler * --color
routersploit/interpreter.py:66: def get_command_handler(self, command):
routersploit/interpreter.py:70: :return: command_handler
routersploit/interpreter.py:73: command_handler = getattr(self, "command_{}".format(command))
routersploit/interpreter.py:77: return command_handler
routersploit/interpreter.py:89: command_handler = self.get_command_handler(command)
routersploit/interpreter.py:90: command_handler(args)
Binary file routersploit/interpreter.pyc matches
routersploit.log:18: File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
routersploit.log:19: command_handler = getattr(self, "command_{}".format(command))
routersploit.log:23: File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
routersploit.log:24: command_handler = getattr(self, "command_{}".format(command))
routersploit.log:28: File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
routersploit.log:29: command_handler = getattr(self, "command_{}".format(command))
routersploit.log:33: File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
routersploit.log:34: command_handler = getattr(self, "command_{}".format(command))
routersploit.log:38: File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
routersploit.log:39: command_handler = getattr(self, "command_{}".format(command))
routersploit.log:43: File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
routersploit.log:44: command_handler = getattr(self, "command_{}".format(command))
routersploit.log:48: File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
routersploit.log:49: command_handler = getattr(self, "command_{}".format(command))
routersploit.log:53: File "/root/Documents/routersploit/routersploit/interpreter.py", line 73, in get_command_handler
routersploit.log:54: command_handler = getattr(self, "command_{}".format(command))
root@CRACK_COCAINE:~/Documents/routersploit# cd routersploit
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# atom interpreter.py
root@CRACK_COCAINE:~/Documents/routersploit/routersploit# cd -
/root/Documents/routersploit
root@CRACK_COCAINE:~/Documents/routersploit# python
Python 2.7.14 (default, Sep 17 2017, 18:50:44)
[GCC 7.2.0] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import routersploit
>>> from routersploit import *
>>> import interpreter
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: No module named interpreter
>>> from routersploit import interpreter
>>> interpreter.get_command_handler(self, 'show payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'get_command_handler'
>>> interpreter.get_command_handler(self, 'show')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'get_command_handler'
>>> interpreter.BaseInterpreter.get_command_handler(self, 'show payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> interpreter.BaseInterpreter(self).get_command_handler(self, 'show payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> interpreter.BaseInterpreter(object).get_command_handler(self, 'show payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> interpreter.BaseInterpreter(__init__).get_command_handler(self, 'show payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__init__' is not defined
>>> interpreter.BaseInterpreter(object).get_command_handler(self, 'show payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> interpreter.BaseInterpreter(object).get_command_handler('show payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> interpreter.BaseInterpreter(object).get_command_handler('show payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> interpreter.BaseInterpreter(object).__init__
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> dir(interpreter.BaseInterpreter)
['__class__', '__delattr__', '__dict__', '__doc__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', 'commands', 'complete', 'default_completer', 'get_command_handler', 'global_help', 'history_file', 'history_length', 'parse_line', 'prompt', 'raw_command_completer', 'setup', 'start', 'suggested_commands']
>>> dir(interpreter.BaseInterpreter.commands)
['__call__', '__class__', '__cmp__', '__delattr__', '__doc__', '__format__', '__func__', '__get__', '__getattribute__', '__hash__', '__init__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__self__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'im_class', 'im_func', 'im_self']
>>> dir(interpreter.BaseInterpreter.get_command_handler
... show payloads
File "<stdin>", line 2
show payloads
^
SyntaxError: invalid syntax
>>> dir(interpreter.BaseInterpreter.get_command_handler('show payloads'))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method get_command_handler() must be called with BaseInterpreter instance as first argument (got str instance instead)
>>> BaseInterpreter()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'BaseInterpreter' is not defined
>>> help(BaseInterpreter)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'BaseInterpreter' is not defined
>>> BaseInterpreter(__init__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'BaseInterpreter' is not defined
>>> import BaseInterpreter
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: No module named BaseInterpreter
>>> import BaseInterpreter~
KeyboardInterrupt
>>> dir(BaseInterpreter)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'BaseInterpreter' is not defined
>>> dir(interpreter.BaseInterpreter)
['__class__', '__delattr__', '__dict__', '__doc__', '__format__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', 'commands', 'complete', 'default_completer', 'get_command_handler', 'global_help', 'history_file', 'history_length', 'parse_line', 'prompt', 'raw_command_completer', 'setup', 'start', 'suggested_commands']
>>> dir(interpreter.suggeseted_commands)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'suggeseted_commands'
>>> dir(interpreter.suggeseted_commands)
KeyboardInterrupt
>>> vars
<built-in function vars>
>>> vars()
{'exploits': <module 'routersploit.exploits' from 'routersploit/exploits.pyc'>, 'mute': <function mute at 0x7fb92ad31398>, 'utils': <module 'routersploit.utils' from 'routersploit/utils/__init__.pyc'>, 'print_status': <function print_status at 0x7fb92ad315f0>, 'print_error': <function print_error at 0x7fb92ad31578>, 'printer': <module 'routersploit.printer' from 'routersploit/printer.pyc'>, 'validators': <module 'routersploit.validators' from 'routersploit/validators.pyc'>, 'interpreter': <module 'routersploit.interpreter' from 'routersploit/interpreter.pyc'>, 'print_table': <function print_table at 0x7fb92ad31b18>, 'sanitize_url': <function sanitize_url at 0x7fb92ad31b90>, 'wordlists': <module 'routersploit.wordlists' from 'routersploit/wordlists/__init__.pyc'>, '__package__': None, 'payloads': <module 'routersploit.payloads' from 'routersploit/payloads.pyc'>, 'tokenize': <function tokenize at 0x7fb92ad31f50>, '__doc__': None, 'http_request': <function http_request at 0x7fb92ad31cf8>, 'shell': <function shell at 0x7fb92accf398>, 'ssh_interactive': <function ssh_interactive at 0x7fb92ad31de8>, '__builtins__': <module '__builtin__' (built-in)>, 'boolify': <function boolify at 0x7fb92ad31d70>, 'LockedIterator': <class 'routersploit.utils.LockedIterator'>, 'multi': <function multi at 0x7fb92ad31488>, '__name__': '__main__', 'modules': <module 'routersploit.modules' from 'routersploit/modules/__init__.pyc'>, 'routersploit': <module 'routersploit' from 'routersploit/__init__.pyc'>, 'index_modules': <function index_modules at 0x7fb92ad15c80>, 'print_success': <function print_success at 0x7fb92ad31668>, 'print_info': <function print_info at 0x7fb92ad316e0>, 'exceptions': <module 'routersploit.exceptions' from 'routersploit/exceptions.pyc'>, 'random_text': <function random_text at 0x7fb92ad31c80>}
>>> dir(interpreter.prompt)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'prompt'
>>> help(interpreter.prompt)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'prompt'
>>> prompt
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'prompt' is not defined
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'tokenize', 'utils', 'validators', 'wordlists']
>>> dir(multi)
['__call__', '__class__', '__closure__', '__code__', '__defaults__', '__delattr__', '__dict__', '__doc__', '__format__', '__get__', '__getattribute__', '__globals__', '__hash__', '__init__', '__module__', '__name__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'func_closure', 'func_code', 'func_defaults', 'func_dict', 'func_doc', 'func_globals', 'func_name']
>>> dir(_call_)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '_call_' is not defined
>>> dir(_globals_)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '_globals_' is not defined
>>> globals
<built-in function globals>
>>> globals()
{'exploits': <module 'routersploit.exploits' from 'routersploit/exploits.pyc'>, 'mute': <function mute at 0x7fb92ad31398>, 'utils': <module 'routersploit.utils' from 'routersploit/utils/__init__.pyc'>, 'print_status': <function print_status at 0x7fb92ad315f0>, 'print_error': <function print_error at 0x7fb92ad31578>, 'printer': <module 'routersploit.printer' from 'routersploit/printer.pyc'>, 'validators': <module 'routersploit.validators' from 'routersploit/validators.pyc'>, 'interpreter': <module 'routersploit.interpreter' from 'routersploit/interpreter.pyc'>, 'print_table': <function print_table at 0x7fb92ad31b18>, 'sanitize_url': <function sanitize_url at 0x7fb92ad31b90>, 'wordlists': <module 'routersploit.wordlists' from 'routersploit/wordlists/__init__.pyc'>, '__package__': None, 'payloads': <module 'routersploit.payloads' from 'routersploit/payloads.pyc'>, 'tokenize': <function tokenize at 0x7fb92ad31f50>, '__doc__': None, 'http_request': <function http_request at 0x7fb92ad31cf8>, 'shell': <function shell at 0x7fb92accf398>, 'ssh_interactive': <function ssh_interactive at 0x7fb92ad31de8>, '__builtins__': <module '__builtin__' (built-in)>, 'boolify': <function boolify at 0x7fb92ad31d70>, 'LockedIterator': <class 'routersploit.utils.LockedIterator'>, 'multi': <function multi at 0x7fb92ad31488>, '__name__': '__main__', 'modules': <module 'routersploit.modules' from 'routersploit/modules/__init__.pyc'>, 'routersploit': <module 'routersploit' from 'routersploit/__init__.pyc'>, 'index_modules': <function index_modules at 0x7fb92ad15c80>, 'print_success': <function print_success at 0x7fb92ad31668>, 'print_info': <function print_info at 0x7fb92ad316e0>, 'exceptions': <module 'routersploit.exceptions' from 'routersploit/exceptions.pyc'>, 'random_text': <function random_text at 0x7fb92ad31c80>}
>>> exec
File "<stdin>", line 1
exec
^
SyntaxError: invalid syntax
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'tokenize', 'utils', 'validators', 'wordlists']
>>> dir(interpreter)
['BaseInterpreter', 'BasePayload', 'Counter', 'Exploit', 'GLOBAL_OPTS', 'PrinterThread', 'RoutersploitException', 'RoutersploitInterpreter', '__builtins__', '__doc__', '__file__', '__name__', '__package__', 'atexit', 'itertools', 'os', 'print_function', 'printer_queue', 'readline', 'sys', 'traceback', 'utils']
>>> dir(itertools)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'itertools' is not defined
>>> dir(readline)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'readline' is not defined
>>> dir(sys)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'sys' is not defined
>>> dir(utils)
['ABCMeta', 'CREDS_DIR', 'DummyFile', 'EXPLOITS_DIR', 'LockedIterator', 'MODULES_DIR', 'NonStringIterable', 'PrintResource', 'Resource', 'RoutersploitException', 'SCANNERS_DIR', '__builtins__', '__cprint', '__doc__', '__file__', '__name__', '__package__', '__path__', 'absolute_import', 'abstractmethod', 'boolify', 'collections', 'colors', 'create_exploit', 'create_resource', 'errno', 'http_request', 'humanize_path', 'import_exploit', 'importlib', 'index_modules', 'iter_modules', 'mkdir_p', 'module_required', 'multi', 'mute', 'os', 'posix_shell', 'pprint_dict_in_order', 'print_error', 'print_function', 'print_info', 'print_lock', 'print_status', 'print_success', 'print_table', 'printer_queue', 'pythonize_path', 'random', 'random_text', 're', 'requests', 'rsf_modules', 'sanitize_url', 'select', 'socket', 'ssh_interactive', 'stop_after', 'string', 'strtobool', 'sys', 'thread_output_stream', 'threading', 'tokenize', 'windows_shell', 'wraps']
>>> dir(import_exploit)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'import_exploit' is not defined
>>> dir(ssh_interactive)
['__call__', '__class__', '__closure__', '__code__', '__defaults__', '__delattr__', '__dict__', '__doc__', '__format__', '__get__', '__getattribute__', '__globals__', '__hash__', '__init__', '__module__', '__name__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'func_closure', 'func_code', 'func_defaults', 'func_dict', 'func_doc', 'func_globals', 'func_name']
>>> dir(__dict__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__dict__' is not defined
>>> help(__dict__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__dict__' is not defined
>>> help(ssh_interactive.__dict__)
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'tokenize', 'utils', 'validators', 'wordlists']
>>> dir(utils)
['ABCMeta', 'CREDS_DIR', 'DummyFile', 'EXPLOITS_DIR', 'LockedIterator', 'MODULES_DIR', 'NonStringIterable', 'PrintResource', 'Resource', 'RoutersploitException', 'SCANNERS_DIR', '__builtins__', '__cprint', '__doc__', '__file__', '__name__', '__package__', '__path__', 'absolute_import', 'abstractmethod', 'boolify', 'collections', 'colors', 'create_exploit', 'create_resource', 'errno', 'http_request', 'humanize_path', 'import_exploit', 'importlib', 'index_modules', 'iter_modules', 'mkdir_p', 'module_required', 'multi', 'mute', 'os', 'posix_shell', 'pprint_dict_in_order', 'print_error', 'print_function', 'print_info', 'print_lock', 'print_status', 'print_success', 'print_table', 'printer_queue', 'pythonize_path', 'random', 'random_text', 're', 'requests', 'rsf_modules', 'sanitize_url', 'select', 'socket', 'ssh_interactive', 'stop_after', 'string', 'strtobool', 'sys', 'thread_output_stream', 'threading', 'tokenize', 'windows_shell', 'wraps']
>>> import absolute_import
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: No module named absolute_import
>>> from routersploit import absolute_import
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: cannot import name absolute_import
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'tokenize', 'utils', 'validators', 'wordlists']
>>> dir(interpreter)
['BaseInterpreter', 'BasePayload', 'Counter', 'Exploit', 'GLOBAL_OPTS', 'PrinterThread', 'RoutersploitException', 'RoutersploitInterpreter', '__builtins__', '__doc__', '__file__', '__name__', '__package__', 'atexit', 'itertools', 'os', 'print_function', 'printer_queue', 'readline', 'sys', 'traceback', 'utils']
>>> dir(interpreter.command_run)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'command_run'
>>> dir(interpreter.command_run())
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'command_run'
>>> dir(interpreter.command_run('show'))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'command_run'
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'tokenize', 'utils', 'validators', 'wordlists']
>>> dir(modules)
['__author__', '__builtins__', '__doc__', '__file__', '__name__', '__package__', '__path__']
>>> dir(index_modules)
['__call__', '__class__', '__closure__', '__code__', '__defaults__', '__delattr__', '__dict__', '__doc__', '__format__', '__get__', '__getattribute__', '__globals__', '__hash__', '__init__', '__module__', '__name__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'func_closure', 'func_code', 'func_defaults', 'func_dict', 'func_doc', 'func_globals', 'func_name']
>>> dir(utils)
['ABCMeta', 'CREDS_DIR', 'DummyFile', 'EXPLOITS_DIR', 'LockedIterator', 'MODULES_DIR', 'NonStringIterable', 'PrintResource', 'Resource', 'RoutersploitException', 'SCANNERS_DIR', '__builtins__', '__cprint', '__doc__', '__file__', '__name__', '__package__', '__path__', 'absolute_import', 'abstractmethod', 'boolify', 'collections', 'colors', 'create_exploit', 'create_resource', 'errno', 'http_request', 'humanize_path', 'import_exploit', 'importlib', 'index_modules', 'iter_modules', 'mkdir_p', 'module_required', 'multi', 'mute', 'os', 'posix_shell', 'pprint_dict_in_order', 'print_error', 'print_function', 'print_info', 'print_lock', 'print_status', 'print_success', 'print_table', 'printer_queue', 'pythonize_path', 'random', 'random_text', 're', 'requests', 'rsf_modules', 'sanitize_url', 'select', 'socket', 'ssh_interactive', 'stop_after', 'string', 'strtobool', 'sys', 'thread_output_stream', 'threading', 'tokenize', 'windows_shell', 'wraps']
>>> dir(pythonize)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'pythonize' is not defined
>>> dir(pythonize_path)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'pythonize_path' is not defined
>>> help(pythonize_path)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'pythonize_path' is not defined
>>> dir(pythonize_path)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'pythonize_path' is not defined
>>> posix_shell()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'posix_shell' is not defined
>>> help(posix_shell)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'posix_shell' is not defined
>>> help(import_exploit)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'import_exploit' is not defined
>>> help(__dict__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__dict__' is not defined
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'tokenize', 'utils', 'validators', 'wordlists']
>>> dir(modules)
['__author__', '__builtins__', '__doc__', '__file__', '__name__', '__package__', '__path__']
>>> dir(random_text)
['__call__', '__class__', '__closure__', '__code__', '__defaults__', '__delattr__', '__dict__', '__doc__', '__format__', '__get__', '__getattribute__', '__globals__', '__hash__', '__init__', '__module__', '__name__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'func_closure', 'func_code', 'func_defaults', 'func_dict', 'func_doc', 'func_globals', 'func_name']
>>> dir(utils)
['ABCMeta', 'CREDS_DIR', 'DummyFile', 'EXPLOITS_DIR', 'LockedIterator', 'MODULES_DIR', 'NonStringIterable', 'PrintResource', 'Resource', 'RoutersploitException', 'SCANNERS_DIR', '__builtins__', '__cprint', '__doc__', '__file__', '__name__', '__package__', '__path__', 'absolute_import', 'abstractmethod', 'boolify', 'collections', 'colors', 'create_exploit', 'create_resource', 'errno', 'http_request', 'humanize_path', 'import_exploit', 'importlib', 'index_modules', 'iter_modules', 'mkdir_p', 'module_required', 'multi', 'mute', 'os', 'posix_shell', 'pprint_dict_in_order', 'print_error', 'print_function', 'print_info', 'print_lock', 'print_status', 'print_success', 'print_table', 'printer_queue', 'pythonize_path', 'random', 'random_text', 're', 'requests', 'rsf_modules', 'sanitize_url', 'select', 'socket', 'ssh_interactive', 'stop_after', 'string', 'strtobool', 'sys', 'thread_output_stream', 'threading', 'tokenize', 'windows_shell', 'wraps']
>>> dir(create_exploit)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'create_exploit' is not defined
>>> help(create_exploit)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'create_exploit' is not defined
>>> help(humanize_path)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'humanize_path' is not defined
>>> dir(humanize_path)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'humanize_path' is not defined
>>> routersploit.utils.__init__
<method-wrapper '__init__' of module object at 0x7fb92ecd26a8>
>>> dir(routersploit.utils)
['ABCMeta', 'CREDS_DIR', 'DummyFile', 'EXPLOITS_DIR', 'LockedIterator', 'MODULES_DIR', 'NonStringIterable', 'PrintResource', 'Resource', 'RoutersploitException', 'SCANNERS_DIR', '__builtins__', '__cprint', '__doc__', '__file__', '__name__', '__package__', '__path__', 'absolute_import', 'abstractmethod', 'boolify', 'collections', 'colors', 'create_exploit', 'create_resource', 'errno', 'http_request', 'humanize_path', 'import_exploit', 'importlib', 'index_modules', 'iter_modules', 'mkdir_p', 'module_required', 'multi', 'mute', 'os', 'posix_shell', 'pprint_dict_in_order', 'print_error', 'print_function', 'print_info', 'print_lock', 'print_status', 'print_success', 'print_table', 'printer_queue', 'pythonize_path', 'random', 'random_text', 're', 'requests', 'rsf_modules', 'sanitize_url', 'select', 'socket', 'ssh_interactive', 'stop_after', 'string', 'strtobool', 'sys', 'thread_output_stream', 'threading', 'tokenize', 'windows_shell', 'wraps']
>>> help(string)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'string' is not defined
>>> vars()
{'exploits': <module 'routersploit.exploits' from 'routersploit/exploits.pyc'>, 'mute': <function mute at 0x7fb92ad31398>, 'utils': <module 'routersploit.utils' from 'routersploit/utils/__init__.pyc'>, 'print_status': <function print_status at 0x7fb92ad315f0>, 'print_error': <function print_error at 0x7fb92ad31578>, 'printer': <module 'routersploit.printer' from 'routersploit/printer.pyc'>, 'validators': <module 'routersploit.validators' from 'routersploit/validators.pyc'>, 'interpreter': <module 'routersploit.interpreter' from 'routersploit/interpreter.pyc'>, 'print_table': <function print_table at 0x7fb92ad31b18>, 'sanitize_url': <function sanitize_url at 0x7fb92ad31b90>, 'wordlists': <module 'routersploit.wordlists' from 'routersploit/wordlists/__init__.pyc'>, '__package__': None, 'payloads': <module 'routersploit.payloads' from 'routersploit/payloads.pyc'>, 'tokenize': <function tokenize at 0x7fb92ad31f50>, '__doc__': None, 'http_request': <function http_request at 0x7fb92ad31cf8>, 'shell': <function shell at 0x7fb92accf398>, 'ssh_interactive': <function ssh_interactive at 0x7fb92ad31de8>, '__builtins__': <module '__builtin__' (built-in)>, 'boolify': <function boolify at 0x7fb92ad31d70>, 'LockedIterator': <class 'routersploit.utils.LockedIterator'>, 'multi': <function multi at 0x7fb92ad31488>, '__name__': '__main__', 'modules': <module 'routersploit.modules' from 'routersploit/modules/__init__.pyc'>, 'routersploit': <module 'routersploit' from 'routersploit/__init__.pyc'>, 'index_modules': <function index_modules at 0x7fb92ad15c80>, 'print_success': <function print_success at 0x7fb92ad31668>, 'print_info': <function print_info at 0x7fb92ad316e0>, 'exceptions': <module 'routersploit.exceptions' from 'routersploit/exceptions.pyc'>, 'random_text': <function random_text at 0x7fb92ad31c80>}
>>> command = "show"
>>> command = "show payloads"
>>> interpreter.get_command_handler(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'get_command_handler'
>>> interpreter.start.get_command_handler(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'start'
>>> interpreter.BaseInterpreter.start.get_command_handler(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute 'get_command_handler'
>>> interpreter.BaseInterpreter(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> interpreter.BaseInterpreter()
<routersploit.interpreter.BaseInterpreter object at 0x7fb92ace76d0>
>>> print 0x7fb92ace76d0
140433263851216
>>> test = interpreter.BaseInterpreter(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> test = interpreter.BaseInterpreter()
>>> print test
<routersploit.interpreter.BaseInterpreter object at 0x7fb92ace7710>
>>> test = interpreter.BaseInterpreter(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> test = interpreter.BaseInterpreter()
>>> command = use scanners/autopwn
File "<stdin>", line 1
command = use scanners/autopwn
^
SyntaxError: invalid syntax
>>> command = "use scanners/autopwn"
>>> test = interpreter.BaseInterpreter()
>>> interpreter.BaseInterpreter()
<routersploit.interpreter.BaseInterpreter object at 0x7fb92ac99790>
>>> print
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'command', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'test', 'tokenize', 'utils', 'validators', 'wordlists']
>>> printer
<module 'routersploit.printer' from 'routersploit/printer.pyc'>
>>> test = printer
>>> print test
<module 'routersploit.printer' from 'routersploit/printer.pyc'>
>>> command = "run"
>>> interpreter.BaseInterpreter()
<routersploit.interpreter.BaseInterpreter object at 0x7fb92ac90690>
>>> print_error
<function print_error at 0x7fb92ad31578>
>>> print command
run
>>> print_info
<function print_info at 0x7fb92ad316e0>
>>> print_status
<function print_status at 0x7fb92ad315f0>
>>> print_success
<function print_success at 0x7fb92ad31668>
>>> help(__builtins__)
>>> __getattribute__
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__getattribute__' is not defined
>>> __getattribute__(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__getattribute__' is not defined
>>> __getattribute__(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__getattribute__' is not defined
>>> help(__builtins__)
>>> dict
<type 'dict'>
>>> dict
<type 'dict'>
>>> dict command
File "<stdin>", line 1
dict command
^
SyntaxError: invalid syntax
>>> doct get_command
File "<stdin>", line 1
doct get_command
^
SyntaxError: invalid syntax
>>> doct get_command()
File "<stdin>", line 1
doct get_command()
^
SyntaxError: invalid syntax
>>> locals()
{'exploits': <module 'routersploit.exploits' from 'routersploit/exploits.pyc'>, 'mute': <function mute at 0x7fb92ad31398>, 'utils': <module 'routersploit.utils' from 'routersploit/utils/__init__.pyc'>, 'print_status': <function print_status at 0x7fb92ad315f0>, 'print_error': <function print_error at 0x7fb92ad31578>, 'printer': <module 'routersploit.printer' from 'routersploit/printer.pyc'>, 'validators': <module 'routersploit.validators' from 'routersploit/validators.pyc'>, 'interpreter': <module 'routersploit.interpreter' from 'routersploit/interpreter.pyc'>, 'print_table': <function print_table at 0x7fb92ad31b18>, 'sanitize_url': <function sanitize_url at 0x7fb92ad31b90>, 'wordlists': <module 'routersploit.wordlists' from 'routersploit/wordlists/__init__.pyc'>, '__package__': None, 'payloads': <module 'routersploit.payloads' from 'routersploit/payloads.pyc'>, 'tokenize': <function tokenize at 0x7fb92ad31f50>, 'test': <module 'routersploit.printer' from 'routersploit/printer.pyc'>, 'command': 'run', '__doc__': None, 'http_request': <function http_request at 0x7fb92ad31cf8>, 'shell': <function shell at 0x7fb92accf398>, 'ssh_interactive': <function ssh_interactive at 0x7fb92ad31de8>, '__builtins__': <module '__builtin__' (built-in)>, 'boolify': <function boolify at 0x7fb92ad31d70>, 'LockedIterator': <class 'routersploit.utils.LockedIterator'>, 'multi': <function multi at 0x7fb92ad31488>, '__name__': '__main__', 'modules': <module 'routersploit.modules' from 'routersploit/modules/__init__.pyc'>, 'routersploit': <module 'routersploit' from 'routersploit/__init__.pyc'>, 'index_modules': <function index_modules at 0x7fb92ad15c80>, 'print_success': <function print_success at 0x7fb92ad31668>, 'print_info': <function print_info at 0x7fb92ad316e0>, 'exceptions': <module 'routersploit.exceptions' from 'routersploit/exceptions.pyc'>, 'random_text': <function random_text at 0x7fb92ad31c80>}
>>> BaseInterpreter(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'BaseInterpreter' is not defined
>>> interpreter.BaseInterpreter(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> interpreter.BaseInterpreter()
<routersploit.interpreter.BaseInterpreter object at 0x7fb92ac99790>
>>> command = "exit"
>>> interpreter.BaseInterpreter()
<routersploit.interpreter.BaseInterpreter object at 0x7fb92ac90690>
>>> help(__doc__)
>>> list
<type 'list'>
>>> list __builtins__
File "<stdin>", line 1
list __builtins__
^
SyntaxError: invalid syntax
>>> list main
File "<stdin>", line 1
list main
^
SyntaxError: invalid syntax
>>> list command
File "<stdin>", line 1
list command
^
SyntaxError: invalid syntax
>>> list(print_status)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'function' object is not iterable
>>> list(wordlists)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'module' object is not iterable
>>> list(modules)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'module' object is not iterable
>>> help(list)
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'command', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'test', 'tokenize', 'utils', 'validators', 'wordlists']
>>> help(command)
>>> command(__dict__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__dict__' is not defined
>>> dict(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: dictionary update sequence element #0 has length 1; 2 is required
>>> __dict__(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__dict__' is not defined
>>> help(command)
>>> help(command)
KeyboardInterrupt
>>> command(__weakref__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__weakref__' is not defined
>>> help(command)
>>> call(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'call' is not defined
>>> command(call)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'call' is not defined
>>> command(__call__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__call__' is not defined
>>> command.call('show payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'str' object has no attribute 'call'
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'command', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'test', 'tokenize', 'utils', 'validators', 'wordlists']
>>> interpreter.dir()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'dir'
>>> get.command.call
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'get' is not defined
>>> get_command.call
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'get_command' is not defined
>>> interpreter.get_command.call
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'get_command'
>>> interpreter.BaseInterpreter()
<routersploit.interpreter.BaseInterpreter object at 0x7fb92ac76510>
>>> interpreter.BaseInterpreter(dir)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> interpreter.BaseInterpreter.setup()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method setup() must be called with BaseInterpreter instance as first argument (got nothing instead)
>>> interpreter.BaseInterpreter.parse_line.call("show payloads")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute 'call'
>>> interpreter.BaseInterpreter.parse_line("show payloads")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method parse_line() must be called with BaseInterpreter instance as first argument (got str instance instead)
>>> interpreter.BaseInterpreter("show payloads")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> interpreter.BaseInterpreter(__init__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__init__' is not defined
>>> interpreter.BaseInterpreter.__init__
<unbound method BaseInterpreter.__init__>
>>> interpreter.BaseInterpreter.__init__."show payloads"
File "<stdin>", line 1
interpreter.BaseInterpreter.__init__."show payloads"
^
SyntaxError: invalid syntax
>>> interpreter.BaseInterpreter.__init__.show
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute 'show'
>>> interpreter.BaseInterpreter.__init__.parse_line
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute 'parse_line'
>>> interpreter.BaseInterpreter.parse_line
<unbound method BaseInterpreter.parse_line>
>>> interpreter.BaseInterpreter.parse_line(self,"showpayloads")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> interpreter.BaseInterpreter.get_command_handler('show')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method get_command_handler() must be called with BaseInterpreter instance as first argument (got str instance instead)
>>> interpreter.BaseInterpreter(get_command_handler('show'))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'get_command_handler' is not defined
>>> interpreter.BaseInterpreter().parse_line(self,"showpayloads")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> interpreter.BaseInterpreter().parse_line(self,"showpayloads")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> interpreter.BaseInterpreter().parse_line("showpayloads")
('showpayloads', '')
>>> interpreter.BaseInterpreter().parse_line("show payloads")
('show', 'payloads')
>>> interpreter.BaseInterpreter().parse_line("use scanners/autopwn")
('use', 'scanners/autopwn')
>>> interpreter.BaseInterpreter().parse_line("set target 192.168.1.1")
('set', 'target 192.168.1.1')
>>> interpreter.BaseInterpreter().parse_line("set target 192.168.1.1")
('set', 'target 192.168.1.1')
>>> interpreter.BaseInterpreter().parse_line("run")
('run', '')
>>> interpreter.BaseInterpreter().parse_line("show target")
('show', 'target')
>>> interpreter.BaseInterpreter().parse_line("0v")
('0v', '')
>>> interpreter.BaseInterpreter().parse_line("-v")
('-v', '')
>>> interpreter.BaseInterpreter().parse_line("exec uname")
('exec', 'uname')
>>> interpreter.BaseInterpreter().parse_line("ssh -p 666 root@70.170.54.53")
('ssh', '-p 666 root@70.170.54.53')
>>> complete_command.call
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'complete_command' is not defined
>>> complete(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'complete' is not defined
>>> complete(command,0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'complete' is not defined
>>> complete('show',0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'complete' is not defined
>>> interpreter.BaseInterpreter().parse_line("ssh -p 666 root@70.170.54.53")
('ssh', '-p 666 root@70.170.54.53')
>>> print command
exit
>>> print history_file
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'history_file' is not defined
>>> print RoutersploitInterpreter.history_file
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> print RoutersploitInterpreter().history_file
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> print RoutersploitInterpreter(BaseInterpreter).history_file
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> print interpreter.RoutersploitInterpreter(BaseInterpreter).history_file
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'BaseInterpreter' is not defined
>>> print interpreter.RoutersploitInterpreter().history_file
/root/.rsf_history
>>> run
KeyboardInterrupt
>>> RoutersploitInterpreter()._parse_prompt('show all')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> RoutersploitInterpreter().command_use('scanners/autopwn')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> RoutersploitInterpreter().BaseInterpreter().command_use('scanners/autopwn')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> interpreter.BaseInterpreter().command_use('scanners/autopwn')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'BaseInterpreter' object has no attribute 'command_use'
>>> utils.command_use('scanners/autopwn')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'command_use'
>>> utils.stop.after.command_use('scanners/autopwn')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'stop'
>>> utils.stop_after.command_use('scanners/autopwn')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute 'command_use'
>>> interpreter.BaseInterpreter()._show_all()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'BaseInterpreter' object has no attribute '_show_all'
>>> interpreter.BaseInterpreter()._show_modules()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'BaseInterpreter' object has no attribute '_show_modules'
>>> interpreter.utils._show_modules()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute '_show_modules'
>>> interpreter.utils.module_required._show_modules()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute '_show_modules'
>>> interpreter.utils.module_required()._show_modules()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: module_required() takes exactly 1 argument (0 given)
>>> interpreter.utils.module_required()._show_modules('root')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: module_required() takes exactly 1 argument (0 given)
>>> interpreter.utils.module_required()._show_modules('')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: module_required() takes exactly 1 argument (0 given)
>>> interpreter.utils.module_required()._show_modules(' ')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: module_required() takes exactly 1 argument (0 given)
>>> interpreter.utils.__show_modules('scanners')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute '__show_modules'
>>> interpreter.utils().__show_modules('scanners')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'module' object is not callable
>>> interpreter.__show_modules('scanners')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute '__show_modules'
>>> interpreter.@utils.module_required.__show_modules('scanners')
File "<stdin>", line 1
interpreter.@utils.module_required.__show_modules('scanners')
^
SyntaxError: invalid syntax
>>> interpreter.utils.module_required.__show_modules('scanners')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute '__show_modules'
>>> interpreter.utils.module_required.__show_modules('scanners'
...
...
...
KeyboardInterrupt
>>> var = interpreter.utils.module_required.__show_modules('scanners'
...
KeyboardInterrupt
>>> var = interpreter.utils.module_required.__show_modules('scanners')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute '__show_modules'
>>> self.__show_modules('scanners')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> _show_scanners()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '_show_scanners' is not defined
>>> interpreter._show_scanners()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute '_show_scanners'
>>> interpreter.utils._show_scanners()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute '_show_scanners'
>>> interpreter.utils.print_info_show_scanners()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'print_info_show_scanners'
>>> interpreter.utils.print_info._show_scanners()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute '_show_scanners'
>>> _show_scanners(self, *args, **kwargs):
File "<stdin>", line 1
_show_scanners(self, *args, **kwargs):
^
SyntaxError: invalid syntax
>>> self.__show_modules('scanners')
File "<stdin>", line 1
self.__show_modules('scanners')
^
IndentationError: unexpected indent
>>> _show_scanners(self)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '_show_scanners' is not defined
>>> _show_scanners(self,,)
File "<stdin>", line 1
_show_scanners(self,,)
^
SyntaxError: invalid syntax
>>> _show_scanners(self,'','')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '_show_scanners' is not defined
>>> utils.module_required._show_devices
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute '_show_devices'
>>> utils.module_required._show_devices()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute '_show_devices'
>>> utils.module_required._show_devices(services)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute '_show_devices'
>>> utils.module_required._show_devices('devices')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute '_show_devices'
>>> interpreter.utils.module_required._show_devices('devices')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'function' object has no attribute '_show_devices'
>>> /root/Documents/routersploit/routersploit/interpreter.pyinterpreter.utils.module_required._show_devices('devices')
KeyboardInterrupt
>>> RoutersploitInterpreter.available_modules_completion('payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> BaseInterpreter.available_modules_completion('payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'BaseInterpreter' is not defined
>>> interpreter.available_modules_completion('payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'available_modules_completion'
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'command', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'test', 'tokenize', 'utils', 'validators', 'wordlists']
>>> help()
Welcome to Python 2.7! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/2.7/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
help>
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
>>> help(__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__' is not defined
>>> help(_init__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '_init__' is not defined
>>> BaseInterpreter.available_modules_completion('payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'BaseInterpreter' is not defined
>>> RoutersploitInterpreter(BaseInterpreter).available_modules_completion('payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> RoutersploitInterpreter('payloads')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> help(RoutersploitInterpreter)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> dir(RoutersploitInterpreter)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> /root/Documents/routersploit/routersploit/interpreter.py
KeyboardInterrupt
>>> help(BaseInterpreter)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'BaseInterpreter' is not defined
>>> dir(BaseInterpreter)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'BaseInterpreter' is not defined
>>> dir(interpreter)
['BaseInterpreter', 'BasePayload', 'Counter', 'Exploit', 'GLOBAL_OPTS', 'PrinterThread', 'RoutersploitException', 'RoutersploitInterpreter', '__builtins__', '__doc__', '__file__', '__name__', '__package__', 'atexit', 'itertools', 'os', 'print_function', 'printer_queue', 'readline', 'sys', 'traceback', 'utils']
>>> help(interpreter.RoutersploitInterpreter)
>>> interpreter.RoutersploitInterpreter.start()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method start() must be called with RoutersploitInterpreter instance as first argument (got nothing instead)
>>> interpreter.RoutersploitInterpreter.start('show')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method start() must be called with RoutersploitInterpreter instance as first argument (got str instance instead)
>>> interpreter.RoutersploitInterpreter(start)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'start' is not defined
>>> RoutersploitInterpreter(start)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> help(RoutersploitInterpreter)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> RoutersploitInterpreter.start
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> RoutersploitInterpreter().start
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> RoutersploitInterpreter().start.()
File "<stdin>", line 1
RoutersploitInterpreter().start.()
^
SyntaxError: invalid syntax
>>> RoutersploitInterpreter.start.()
File "<stdin>", line 1
RoutersploitInterpreter.start.()
^
SyntaxError: invalid syntax
>>> RoutersploitInterpreter(BaseInterpreter).start.()
File "<stdin>", line 1
RoutersploitInterpreter(BaseInterpreter).start.()
^
SyntaxError: invalid syntax
>>> global_help
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'global_help' is not defined
>>> RoutersploitInterpreterglobal_help
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreterglobal_help' is not defined
>>> RoutersploitInterpreter.global_help
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'RoutersploitInterpreter' is not defined
>>> interpreter.RoutersploitInterpreter.global_help
'Global commands:\n help Print this help menu\n use <module> Select a module for usage\n exec <shell command> <args> Execute a command in a shell\n search <search term> Search for appropriate module\n exit Exit RouterSploit'
>>> interpreter.RoutersploitInterpreter.search payloads
File "<stdin>", line 1
interpreter.RoutersploitInterpreter.search payloads
^
SyntaxError: invalid syntax
>>> interpreter.RoutersploitInterpreter.search
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: type object 'RoutersploitInterpreter' has no attribute 'search'
>>> interpreter.RoutersploitInterpreter.show
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: type object 'RoutersploitInterpreter' has no attribute 'show'
>>> interpreter.RoutersploitInterpreter.use scanners/autopwn
File "<stdin>", line 1
interpreter.RoutersploitInterpreter.use scanners/autopwn
^
SyntaxError: invalid syntax
>>> interpreter.RoutersploitInterpreter.use
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: type object 'RoutersploitInterpreter' has no attribute 'use'
>>> interpreter.RoutersploitInterpreter.global_help use
File "<stdin>", line 1
interpreter.RoutersploitInterpreter.global_help use
^
SyntaxError: invalid syntax
>>> interpreter.RoutersploitInterpreter.commands()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method commands() must be called with RoutersploitInterpreter instance as first argument (got nothing instead)
>>> interpreter.RoutersploitInterpreter.commands(self)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> interpreter.RoutersploitInterpreter(commands)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'commands' is not defined
>>> interpreter.RoutersploitInterpreter('commands')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>> interpreter.RoutersploitInterpreter()
<routersploit.interpreter.RoutersploitInterpreter object at 0x7fb92ac80250>
>>> interpreter.BaseInterpreter.commands()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method commands() must be called with BaseInterpreter instance as first argument (got nothing instead)
>>> interpreter.BaseInterpreter(commands)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'commands' is not defined
>>> interpreter.BaseInterpreter(commands.())
File "<stdin>", line 1
interpreter.BaseInterpreter(commands.())
^
SyntaxError: invalid syntax
>>> interpreter.BaseInterpreter('commands')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() takes exactly 1 argument (2 given)
>>>
>>> interpreter.BaseInterpreter.init('commands')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: type object 'BaseInterpreter' has no attribute 'init'
>>> interpreter.BaseInterpreter.__init__('commands')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method __init__() must be called with BaseInterpreter instance as first argument (got str instance instead)
>>>
>>> interpreter.BaseInterpreter.commands()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method commands() must be called with BaseInterpreter instance as first argument (got nothing instead)
>>> interpreter.BaseInterpreter.commands
<unbound method BaseInterpreter.commands>
>>> interpreter.BaseInterpreter.commands
<unbound method BaseInterpreter.commands>
>>>
KeyboardInterrupt
>>>
KeyboardInterrupt
>>> var = interpreter.BaseInterpreter.commands
>>> print var
<unbound method BaseInterpreter.commands>
>>> var = interpreter.BaseInterpreter(self).commands
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> var = interpreter.BaseInterpreter(self).commands(self)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> var = interpreter.BaseInterpreter(self).commands()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> var = interpreter.BaseInterpreter().commands()
>>> var = interpreter.BaseInterpreter().commands()
>>> print var
[]
>>> uname
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'uname' is not defined
>>> interpreter.BaseInterpreter.commands
<unbound method BaseInterpreter.commands>
>>> interpreter.BaseInterpreter.commands()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unbound method commands() must be called with BaseInterpreter instance as first argument (got nothing instead)
>>> interpreter.BaseInterpreter().commands()
[]
>>> interpreter.RoutersploitInterpreter().commands()
['back', 'check', 'exec', 'exit', 'exploit', 'help', 'run', 'search', 'set', 'setg', 'show', 'unsetg', 'use']
>>> interpreter.RoutersploitInterpreter().commands('sudo su')
['back', 'check', 'exec', 'exit', 'exploit', 'help', 'run', 'search', 'set', 'setg', 'show', 'unsetg', 'use']
>>> interpreter.RoutersploitInterpreter().commands('show payloads')
['back', 'check', 'exec', 'exit', 'exploit', 'help', 'run', 'search', 'set', 'setg', 'show', 'unsetg', 'use']
>>> interpreter.RoutersploitInterpreter().commands('use scanners/autopwn')
['back', 'check', 'exec', 'exit', 'exploit', 'help', 'run', 'search', 'set', 'setg', 'show', 'unsetg', 'use']
>>> interpreter.RoutersploitInterpreter().commands('set target 192.168.1.1')
['back', 'check', 'exec', 'exit', 'exploit', 'help', 'run', 'search', 'set', 'setg', 'show', 'unsetg', 'use']
>>> interpreter.RoutersploitInterpreter().commands('set port 80')
['back', 'check', 'exec', 'exit', 'exploit', 'help', 'run', 'search', 'set', 'setg', 'show', 'unsetg', 'use']
>>> run
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'run' is not defined
>>> interpreter.RoutersploitInterpreter().commands('run')
['back', 'check', 'exec', 'exit', 'exploit', 'help', 'run', 'search', 'set', 'setg', 'show', 'unsetg', 'use']
>>> interpreter.RoutersploitInterpreter().commands('help')
['back', 'check', 'exec', 'exit', 'exploit', 'help', 'run', 'search', 'set', 'setg', 'show', 'unsetg', 'use']
>>> license
Type license() to see the full license text
>>> license()
A. HISTORY OF THE SOFTWARE
==========================
Python was created in the early 1990s by Guido van Rossum at Stichting
Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
as a successor of a language called ABC. Guido remains Python's
principal author, although it includes many contributions from others.
In 1995, Guido continued his work on Python at the Corporation for
National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
in Reston, Virginia where he released several versions of the
software.
In May 2000, Guido and the Python core development team moved to
BeOpen.com to form the BeOpen PythonLabs team. In October of the same
year, the PythonLabs team moved to Digital Creations, which became
Zope Corporation. In 2001, the Python Software Foundation (PSF, see
https://www.python.org/psf/) was formed, a non-profit organization
created specifically to own Python-related Intellectual Property.
Zope Corporation was a sponsoring member of the PSF.
All Python releases are Open Source (see http://www.opensource.org for
the Open Source Definition). Historically, most, but not all, Python
Hit Return for more, or q (and Return) to quit:
releases have also been GPL-compatible; the table below summarizes
the various releases.
Release Derived Year Owner GPL-
from compatible? (1)
0.9.0 thru 1.2 1991-1995 CWI yes
1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
1.6 1.5.2 2000 CNRI no
2.0 1.6 2000 BeOpen.com no
1.6.1 1.6 2001 CNRI yes (2)
2.1 2.0+1.6.1 2001 PSF no
2.0.1 2.0+1.6.1 2001 PSF yes
2.1.1 2.1+2.0.1 2001 PSF yes
2.1.2 2.1.1 2002 PSF yes
2.1.3 2.1.2 2002 PSF yes
2.2 and above 2.1.1 2001-now PSF yes
Footnotes:
(1) GPL-compatible doesn't mean that we're distributing Python under
the GPL. All Python licenses, unlike the GPL, let you distribute
a modified version without making your changes open source. The
Hit Return for more, or q (and Return) to quit:
GPL-compatible licenses make it possible to combine Python with
other software that is released under the GPL; the others don't.
(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
because its license has a choice of law clause. According to
CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
is "not incompatible" with the GPL.
Thanks to the many outside volunteers who have worked under Guido's
direction to make these releases possible.
B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
===============================================================
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
--------------------------------------------
1. This LICENSE AGREEMENT is between the Python Software Foundation
("PSF"), and the Individual or Organization ("Licensee") accessing and
otherwise using this software ("Python") in source or binary form and
its associated documentation.
Hit Return for more, or q (and Return) to quit:
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python alone or in any derivative version,
provided, however, that PSF's License Agreement and PSF's notice of copyright,
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights
Reserved" are retained in Python alone or in any derivative version prepared by
Licensee.
3. In the event Licensee prepares a derivative work that is based on
or incorporates Python or any part thereof, and wants to make
the derivative work available to others as provided herein, then
Licensee hereby agrees to include in any such work a brief summary of
the changes made to Python.
4. PSF is making Python available to Licensee on an "AS IS"
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
Hit Return for more, or q (and Return) to quit:
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material
breach of its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any
relationship of agency, partnership, or joint venture between PSF and
Licensee. This License Agreement does not grant permission to use PSF
trademarks or trade name in a trademark sense to endorse or promote
products or services of Licensee, or any third party.
8. By copying, installing or otherwise using Python, Licensee
agrees to be bound by the terms and conditions of this License
Agreement.
BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
-------------------------------------------
BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
Hit Return for more, or q (and Return) to quit:
1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
Individual or Organization ("Licensee") accessing and otherwise using
this software in source or binary form and its associated
documentation ("the Software").
2. Subject to the terms and conditions of this BeOpen Python License
Agreement, BeOpen hereby grants Licensee a non-exclusive,
royalty-free, world-wide license to reproduce, analyze, test, perform
and/or display publicly, prepare derivative works, distribute, and
otherwise use the Software alone or in any derivative version,
provided, however, that the BeOpen Python License is retained in the
Software, alone or in any derivative version prepared by Licensee.
3. BeOpen is making the Software available to Licensee on an "AS IS"
basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
INFRINGE ANY THIRD PARTY RIGHTS.
4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
Hit Return for more, or q (and Return) to quit: q
>>> dir(class)
File "<stdin>", line 1
dir(class)
^
SyntaxError: invalid syntax
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'command', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'test', 'tokenize', 'utils', 'validators', 'var', 'wordlists']
>>> help(command)
>>> help(command)
>>> help(command(__dict__))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__dict__' is not defined
>>> __dict__(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__dict__' is not defined
>>> dict(command)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: dictionary update sequence element #0 has length 1; 2 is required
>>> interpreter
<module 'routersploit.interpreter' from 'routersploit/interpreter.pyc'>
>>> printer interpreter
File "<stdin>", line 1
printer interpreter
^
SyntaxError: invalid syntax
>>> modules
<module 'routersploit.modules' from 'routersploit/modules/__init__.pyc'>
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'command', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'test', 'tokenize', 'utils', 'validators', 'var', 'wordlists']
>>> utils
<module 'routersploit.utils' from 'routersploit/utils/__init__.pyc'>
>>> ()
()
>>> routersploit.utils
<module 'routersploit.utils' from 'routersploit/utils/__init__.pyc'>
>>> routersploit.utils()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'module' object is not callable
>>> multi()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: multi() takes exactly 1 argument (0 given)
>>> multi.()
File "<stdin>", line 1
multi.()
^
SyntaxError: invalid syntax
>>> multi(self)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> dir(multi)
['__call__', '__class__', '__closure__', '__code__', '__defaults__', '__delattr__', '__dict__', '__doc__', '__format__', '__get__', '__getattribute__', '__globals__', '__hash__', '__init__', '__module__', '__name__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'func_closure', 'func_code', 'func_defaults', 'func_dict', 'func_doc', 'func_globals', 'func_name']
>>> globals(multi)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: globals() takes no arguments (1 given)
>>> vars(multi))
File "<stdin>", line 1
vars(multi))
^
SyntaxError: invalid syntax
>>> vars(multi)
{}
>>> __class__(multi))
File "<stdin>", line 1
__class__(multi))
^
SyntaxError: invalid syntax
>>> __list__(multi)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__list__' is not defined
>>> help(multi)
>>> __getattribute__(__hash__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__getattribute__' is not defined
>>> help(__hash__)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name '__hash__' is not defined
>>> print(hash)
<built-in function hash>
>>> hash.()
File "<stdin>", line 1
hash.()
^
SyntaxError: invalid syntax
>>> hash()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: hash() takes exactly one argument (0 given)
>>> hash(self)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> dir()
['LockedIterator', '__builtins__', '__doc__', '__name__', '__package__', 'boolify', 'command', 'exceptions', 'exploits', 'http_request', 'index_modules', 'interpreter', 'modules', 'multi', 'mute', 'payloads', 'print_error', 'print_info', 'print_status', 'print_success', 'print_table', 'printer', 'random_text', 'routersploit', 'sanitize_url', 'shell', 'ssh_interactive', 'test', 'tokenize', 'utils', 'validators', 'var', 'wordlists']
>>> validators()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'module' object is not callable
>>> validators(dir)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'module' object is not callable
>>> dir(validators)
['OptionValidationError', '__builtins__', '__doc__', '__file__', '__name__', '__package__', 'address', 'boolify', 'choice', 'convert_ip', 'convert_port', 'integer', 'ipv4', 'socket', 'strtobool', 'url', 'urlparse']
>>> exceptions()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'module' object is not callable
>>> modules()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: 'module' object is not callable
>>> modules.()
File "<stdin>", line 1
modules.()
^
SyntaxError: invalid syntax
>>> module()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'module' is not defined
>>> index_modules()
['exploits.routers.cisco.ucs_manager_rce', 'exploits.routers.cisco.video_surv_path_traversal', 'exploits.routers.cisco.ios_http_authorization_bypass', 'exploits.routers.cisco.catalyst_2960_rocem', 'exploits.routers.cisco.unified_multi_path_traversal', 'exploits.routers.cisco.firepower_management60_path_traversal', 'exploits.routers.cisco.dpc2420_info_disclosure', 'exploits.routers.cisco.secure_acs_bypass', 'exploits.routers.cisco.firepower_management60_rce', 'exploits.routers.cisco.ucm_info_disclosure', 'exploits.routers.huawei.e5331_mifi_info_disclosure', 'exploits.routers.huawei.hg530_hg520b_password_disclosure', 'exploits.routers.huawei.hg630a_default_creds', 'exploits.routers.huawei.hg866_password_change', 'exploits.routers.huawei.hg520_info_dislosure', 'exploits.routers.multi.heartbleed', 'exploits.routers.multi.misfortune_cookie', 'exploits.routers.multi.shellshock', 'exploits.routers.multi.tcp_32764_rce', 'exploits.routers.multi.tcp_32764_info_disclosure', 'exploits.routers.multi.rom0', 'exploits.routers.multi.ssh_auth_keys', 'exploits.routers.2wire.4011g_5012nv_path_traversal', 'exploits.routers.2wire.gateway_auth_bypass', 'exploits.routers.juniper.screenos_backdoor', 'exploits.routers.tplink.archer_c2_c20i_rce', 'exploits.routers.tplink.wdr740nd_wdr740n_path_traversal', 'exploits.routers.tplink.wdr740nd_wdr740n_backdoor', 'exploits.routers.tplink.wdr842nd_wdr842n_configure_disclosure', 'exploits.routers.netgear.multi_rce', 'exploits.routers.netgear.r7000_r6400_rce', 'exploits.routers.netgear.wnr500_612v3_jnr1010_2010_path_traversal', 'exploits.routers.netgear.n300_auth_bypass', 'exploits.routers.netgear.multi_password_disclosure-2017-5521', 'exploits.routers.netgear.prosafe_rce', 'exploits.routers.netgear.jnr1010_path_traversal', 'exploits.routers.netgear.dgn2200_dnslookup_cgi_rce', 'exploits.routers.netgear.dgn2200_ping_cgi_rce', 'exploits.routers.linksys.1500_2500_rce', 'exploits.routers.linksys.smartwifi_password_disclosure', 
'exploits.routers.linksys.wap54gv3_rce', 'exploits.routers.linksys.wrt100_110_rce', 'exploits.routers.fortinet.fortigate_os_backdoor', 'exploits.routers.asus.infosvr_backdoor_rce', 'exploits.routers.asus.rt_n16_password_disclosure', 'exploits.routers.ipfire.ipfire_proxy_rce', 'exploits.routers.ipfire.ipfire_shellshock', 'exploits.routers.belkin.g_plus_info_disclosure', 'exploits.routers.belkin.g_n150_password_disclosure', 'exploits.routers.belkin.play_max_prce', 'exploits.routers.belkin.n150_path_traversal', 'exploits.routers.belkin.n750_rce', 'exploits.routers.belkin.auth_bypass', 'exploits.routers.bhu.bhu_urouter_rce', 'exploits.routers.dlink.dir_825_path_traversal', 'exploits.routers.dlink.dir_645_password_disclosure', 'exploits.routers.dlink.dir_300_320_600_615_info_disclosure', 'exploits.routers.dlink.dir_645_815_rce', 'exploits.routers.dlink.dsl_2640b_dns_change', 'exploits.routers.dlink.dsl_2730b_2780b_526b_dns_change', 'exploits.routers.dlink.dwr_932_info_disclosure', 'exploits.routers.dlink.dvg_n5402sp_path_traversal', 'exploits.routers.dlink.dsp_w110_rce', 'exploits.routers.dlink.dsl_2730_2750_path_traversal', 'exploits.routers.dlink.dir_300_320_615_auth_bypass', 'exploits.routers.dlink.dcs_930l_auth_rce', 'exploits.routers.dlink.dwl_3200ap_password_disclosure', 'exploits.routers.dlink.dsl_2740r_dns_change', 'exploits.routers.dlink.dwr_932b_backdoor', 'exploits.routers.dlink.dir_300_645_815_upnp_rce', 'exploits.routers.dlink.multi_hedwig_cgi_exec', 'exploits.routers.dlink.dir_815_850l_rce', 'exploits.routers.dlink.dgs_1510_add_user', 'exploits.routers.dlink.multi_hnap_rce', 'exploits.routers.dlink.dir_300_600_rce', 'exploits.routers.dlink.dns_320l_327l_rce', 'exploits.routers.dlink.dsl_2750b_info_disclosure', 'exploits.routers.3com.officeconnect_rce', 'exploits.routers.3com.imc_info_disclosure', 'exploits.routers.3com.3cradsl72_info_disclosure', 'exploits.routers.3com.officeconnect_info_disclosure', 'exploits.routers.3com.ap8760_password_disclosure', 
'exploits.routers.3com.imc_path_traversal', 'exploits.routers.netsys.multi_rce', 'exploits.routers.netcore.udp_53413_rce', 'exploits.routers.thomson.twg849_info_disclosure', 'exploits.routers.thomson.twg850_password_disclosure', 'exploits.routers.billion.5200w_rce', 'exploits.routers.billion.7700nr4_password_disclosure', 'exploits.routers.movistar.adsl_router_bhs_rta_path_traversal', 'exploits.routers.zyxel.p660hn-t_v1_rce', 'exploits.routers.zyxel.p660hn-t_v2_rce', 'exploits.routers.zyxel.d1000_rce', 'exploits.routers.zyxel.d1000_wifi_password_disclosure', 'exploits.routers.zyxel.zywall_usg_extract_hashes', 'exploits.routers.ubiquiti.airos_6_x', 'exploits.routers.comtrend.ct_5361t_password_disclosure', 'exploits.routers.asmax.ar_1004g_password_disclosure', 'exploits.routers.asmax.ar_804_gu_rce', 'exploits.routers.technicolor.tg784_authbypass', 'exploits.routers.technicolor.tc7200_password_disclosure', 'exploits.routers.technicolor.dwg855_authbypass', 'exploits.routers.technicolor.tc7200_password_disclosure_v2', 'exploits.routers.zte.f660_config_disclosure', 'exploits.routers.zte.zxv10_rce', 'exploits.routers.zte.f609_config_disclosure', 'exploits.routers.zte.f460_f660_backdoor', 'exploits.routers.zte.f6xx_default_root', 'exploits.routers.shuttle.915wm_dns_change', 'exploits.cameras.videoiq.videoiq_camera_path_traversal', 'exploits.cameras.multi.netwave_IP_camera', 'exploits.cameras.multi.jvc_vanderbilt_honeywell_path_traversal', 'exploits.cameras.multi.P2P_wificam_rce', 'exploits.cameras.multi.P2P_wificam_credential_disclosure', 'exploits.cameras.honeywell.hicc_1100pt_password_disclosure', 'exploits.cameras.dlink.dcs_930l_932l_auth_bypass', 'exploits.cameras.brickcom.corp_network_cameras_conf_disclosure', 'exploits.cameras.brickcom.users_cgi_cred_disclosure', 'exploits.cameras.grandstream.gxv3611hd_ip_camera_rce', 'exploits.cameras.siemens.CVMS2025_credentials_disclosure', 'exploits.misc.asus.b1m_projector_rce', 'exploits.misc.miele.pg8528_path_traversal', 
'exploits.misc.wepresent.wipg1000_rce', 'payloads.mipsbe.reverse_tcp', 'payloads.mipsbe.bind_tcp', 'payloads.generic.netcat_reverse_tcp', 'payloads.generic.awk_bind_tcp', 'payloads.generic.awk_reverse_tcp', 'payloads.generic.netcat_bind_tcp', 'payloads.mipsle.reverse_tcp', 'payloads.mipsle.bind_tcp', 'payloads.armle.reverse_tcp', 'payloads.armle.bind_tcp', 'scanners.netgear_scan', 'scanners.cisco_scan', 'scanners.cameras_scan', 'scanners.asus_scan', 'scanners.technicolor_scan', 'scanners.linksys_scan', 'scanners.3com_scan', 'scanners.zte_scan', 'scanners.zyxel_scan', 'scanners.misc_scan', 'scanners.autopwn', 'scanners.movistar_scan', 'scanners.multi_scan', 'scanners.2wire_scan', 'scanners.grandstream_scan', 'scanners.shuttle_scan', 'scanners.netsys_scan', 'scanners.tplink_scan', 'scanners.comtrend_scan', 'scanners.routers_scan', 'scanners.thomson_scan', 'scanners.asmax_scan', 'scanners.ubiquiti_scan', 'scanners.belkin_scan', 'scanners.juniper_scan', 'scanners.netcore_scan', 'scanners.billion_scan', 'scanners.fortinet_scan', 'scanners.bhu_scan', 'scanners.ipfire_scan', 'scanners.dlink_scan', 'scanners.huawei_scan', 'creds.ftp_default', 'creds.http_basic_bruteforce', 'creds.ssh_default', 'creds.telnet_bruteforce', 'creds.ftp_bruteforce', 'creds.http_digest_default', 'creds.telnet_default', 'creds.snmp_bruteforce', 'creds.http_form_default', 'creds.http_form_bruteforce', 'creds.ssh_bruteforce', 'creds.http_digest_bruteforce', 'creds.http_basic_default']
>>>
>>>
"""
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import torch
import torch.utils.data
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
from test import prefetch_test
from mkimpreprocess.mkpreprocess import *
def main(opt):
    """Train a detection model, with periodic evaluation and checkpointing.

    Builds the dataset/model/trainer from ``opt`` and runs the epoch loop.
    Each epoch trains and logs scalar summaries; then either
    (a) on validation intervals: saves a checkpoint, scores it via
        ``prefetch_test`` under two image modes, and promotes it to
        ``model_best.pth`` whenever ``read_ap_stat`` reports an improvement, or
    (b) otherwise: saves a rolling ``model_last.pth``.
    The learning rate is divided by 10 at each epoch listed in ``opt.lr_step``.

    Parameters:
        opt: parsed options from ``opts().parse()``.  NOTE: mutated in place
             (``opt.device``, ``opt.load_model`` and ``opt.mode_choice`` are
             reassigned during training).
    """
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset, opt.task)
    # Dataset-dependent output heads must be resolved before model creation.
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    logger = Logger(opt)
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        # Optimizer state and epoch counter are restored only when opt.resume
        # requests it (see load_model).
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'val'),
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True
    )

    if opt.test:
        # Evaluation-only mode: score the validation set once and exit.
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    train_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'train'),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True
    )

    print('Starting training...')
    # NOTE(review): `best` is only referenced by the commented-out validation
    # block below; model selection is currently driven by read_ap_stat instead.
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            print('model_{}.pth'.format(mark))
            # assumes save_dir may carry a literal 'TEST' suffix that must be
            # stripped to find the real model directory -- TODO confirm.
            model_path = opt.save_dir[:-4] if opt.save_dir.endswith('TEST') else opt.save_dir
            opt.load_model = os.path.join(model_path, 'model_last.pth')
            # NOTE(review): `pr` is never read below; kept only as a record of
            # reference AP numbers.
            pr = [19.545927, 18.621721, 16.606655] # performance reference
            for i in range(2): #0,1
                # change mode_choice (0 and 1 select the two image modes
                # evaluated by prefetch_test)
                if i==0:
                    opt.mode_choice = 0
                else:
                    opt.mode_choice = 1
                #print("In main.py, opt.test_scales = ", opt.test_scales)
                # run the prefetch_test to get the ap stats
                prefetch_test(opt)
                # change the current best if needed
                stat_file_dir = os.path.join(model_path, 'stats_car_detection_3d.txt')
                result_dir = os.path.join(model_path, 'model_result.txt')
                change_cb = read_ap_stat(stat_file_dir, result_dir, image_mode=i)
                # if the total best percentage change updated, save the model as the model_best
                if change_cb:
                    save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch, model)
            # we save the model regardless to its performance so that we can choose to start from it if we find overfitting
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)), epoch, model, optimizer)
            # check with the pr
            # if the percentage increase is the largest, save as best model.
            # reset to image mode 0
            opt.mode_choice = 0
            # with torch.no_grad():
            #     log_dict_val, preds = trainer.val(epoch, val_loader)
            # for k, v in log_dict_val.items():
            #     logger.scalar_summary('val_{}'.format(k), v, epoch)
            #     logger.write('{} {:8f} | '.format(k, v))
            # if log_dict_val[opt.metric] < best:
            #     best = log_dict_val[opt.metric]
            #     save_model(os.path.join(opt.save_dir, 'model_best.pth'),
            #                epoch, model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                       epoch, model, optimizer)
        #logger.write('\n')
        if epoch in opt.lr_step:
            #save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)), epoch, model, optimizer)
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
if __name__ == '__main__':
    # Parse command-line options and launch training.
    # (Removed a stray trailing '|' that had been appended to the final line,
    # which made the module a syntax error.)
    opt = opts().parse()
    main(opt)
import numpy as np
import theano
from theano.tensor import as_tensor_variable
from theano.gof import Op, Apply
from theano.gradient import DisconnectedType
from gp_kernel import kernel, symbolic_kernel
from fast_gp_block import post_mean, grad_post_mean
from fast_gp_block import post_cov_ys, grad_cov_ys, post_cov_y, grad_cov_y
import theano.tests.unittest_tools
class CovVec(Op):
    """Theano Op: multiply the GP posterior covariance by each row of `ys`.

    Wraps the numpy implementation `post_cov_ys` for the forward pass and a
    companion `CovVecGrad` Op for the backward pass.  Inputs are sparse
    interpolation indices/weights for the train and test points, the kernel
    hyper-parameters, the independent noise level, and the matrix `ys` of
    vectors to multiply.
    """

    def __init__(self, inducing_points, kernel, symbolic_kernel):
        # We could have possibly defined the kernel symbolicly and
        # turn that into non-symbolic version using theano.function,
        # but it's 2 to 3 times slower than a pure numpy implementation.
        self.kernel = kernel
        self.symbolic_kernel = symbolic_kernel
        # Differences from the first inducing point; presumably the kernel is
        # stationary so only these offsets are needed -- TODO confirm.
        self.t_diff = inducing_points - inducing_points[0]
        self.post_cov_ys = post_cov_ys
        self.grad_cov_vec = CovVecGrad()

    def use_single_sample(self):
        # NOTE(review): the bare `return` makes the two assignments below
        # unreachable, so calling this method is currently a no-op.  It looks
        # like a deliberately disabled switch to the single-sample kernels
        # (post_cov_y / grad_cov_y) -- confirm intent before re-enabling.
        return
        self.post_cov_ys = post_cov_y
        self.grad_cov_vec.grad_cov_ys = grad_cov_y

    def make_node(self, idx_train, w_train, idx_test, w_test,
                  gp_params, indep_noise, ys):
        # Coerce every input to a theano tensor variable; output has the
        # same type as `ys`.
        idx_train = as_tensor_variable(idx_train)
        w_train = as_tensor_variable(w_train)
        idx_test = as_tensor_variable(idx_test)
        w_test = as_tensor_variable(w_test)
        gp_params = as_tensor_variable(gp_params)
        indep_noise = as_tensor_variable(indep_noise)
        ys = as_tensor_variable(ys)
        return Apply(self, [idx_train, w_train, idx_test, w_test,
                            gp_params, indep_noise, ys], [ys.type()])

    def perform(self, node, inputs, outputs):
        # Forward pass: evaluate the kernel on the inducing-point offsets,
        # then delegate to the numpy routine.
        (idx_train, w_train, idx_test, w_test,
         gp_params, indep_noise, ys) = inputs
        z, = outputs
        u = self.kernel(self.t_diff, gp_params)
        len_u = len(u)
        z[0] = self.post_cov_ys(idx_train, w_train, idx_test, w_test,
                                u, len_u, indep_noise, ys)

    def grad(self, inputs, g_outputs):
        # Backward pass: index/weight inputs are non-differentiable
        # (disconnected); gradients w.r.t. gp_params, indep_noise and ys are
        # computed by the CovVecGrad Op using the jacobian of the symbolic
        # kernel w.r.t. the hyper-parameters.
        (idx_train, w_train, idx_test, w_test,
         gp_params, indep_noise, ys) = inputs
        gz, = g_outputs
        u = self.symbolic_kernel(self.t_diff, gp_params)
        grad_u = theano.gradient.jacobian(u, gp_params)
        return ([DisconnectedType()(),   # idx_train
                 DisconnectedType()(),   # w_train
                 DisconnectedType()(),   # idx_test
                 DisconnectedType()()] + # w_test
                self.grad_cov_vec(idx_train, w_train, idx_test, w_test, u,
                                  gp_params, indep_noise, ys, gz, grad_u))

    def connection_pattern(self, node):
        # Matches grad(): only gp_params, indep_noise and ys are connected.
        return [[False], [False], [False], [False], [True], [True], [True]]

    def infer_shape(self, node, shapes):
        # Output is shaped like `ys` (the last input).
        return [shapes[-1]]
class CovVecGrad(Op):
    """Backward Op for CovVec.

    Produces the gradients with respect to the kernel hyper-parameters, the
    independent noise, and `ys`, delegating the numerics to `grad_cov_ys`
    (swappable via the attribute, see CovVec.use_single_sample).
    """

    def __init__(self):
        self.grad_cov_ys = grad_cov_ys

    def make_node(self, idx_train, w_train, idx_test, w_test,
                  u, gp_params, indep_noise, ys, gz, grad_u):
        # Coerce all ten inputs to tensor variables in one pass.
        tensor_inputs = [as_tensor_variable(arg) for arg in
                         (idx_train, w_train, idx_test, w_test,
                          u, gp_params, indep_noise, ys, gz, grad_u)]
        # Outputs mirror the types of gp_params, indep_noise and ys.
        output_types = [tensor_inputs[5].type(),
                        tensor_inputs[6].type(),
                        tensor_inputs[7].type()]
        return Apply(self, tensor_inputs, output_types)

    def perform(self, node, inputs, outputs):
        (idx_train, w_train, idx_test, w_test,
         u, gp_params, indep_noise, ys, gz, grad_u) = inputs
        # gz.shape: (#z, #test)
        gradients = self.grad_cov_ys(
            idx_train, w_train, idx_test, w_test,
            u, gp_params, indep_noise, ys, grad_u, gz)
        for slot, value in zip(outputs, gradients):
            slot[0] = value

    def infer_shape(self, node, shapes):
        # Gradients are shaped like gp_params, indep_noise and ys.
        return [shapes[5], shapes[6], shapes[7]]
class PosteriorMean(Op):
    """Theano Op: GP posterior mean at the test points.

    Forward pass delegates to the numpy routine `post_mean`; the backward
    pass (w.r.t. gp_params and indep_noise only) is handled by
    `GradPosteriorMean`.
    """

    def __init__(self, inducing_points, kernel, symbolic_kernel):
        self.kernel = kernel
        self.symbolic_kernel = symbolic_kernel
        self.grad_posterior_mean = GradPosteriorMean()
        # Offsets from the first inducing point; presumably a stationary
        # kernel evaluated on these differences -- TODO confirm.
        self.t_diff = inducing_points - inducing_points[0]

    def make_node(self, idx_train, w_train, idx_test, w_test,
                  gp_params, indep_noise, y):
        # Coerce every input to a tensor variable; output typed like `y`.
        idx_train = as_tensor_variable(idx_train)
        w_train = as_tensor_variable(w_train)
        idx_test = as_tensor_variable(idx_test)
        w_test = as_tensor_variable(w_test)
        gp_params = as_tensor_variable(gp_params)
        indep_noise = as_tensor_variable(indep_noise)
        y = as_tensor_variable(y)
        return Apply(self, [idx_train, w_train, idx_test, w_test,
                            gp_params, indep_noise, y], [y.type()])

    def perform(self, node, inputs, outputs):
        # Forward pass: evaluate the kernel, then the numpy posterior mean.
        (idx_train, w_train, idx_test, w_test,
         gp_params, indep_noise, y) = inputs
        z, = outputs
        u = self.kernel(self.t_diff, gp_params)
        len_u = len(u)
        z[0] = post_mean(idx_train, w_train, idx_test, w_test,
                         u, len_u, indep_noise, y)

    def grad(self, inputs, g_outputs):
        # Backward pass: only gp_params and indep_noise are differentiable;
        # the interpolation inputs and `y` are disconnected.
        (idx_train, w_train, idx_test, w_test,
         gp_params, indep_noise, y) = inputs
        u = self.symbolic_kernel(self.t_diff, gp_params)
        grad_u = theano.gradient.jacobian(u, gp_params)
        gz, = g_outputs
        grad_gp_params, grad_indep_noise = self.grad_posterior_mean(
            idx_train, w_train, idx_test, w_test,
            u, gp_params, indep_noise, y, gz, grad_u)
        return [DisconnectedType()(),  # idx_train
                DisconnectedType()(),  # w_train
                DisconnectedType()(),  # idx_test
                DisconnectedType()(),  # w_test
                grad_gp_params,
                grad_indep_noise,
                DisconnectedType()()]  # y

    def connection_pattern(self, node):
        # Matches grad(): only gp_params and indep_noise are connected.
        return [[False], [False], [False], [False], [True], [True], [False]]

    def infer_shape(self, node, shapes):
        # idx_test.shape: (#n_test, 2) -- the output has one mean per test point.
        return [(shapes[2][0],)]
class GradPosteriorMean(Op):
    """Backward Op for PosteriorMean.

    Computes gradients with respect to the kernel hyper-parameters and the
    independent noise via the numpy routine `grad_post_mean`.
    """

    def make_node(self, idx_train, w_train, idx_test, w_test,
                  u, gp_params, indep_noise, y, gz, grad_u):
        # Coerce all ten inputs to tensor variables in one pass.
        tensor_inputs = [as_tensor_variable(arg) for arg in
                         (idx_train, w_train, idx_test, w_test,
                          u, gp_params, indep_noise, y, gz, grad_u)]
        # Outputs mirror the types of gp_params and indep_noise.
        output_types = [tensor_inputs[5].type(), tensor_inputs[6].type()]
        return Apply(self, tensor_inputs, output_types)

    def perform(self, node, inputs, outputs):
        (idx_train, w_train, idx_test, w_test,
         u, gp_params, indep_noise, y, gz, grad_u) = inputs
        # gz.shape: (#z, #test)
        gradients = grad_post_mean(
            idx_train, w_train, idx_test, w_test,
            u, gp_params, indep_noise, y, grad_u, gz)
        for slot, value in zip(outputs, gradients):
            slot[0] = value

    def infer_shape(self, node, shapes):
        # Gradients are shaped like gp_params and indep_noise.
        return [shapes[5], shapes[6]]
def main():
    """Smoke-test / demo for the CovVec and PosteriorMean Ops (Python 2).

    Draws a small GP sample, checks the Ops' gradients numerically with
    theano's verify_grad, prints a few evaluated values, and finally plots
    the posterior mean with a two-standard-deviation band.
    """
    from fast_gp import sparse_w
    np.random.seed(0)
    # Sample n_data points from a GP with a squared-exponential covariance
    # (amplitude a, inverse length-scale b, jitter c).
    n_data = 10
    x = np.random.uniform(size=n_data)
    #x = np.float32(x)
    x = np.sort(x)
    a = .1
    b = 10
    c = .001
    mu = np.zeros(n_data)
    cov = a * np.exp(-b * (x[:, np.newaxis] - x)**2) + c * np.eye(n_data)
    y = np.random.multivariate_normal(mu, cov)
    #print x
    #print y
    # Build an evenly spaced inducing-point grid spanning the data, with a
    # small margin so interpolation stays in range.
    x_min, x_max = x.min(), x.max()
    #len_u = 2048 + 1
    #len_u = 1024 + 1
    len_u = 64
    extra_u = 2
    margin = (x_max - x_min) / (len_u - extra_u * 2) * 2
    u = np.linspace(x_min - margin, x_max + margin, len_u)
    #x_test = u[1:]
    x_test = np.linspace(x_min, x_max, 20)
    # Sparse interpolation indices/weights for train and test points.
    idx_train, w_train = sparse_w(u, x)
    idx_test, w_test = sparse_w(u, x_test)
    len_test = len(x_test)
    y_test = np.random.uniform(size=(2, len_test))
    #y_test = np.random.uniform(size=(1, len_test))
    # NOTE(review): this local `post_mean` shadows the imported numpy
    # routine of the same name within this function.
    post_mean = PosteriorMean(u, kernel, symbolic_kernel)

    def sub_mean(t_gp_params, t_indep_noise):
        # Closure over the fixed interpolation data; only the
        # hyper-parameters vary for gradient checking.
        return post_mean(idx_train, w_train, idx_test, w_test,
                         t_gp_params, t_indep_noise, y * 1)
    print 'verify grad mean'
    theano.tests.unittest_tools.verify_grad(sub_mean, [(a, b), c],
                                            n_tests=5, eps=1.0e-7,
                                            abs_tol=0.001, rel_tol=0.001)
    #return
    cov_vec = CovVec(u, kernel, symbolic_kernel)
    # Symbolic inputs for compiled theano functions.
    t_idx_train = theano.tensor.imatrix()
    t_w_train = theano.tensor.matrix()
    t_idx_test = theano.tensor.imatrix()
    t_w_test = theano.tensor.matrix()
    t_gp_params = theano.tensor.vector()
    t_indep_noise = theano.tensor.scalar()
    t_ys = theano.tensor.matrix()
    t_y = theano.tensor.vector()
    # Apply the Op twice (v of ys, then v of ys - v) to exercise composition.
    v = cov_vec(t_idx_train, t_w_train, t_idx_test, t_w_test,
                t_gp_params, t_indep_noise, t_ys)
    ys1 = t_ys - v
    v = cov_vec(t_idx_train, t_w_train, t_idx_test, t_w_test,
                t_gp_params, t_indep_noise, ys1)
    v_fn = theano.function([t_idx_train, t_w_train, t_idx_test, t_w_test,
                            t_gp_params, t_indep_noise, t_ys], v)

    def vf(t_gp_params, t_indep_noise):
        # Three chained applications for a tougher gradient check.
        v = cov_vec(idx_train, w_train, idx_test, w_test,
                    t_gp_params, t_indep_noise, y_test)
        ys1 = -y_test + v
        v = cov_vec(idx_train, w_train, idx_test, w_test,
                    t_gp_params, t_indep_noise, ys1)
        ys1 = y_test - v
        v = cov_vec(idx_train, w_train, idx_test, w_test,
                    t_gp_params, t_indep_noise, ys1)
        return v
    print 'verify grad ##'
    theano.tests.unittest_tools.verify_grad(vf, [(a, b), c],
                                            n_tests=10, eps=1.0e-6,
                                            #abs_tol=0.001, rel_tol=0.001)
                                            abs_tol=0.01, rel_tol=0.01)
    print '###'
    # Compile and evaluate the scalar sum of v and its gradient.
    vsum = v.sum()
    vsum_fn = theano.function([t_idx_train, t_w_train,
                               t_idx_test, t_w_test,
                               t_gp_params, t_indep_noise, t_ys],
                              vsum)
    v_ = vsum_fn(idx_train, w_train, idx_test, w_test, (a, b), c, y_test)
    print v_
    grad_vsum = theano.grad(vsum, [t_gp_params, t_indep_noise])
    grad_vsum_fn = theano.function([t_idx_train,
                                    t_w_train,
                                    t_idx_test,
                                    t_w_test,
                                    t_gp_params,
                                    t_indep_noise,
                                    t_ys
                                    ],
                                   grad_vsum,
                                   #on_unused_input='ignore'
                                   )
    grad_v = grad_vsum_fn(idx_train, w_train, idx_test, w_test, (a, b), c,
                          y_test)
    print 'grad v'
    print grad_v

    def sub_cov_vec(t_gp_params, t_indep_noise):
        # Single application, for a direct gradient check.
        return cov_vec(idx_train, w_train, idx_test, w_test,
                       t_gp_params, t_indep_noise, y_test)
    print 'verify grad'
    theano.tests.unittest_tools.verify_grad(sub_cov_vec, [(a, b), c],
                                            n_tests=5, eps=1.0e-5,
                                            abs_tol=0.001, rel_tol=0.001)
    # NOTE(review): v_fn is recompiled identically to the earlier definition.
    v_fn = theano.function([t_idx_train, t_w_train, t_idx_test, t_w_test,
                            t_gp_params, t_indep_noise, t_ys], v)
    v_ = v_fn(idx_train, w_train, idx_test, w_test, (a, b), c, y_test)
    print 'v'
    print v_
    #return
    # Feeding the identity recovers the covariance matrix; its diagonal is
    # the posterior variance at the test points.
    var = v_fn(idx_train, w_train, idx_test, w_test, (a, b), c,
               np.eye(len_test))
    var = np.diag(var)
    post_mean = PosteriorMean(u, kernel, symbolic_kernel)
    pmean = post_mean(t_idx_train, t_w_train, t_idx_test, t_w_test,
                      t_gp_params, t_indep_noise, t_y)
    pmean_fn = theano.function([t_idx_train, t_w_train, t_idx_test, t_w_test,
                                t_gp_params, t_indep_noise, t_y], pmean)
    pmu = pmean_fn(idx_train, w_train, idx_test, w_test, (a, b), c, y)
    #print var
    # Plot: posterior mean with +/- 2 standard deviations and the data.
    import pylab as pl
    pl.figure()
    std2 = np.sqrt(var) * 2
    color = 'b'
    pl.fill_between(x_test, pmu - std2, pmu + std2, color=color,
                    edgecolor='none', alpha=.3)
    pl.plot(x_test, pmu, '-', c=color)
    pl.plot(x, y, 'o', c=color)
    pl.show()
# Run the gradient checks and the demo plot when executed as a script.
if __name__ == '__main__':
    main()
|
# Copyright © 2011 MLstate
#
# This file is part of Opa.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pygments.lexer import RegexLexer
from pygments.token import *
import pygments
import pygments.formatters
import sys
class OpaLexer(RegexLexer):
    """
    Lexer for the Opa language.

    A pygments RegexLexer: `tokens` maps lexer states to lists of
    (regex, token-type[, next-state]) rules.
    """
    name = 'Opa'
    aliases = ['opa']
    filenames = ['*.opa']

    # most of these aren't strictly keywords
    # but if you color only real keywords, you might just
    # as well not color anything
    keywords = [
        'and','as',
        'begin',
        'css',
        'database','db','do',
        'else','end','external',
        'forall',
        'if','import',
        'match',
        'package','parser',
        'rec',
        'server',
        'then','type',
        'val',
        'with',
        'xml_parser'
    ]

    # matches both stuff and `stuff`
    ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'

    op_re = r'[.=\-<>,@~%/+?*&^!]'
    punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere
                             # because they are also used for inserts

    tokens = {
        # copied from the caml lexer, should be adapted
        'escape-sequence': [
            (r'\\[\\\"\'ntr}]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\x[0-9a-fA-F]{2}', String.Escape),
        ],

        # factorizing these rules, because they are inserted many times
        'comments': [
            (r'/\*', Comment, 'nested-comment'),
            (r'//.*?$', Comment),
        ],
        'comments-and-spaces': [
            pygments.lexer.include('comments'),
            (r'\s+', Text),
        ],

        'root': [
            pygments.lexer.include('comments-and-spaces'),
            # keywords
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
            # directives
            # we could parse the actual set of directives instead of anything
            # starting with @, but this is troublesome
            # because it needs to be adjusted all the time
            # and assuming we parse only sources that compile, it is useless
            (r'@'+ident_re+r'\b', Name.Builtin.Pseudo),

            # number literals
            # FIX: the decimal points are now escaped; previously the bare
            # '.' matched any character, so identifiers such as 'x12e5'
            # were mis-tokenized as floats.
            (r'-?\.[\d]+([eE][+\-]?\d+)', Number.Float),
            (r'-?\d+\.\d*([eE][+\-]?\d+)', Number.Float),
            (r'-?\d+[eE][+\-]?\d+', Number.Float),
            (r'0[xX][\da-fA-F]+', Number.Hex),
            (r'0[oO][0-7]+', Number.Oct),
            (r'0[bB][01]+', Number.Binary),
            (r'\d+', Number.Integer),
            # color literals
            (r'#[\da-fA-F]{3,6}', Number.Integer),

            # string literals
            (r'"', String.Double, 'string'),
            # char literal, should be checked because this is the regexp from the caml lexer
            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
             String.Char),

            # this is meant to deal with embedded exprs in strings
            # every time we find a '}' we pop a state so that if we were
            # inside a string, we are back in the string state
            # as a consequence, we must also push a state every time we find a '{'
            # or else we will have errors when parsing {} for instance
            (r'{', Operator, '#push'),
            (r'}', Operator, '#pop'),

            # html literals
            # this is much more strict than the actual parser,
            # since a<b would not be parsed as html
            # but then again, the parser is way too lax, and we can't hope
            # to have something as tolerant
            (r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),

            # db path
            # matching the '[_]' in '/a[_]' because it is a part
            # of the syntax of the db path definition
            # unfortunately, i don't know how to match the ']' in
            # /a[1], so this is somewhat inconsistent
            (r'[@?!]?(/\w+)+(\[_\])?', Name.Variable),
            # putting the same color on <- as on db path, since
            # it can be used only to mean Db.write
            (r'<-(?!'+op_re+r')', Name.Variable),

            # 'modules'
            # although modules are not distinguished by their names as in caml
            # the standard library seems to follow the convention that only
            # modules are capitalized
            (r'\b([A-Z]\w*)(?=\.)', Name.Namespace),

            # operators
            # = has a special role because this is the only
            # way to syntactically distinguish binding constructions
            # unfortunately, this colors the equal in {x=2} too
            (r'=(?!'+op_re+r')', Keyword),
            (r'(%s)+' % op_re, Operator),
            (r'(%s)+' % punc_re, Operator),

            # coercions
            (r':', Operator, 'type'),
            # type variables
            # we need this rule because we don't parse specially type definitions
            # so in "type t('a) = ...", "'a" is parsed by 'root'
            ("'"+ident_re, Keyword.Type),

            # id literal, #something, or #{expr}
            (r'#'+ident_re, String.Single),
            (r'#(?={)', String.Single),

            # identifiers
            # this avoids to color '2' in 'a2' as an integer
            (ident_re, Text),

            # default, not sure if that is needed or not
            # (r'.', Text),
        ],

        # it is quite painful to have to parse types to know where they end
        # this is the general rule for a type
        # a type is either:
        # * -> ty
        # * type-with-slash
        # * type-with-slash -> ty
        # * type-with-slash (, type-with-slash)+ -> ty
        #
        # the code is pretty funky in here, but this code would roughly translate
        # in caml to:
        # let rec type stream =
        # match stream with
        # | [< "->"; stream >] -> type stream
        # | [< ""; stream >] ->
        #   type_with_slash stream
        #   type_lhs_1 stream;
        # and type_1 stream = ...
        'type': [
            pygments.lexer.include('comments-and-spaces'),
            (r'->', Keyword.Type),
            (r'', Keyword.Type, ('#pop', 'type-lhs-1', 'type-with-slash')),
        ],

        # parses all the atomic or closed constructions in the syntax of type expressions
        # record types, tuple types, type constructors, basic type and type variables
        'type-1': [
            pygments.lexer.include('comments-and-spaces'),
            (r'\(', Keyword.Type, ('#pop', 'type-tuple')),
            (r'~?{', Keyword.Type, ('#pop', 'type-record')),
            (ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')),
            (ident_re, Keyword.Type, '#pop'),
            ("'"+ident_re, Keyword.Type),
            # this case is not in the syntax but sometimes
            # we think we are parsing types when in fact we are parsing
            # some css, so we just pop the states until we get back into
            # the root state
            (r'', Keyword.Type, '#pop'),
        ],

        # type-with-slash is either:
        # * type-1
        # * type-1 (/ type-1)+
        'type-with-slash': [
            pygments.lexer.include('comments-and-spaces'),
            (r'', Keyword.Type, ('#pop', 'slash-type-1', 'type-1')),
        ],
        'slash-type-1': [
            pygments.lexer.include('comments-and-spaces'),
            ('/', Keyword.Type, ('#pop', 'type-1')),
            # same remark as above
            (r'', Keyword.Type, '#pop'),
        ],

        # we go in this state after having parsed a type-with-slash
        # while trying to parse a type
        # and at this point we must determine if we are parsing an arrow
        # type (in which case we must continue parsing) or not (in which
        # case we stop)
        'type-lhs-1': [
            pygments.lexer.include('comments-and-spaces'),
            (r'->', Keyword.Type, ('#pop', 'type')),
            (r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')),
            (r'', Keyword.Type, '#pop'),
        ],
        'type-arrow': [
            pygments.lexer.include('comments-and-spaces'),
            # the look ahead here allows to parse f(x : int, y : float -> truc) correctly
            (r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'),
            (r'->', Keyword.Type, ('#pop', 'type')),
            # same remark as above
            (r'', Keyword.Type, '#pop'),
        ],

        # no need to do precise parsing for tuples and records
        # because they are closed constructions, so we can simply
        # find the closing delimiter
        # note that this function would not work if the source
        # contained identifiers like `{)` (although it could be patched
        # to support it)
        'type-tuple': [
            pygments.lexer.include('comments-and-spaces'),
            (r'[^\(\)/*]+', Keyword.Type),
            (r'[/*]', Keyword.Type),
            (r'\(', Keyword.Type, '#push'),
            (r'\)', Keyword.Type, '#pop'),
        ],
        'type-record': [
            pygments.lexer.include('comments-and-spaces'),
            (r'[^{}/*]+', Keyword.Type),
            (r'[/*]', Keyword.Type),
            (r'{', Keyword.Type, '#push'),
            (r'}', Keyword.Type, '#pop'),
        ],

        # 'type-tuple': [
        #     pygments.lexer.include('comments-and-spaces'),
        #     (r'\)', Keyword.Type, '#pop'),
        #     (r'', Keyword.Type, ('#pop', 'type-tuple-1', 'type-1')),
        # ],
        # 'type-tuple-1': [
        #     pygments.lexer.include('comments-and-spaces'),
        #     (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
        #     (r',', Keyword.Type, 'type-1'),
        # ],
        # 'type-record':[
        #     pygments.lexer.include('comments-and-spaces'),
        #     (r'}', Keyword.Type, '#pop'),
        #     (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
        # ],
        # 'type-record-field-expr': [
        #
        # ],

        'nested-comment': [
            (r'[^/*]+', Comment),
            (r'/\*', Comment, '#push'),
            (r'\*/', Comment, '#pop'),
            (r'[/*]', Comment),
        ],

        # the copy-pasting between string and single-string
        # is kinda sad. Is there a way to avoid that??
        'string': [
            (r'[^\\"{]+', String.Double),
            (r'"', String.Double, '#pop'),
            (r'{', Operator, 'root'),
            pygments.lexer.include('escape-sequence'),
        ],
        'single-string': [
            (r'[^\\\'{]+', String.Double),
            (r'\'', String.Double, '#pop'),
            (r'{', Operator, 'root'),
            pygments.lexer.include('escape-sequence'),
        ],

        # all the html stuff
        # can't really reuse some existing html parser
        # because we must be able to parse embedded expressions

        # we are in this state after someone parsed the '<' that
        # started the html literal
        'html-open-tag': [
            (r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
            (r'>', String.Single, ('#pop', 'html-content')),
        ],

        # we are in this state after someone parsed the '</' that
        # started the end of the closing tag
        'html-end-tag': [
            # this is a star, because </> is allowed
            (r'[\w\-:]*>', String.Single, '#pop'),
        ],

        # we are in this state after having parsed '<ident(:ident)?'
        # we thus parse a possibly empty list of attributes
        'html-attr': [
            (r'\s+', Text),
            (r'[\w\-:]+=', String.Single, 'html-attr-value'),
            (r'/>', String.Single, '#pop'),
            (r'>', String.Single, ('#pop', 'html-content')),
        ],
        'html-attr-value': [
            (r"'", String.Single, ('#pop', 'single-string')),
            (r'"', String.Single, ('#pop', 'string')),
            (r'#'+ident_re, String.Single, '#pop'),
            (r'#(?={)', String.Single, ('#pop', 'root')),
            (r'{', Operator, ('#pop', 'root')), # this is a tail call!
        ],

        # we should probably deal with '\' escapes here
        'html-content': [
            (r'<!--', Comment, 'html-comment'),
            (r'</', String.Single, ('#pop', 'html-end-tag')),
            (r'<', String.Single, 'html-open-tag'),
            (r'{', Operator, 'root'),
            (r'.|\s+', String.Single),
        ],
        'html-comment': [
            (r'-->', Comment, '#pop'),
            (r'[^\-]+|-', Comment),
        ],
    }
# when this file in runned with the interpreter,
# it colors all the opa files whose names are given
# on the command line
if __name__ == '__main__':
for i in sys.argv:
if i[-4:] == ".opa":
print("opening "+i)
code = open(i, 'r').read()
formatter = pygments.formatters.HtmlFormatter(full=True, linenos=True)
print pygments.highlight(code, OpaLexer(), formatter)
|
"""
Implements an EYAML-capable version of YAML Path.
Copyright 2018, 2019 William W. Kimball, Jr. MBA MSIS
"""
import re
from subprocess import run, PIPE, CalledProcessError
from os import access, sep, X_OK
from shutil import which
from typing import Any, Generator, List, Optional
from ruamel.yaml.comments import CommentedSeq, CommentedMap
from yamlpath import YAMLPath
from yamlpath.eyaml.enums import EYAMLOutputFormats
from yamlpath.enums import YAMLValueFormats
from yamlpath.eyaml.exceptions import EYAMLCommandException
from yamlpath.wrappers import ConsolePrinter
from yamlpath import Processor
class EYAMLProcessor(Processor):
"""
Extend Processor to understand EYAML values.
Note that due to a bug in the eyaml command at the time of this writing,
either both or neither of the public and private keys must be set. So,
even though you would normally need only the public key to encrypt values,
you must also supply the private key, anyway.
Parameters:
1. logger (ConsolePrinter) Instance of ConsolePrinter or subclass
2. data (Any) Parsed YAML data
3. **kwargs (Optional[str]) can contain the following keyword
parameters:
    * binary (str) The external eyaml command to use when performing
      data encryption or decryption; if no path is provided, the command
      will be sought on the system PATH. Default="eyaml"
    * publickey (Optional[str]) Fully-qualified path to the public key
      for use with data encryption
    * privatekey (Optional[str]) Fully-qualified path to the private key
      for use with data decryption
Returns: N/A
Raises: N/A
"""
def __init__(self, logger: ConsolePrinter, data: Any,
**kwargs: Optional[str]) -> None:
self.eyaml: Optional[str] = kwargs.pop("binary", "eyaml")
self.publickey: Optional[str] = kwargs.pop("publickey", None)
self.privatekey: Optional[str] = kwargs.pop("privatekey", None)
super().__init__(logger, data)
# pylint: disable=locally-disabled,too-many-branches
def _find_eyaml_paths(
self, data: Any, build_path: str = ""
) -> Generator[YAMLPath, None, None]:
"""
Recursively generates a set of stringified YAML Paths, each entry
leading to an EYAML value within the evaluated YAML data.
Parameters:
1. data (Any) The parsed YAML data to process
2. build_path (str) A YAML Path under construction
Returns: (Generator[Path, None, None]) each YAML Path entry as they
are discovered
Raises: N/A
"""
if isinstance(data, CommentedSeq):
build_path += "["
for idx, ele in enumerate(data):
if hasattr(ele, "anchor") and ele.anchor.value is not None:
tmp_path = build_path + "&" + ele.anchor.value + "]"
else:
tmp_path = build_path + str(idx) + "]"
if self.is_eyaml_value(ele):
yield YAMLPath(tmp_path)
else:
for subpath in self._find_eyaml_paths(ele, tmp_path):
yield subpath
elif isinstance(data, CommentedMap):
if build_path:
build_path += "."
for key, val in data.non_merged_items():
tmp_path = build_path + str(key)
if self.is_eyaml_value(val):
yield YAMLPath(tmp_path)
else:
for subpath in self._find_eyaml_paths(val, tmp_path):
yield subpath
def find_eyaml_paths(self) -> Generator[YAMLPath, None, None]:
    """
    Recursively generates a set of stringified YAML Paths, each entry
    leading to an EYAML value within the evaluated YAML data.

    Parameters: N/A
    Returns: (Generator[YAMLPath, None, None]) each YAML Path entry as they
    are discovered
    Raises: N/A
    """
    # Delegate to the recursive helper, starting the scan at the data root.
    yield from self._find_eyaml_paths(self.data)
def decrypt_eyaml(self, value: str) -> str:
    """
    Decrypts an EYAML value.

    Parameters:
      1. value (str) The EYAML value to decrypt

    Returns: (str) The decrypted value or the original value if it was not
    actually encrypted.
    Raises:
      - `EYAMLCommandException` when the eyaml binary cannot be utilized or
        the decryption fails
    """
    # Pass through anything that is not an EYAML ciphertext.
    if not self.is_eyaml_value(value):
        return value
    if not self._can_run_eyaml():
        raise EYAMLCommandException("No accessible eyaml command.")

    cmdstr: str = "{} decrypt --quiet --stdin".format(self.eyaml)
    if self.publickey:
        cmdstr += " --pkcs7-public-key={}".format(self.publickey)
    if self.privatekey:
        cmdstr += " --pkcs7-private-key={}".format(self.privatekey)

    cmd: List[str] = cmdstr.split()
    # Collapse the (possibly folded multi-line) ciphertext to one line
    # before piping it to eyaml.
    cleanval: str = str(value).replace("\n", "").replace(" ", "").rstrip()
    bval: bytes = (cleanval + "\n").encode("ascii")
    self.logger.debug(
        "EYAMLPath::decrypt_eyaml: About to execute {} against:\n{}"
        .format(cmdstr, cleanval)
    )

    try:
        # check=True is required for run() to raise CalledProcessError on
        # a non-zero exit; without it, the except branch below was dead
        # code and decryption failures could pass through silently.
        retval: str = run(
            cmd,
            stdout=PIPE,
            input=bval,
            check=True
        ).stdout.decode('ascii').rstrip()
    except CalledProcessError as ex:
        raise EYAMLCommandException(
            "The {} command cannot be run due to exit code: {}"
            .format(self.eyaml, ex.returncode)
        ) from ex

    # Check for bad decryptions
    self.logger.debug(
        "EYAMLPath::decrypt_eyaml: Decrypted result: {}".format(retval)
    )
    if not retval or retval == cleanval:
        raise EYAMLCommandException(
            "Unable to decrypt value! Please verify you are using the"
            + " correct old EYAML keys and the value is not corrupt: {}"
            .format(cleanval)
        )

    return retval
def encrypt_eyaml(self, value: str,
                  output: EYAMLOutputFormats = EYAMLOutputFormats.STRING
                  ) -> str:
    """
    Encrypts a value via EYAML.

    Parameters:
      1. value (str) the value to encrypt
      2. output (EYAMLOutputFormats) the output format of the encryption

    Returns: (str) The encrypted result or the original value if it was
    already an EYAML encryption.
    Raises:
      - `EYAMLCommandException` when the eyaml binary cannot be utilized.
    """
    # Never double-encrypt an already encrypted value.
    if self.is_eyaml_value(value):
        return value
    if not self._can_run_eyaml():
        raise EYAMLCommandException(
            "The eyaml binary is not executable at {}.".format(self.eyaml)
        )

    cmdstr: str = ("{} encrypt --quiet --stdin --output={}"
                   .format(self.eyaml, output))
    if self.publickey:
        cmdstr += " --pkcs7-public-key={}".format(self.publickey)
    if self.privatekey:
        cmdstr += " --pkcs7-private-key={}".format(self.privatekey)

    cmd: List[str] = cmdstr.split()
    self.logger.debug(
        "EYAMLPath::encrypt_eyaml: About to execute: {}"
        .format(" ".join(cmd))
    )
    bval: bytes = (value + "\n").encode("ascii")

    try:
        retval: str = (
            run(cmd, stdout=PIPE, input=bval, check=True)
            .stdout
            .decode('ascii')
            .rstrip()
        )
    except CalledProcessError as ex:
        # Chain the original error so the real exit context is preserved
        # in the traceback.
        raise EYAMLCommandException(
            "The {} command cannot be run due to exit code: {}"
            .format(self.eyaml, ex.returncode)
        ) from ex

    if not retval:
        raise EYAMLCommandException(
            ("The {} command was unable to encrypt your value. Please"
             + " verify this process can run that command and read your"
             + " EYAML keys.").format(self.eyaml)
        )

    if output is EYAMLOutputFormats.BLOCK:
        # Block output is whitespace-folded by eyaml; strip the padding and
        # terminate with a newline for YAML folded-style emission.
        retval = re.sub(r" +", "", retval) + "\n"

    self.logger.debug(
        "EYAMLPath::encrypt_eyaml: Encrypted result:\n{}".format(retval)
    )
    return retval
def set_eyaml_value(self, yaml_path: YAMLPath, value: str,
                    output: EYAMLOutputFormats = EYAMLOutputFormats.STRING,
                    mustexist: bool = False) -> None:
    """
    Encrypts a value and stores the result to zero or more nodes specified
    via YAML Path.

    Parameters:
      1. yaml_path (YAMLPath) The YAML Path specifying which nodes are to
         receive the encrypted value
      2. value (str) The value to encrypt
      3. output (EYAMLOutputFormats) the output format of the encryption
      4. mustexist (bool) Indicates whether YAML Path must specify a
         pre-existing node

    Returns: N/A
    Raises:
      - `YAMLPathException` when YAML Path is invalid
    """
    self.logger.verbose(
        "Encrypting value(s) for {}."
        .format(yaml_path)
    )
    encrypted: str = self.encrypt_eyaml(value, output)
    # STRING output is written with the default scalar format; BLOCK output
    # is emitted as a folded scalar.
    if output is EYAMLOutputFormats.STRING:
        emit_format = YAMLValueFormats.DEFAULT
    else:
        emit_format = YAMLValueFormats.FOLDED
    self.set_value(
        yaml_path,
        encrypted,
        mustexist=mustexist,
        value_format=emit_format
    )
def get_eyaml_values(self, yaml_path: YAMLPath, mustexist: bool = False,
                     default_value: str = ""
                     ) -> Generator[str, None, None]:
    """
    Retrieves and decrypts zero or more EYAML nodes from YAML data at a
    YAML Path.

    Parameters:
      1. yaml_path (YAMLPath) The YAML Path specifying which nodes to
         decrypt
      2. mustexist (bool) Indicates whether YAML Path must specify a
         pre-existing node; when False, the node will be created when
         missing
      3. default_value (str) The default value to add to the YAML data
         when `mustexist=False` and yaml_path points to a non-existent
         node

    Returns: (Generator[str, None, None]) The decrypted values (or
    `default_value` for freshly-created nodes)
    Raises:
      - `YAMLPathException` when YAML Path is invalid
    """
    self.logger.verbose(
        "Decrypting value(s) at {}.".format(yaml_path)
    )
    matched_nodes = self.get_nodes(
        yaml_path, mustexist=mustexist, default_value=default_value)
    for encrypted_node in matched_nodes:
        yield self.decrypt_eyaml(encrypted_node)
def _can_run_eyaml(self) -> bool:
    """
    Indicates whether this instance is capable of running the eyaml binary
    as specified via its eyaml property.

    Parameters: N/A
    Returns: (bool) True when the present eyaml property indicates an
    executable; False, otherwise
    Raises: N/A
    """
    resolved: Optional[str] = EYAMLProcessor.get_eyaml_executable(self.eyaml)
    if resolved is None:
        return False
    # Cache the fully-resolved path so later invocations skip the search.
    self.eyaml = resolved
    return True
@staticmethod
def get_eyaml_executable(binary: Optional[str] = "eyaml") -> Optional[str]:
    """
    Returns the full executable path to an eyaml binary or None when it
    cannot be found or is not executable.

    Parameters:
      1. binary (Optional[str]) The executable to test; if an absolute or
         relative path is not provided, the system PATH will be searched
         for a match to test

    Returns: (Optional[str]) None or the executable eyaml binary path
    Raises: N/A
    """
    if not binary:
        return None

    # A bare command name (no path separator) is resolved via the PATH.
    if sep not in binary:
        located = which(binary)
        if located is None:
            return None
        binary = str(located)

    return binary if access(binary, X_OK) else None
@staticmethod
def is_eyaml_value(value: str) -> bool:
    """
    Indicates whether a value is EYAML-encrypted.

    Parameters:
      1. value (Any) The value to check

    Returns: (bool) True when the value is encrypted; False, otherwise
    Raises: N/A
    """
    # Non-strings can never carry an EYAML ciphertext marker.
    if not isinstance(value, str):
        return False
    # Folded/multi-line ciphertexts are compacted before checking for the
    # ENC[ marker.
    compact = value.replace("\n", "").replace(" ", "")
    return compact.startswith("ENC[")
|
from setuptools import setup, find_packages

# Project URLs surfaced on the PyPI sidebar.
URL = "https://github.com/uzh-dqbm-cmi/data-traffic-control"
PROJECT_URLS = {
    "Bug Tracker": "https://github.com/uzh-dqbm-cmi/data-traffic-control/issues",
    "Documentation": "https://data-traffic-control.readthedocs.io/en/latest/",
    "Source Code": "https://github.com/uzh-dqbm-cmi/data-traffic-control",
}

# Read the long description explicitly as UTF-8 so the build result does not
# depend on the machine's locale default encoding.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(name='datatc',
      version='0.1.1',
      author="Laura Kinkead",
      description='Automate every-day interactions with your data.',
      long_description=long_description,
      long_description_content_type="text/markdown",
      url=URL,
      project_urls=PROJECT_URLS,
      packages=find_packages(),
      classifiers=[
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
          "Development Status :: 2 - Pre-Alpha",
          "Intended Audience :: Science/Research",
          "Topic :: Scientific/Engineering",
      ],
      python_requires='>3.7.0',
      install_requires=[
          'dill',
          'flake8',
          'gitpython',
          'pandas',
          'pyyaml',
          'xlrd',
      ],
      extras_require={
          'app': [
              'dash',
              'dash_cytoscape',
              'dash_html_components',
              'dash_core_components',
          ],
          'pdf': ['pymupdf'],
          'docs': ['sphinx_autodoc_typehints'],
      },
      entry_points={
          'console_scripts': [
              'datatc_app = datatc.dashboard.dashboard:main',
              'datatc_list = datatc.data_directory:DataDirectoryManager.list_projects',
          ]
      },
      zip_safe=False)
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.contrib import admin
from corehq.apps.case_importer.tracking.models import CaseUploadRecord
class CaseUploadRecordAdmin(admin.ModelAdmin):
    """Read-only Django admin for CaseUploadRecord rows.

    Adding and deleting records through the admin is disabled; the admin is
    only used to inspect existing upload records.
    """
    list_display = ['domain', 'task_id', 'upload_id']
    # NOTE(review): __exact lookups restrict search to exact matches —
    # presumably to keep lookups on these id columns cheap; confirm intent.
    search_fields = ['task_id__exact', 'upload_id__exact']
    readonly_fields = ['upload_file_meta']

    def has_add_permission(self, request, obj=None):
        # Disable the "Add" button/action for this model in the admin.
        return False

    def has_delete_permission(self, request, obj=None):
        # Disable deletion of upload records from the admin.
        return False


admin.site.register(CaseUploadRecord, CaseUploadRecordAdmin)
|
from setuptools import setup, find_packages

# Read the long description explicitly as UTF-8 so the build result does not
# depend on the machine's locale default encoding.
with open("README.md", "r", encoding="utf-8") as readme_file:
    readme = readme_file.read()

requirements = ["scipy>=1.3"]

setup(
    name="HCRSimPY",
    version="1.0.2",
    author="Kevin Hannay",
    author_email="khannay24@gmail.com",
    description="A package to simulate and analyze human circadian rhythms.",
    long_description=readme,
    long_description_content_type="text/markdown",
    url="https://github.com/khannay/HCRSimPY",
    packages=find_packages(),
    install_requires=requirements,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
    ],
    python_requires='>=3.6',
)
|
"""
The package provides a Space Imaging Simulator for Proximity Operations (SISPO)
The package creates images of a 3D object using blender. The images are render
in a flyby scenario. UCAC4 star catalogue to create the background. Afterwards
hese images are used with openMVG and openMVS to reconstruct the 3D model and
reconstruct the trajectory.
"""
import argparse
import cProfile
from datetime import datetime
import io
import json
import logging
###############################################################################
################## Hack to enable JPEG2000 format in OpenCV ###################
######## See https://github.com/opencv/opencv/issues/14058 for details ########
import os
os.environ["OPENCV_IO_ENABLE_JASPER"] = "TRUE"
###############################################################################
from pathlib import Path
import pstats
import sys
import time
from .__init__ import __version__
from .compression import *
from .reconstruction import *
from .sim import *
from .sim import utils
from .plugins import plugins
logger = logging.getLogger("sispo")
logger.setLevel(logging.DEBUG)
logger_formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(funcName)s - %(message)s")
def _create_parser():
"""
Creates argparser for SISPO which can be used for CLI and options
"""
parser = argparse.ArgumentParser(usage="%(prog)s [OPTION] ...",
description=__file__.__doc__)
parser.add_argument("-i", "--inputdir",
action="store",
default=None,
type=str,
help="Path to 'definition.json' file")
parser.add_argument("-o", "--outputdir",
action="store",
default=None,
type=str,
help="Path to results directory")
parser.add_argument("-n", "--name",
action="store",
default=None,
type=str,
help="Name of simulation scenario")
parser.add_argument("--verbose",
action="store_true",
help="Verbose output, displays log also on STDOUT")
parser.add_argument("--with-sim",
action="store_true",
dest="with_sim",
help="If set, SISPO will simulate the scenario")
parser.add_argument("--with-render",
action="store_true",
dest="with_render",
help="If set, SISPO will render the scenario")
parser.add_argument("--with-compression",
action="store_true",
dest="with_compression",
help="If set, SISPO will compress images")
parser.add_argument("--with-reconstruction",
action="store_true",
dest="with_reconstruction",
help="If set, SISPO will attempt reconstruction.")
parser.add_argument("--restart",
action="store_true",
help="Use cProfiler and write results to log.")
parser.add_argument("--profile",
action="store_true",
help="Use cProfiler and write results to log.")
parser.add_argument("-v", "--version",
action="store_true",
help="Prints version number.")
parser.add_argument("--with-plugins",
action="store_true",
dest="with_plugins",
help="Plugins that are run before rendering.")
return parser
def read_input():
    """
    Reads CLI input and then parses input file.

    Returns the fully-parsed settings dict, or None when only the version
    number was requested via -v/--version.
    """
    parser = _create_parser()
    args = parser.parse_args()

    if args.version:
        print(f"v{__version__}")
        return None

    inputfile = _parse_input_filepath(args.inputdir)
    settings = read_input_file(inputfile)
    # The "options" list stored in the definition file is parsed with the
    # same argparser as the real command line.
    settings["options"] = parser.parse_args(args=settings["options"])

    if settings["options"].version:
        print(f"v{__version__}")

    if settings["options"].restart:
        # Restarting a previous run is not supported yet.
        raise NotImplementedError()
    else:
        # If all options are false it is default case and all steps are done
        if (not settings["options"].with_sim and
                not settings["options"].with_render and
                not settings["options"].with_compression and
                not settings["options"].with_reconstruction):
            settings["options"].with_sim = True
            settings["options"].with_render = True
            settings["options"].with_compression = True
            settings["options"].with_reconstruction = True
        settings = parse_input(settings)

    # CLI arguments override values coming from the definition file.
    if args.outputdir is not None:
        res_dir = Path(args.outputdir).resolve()
        res_dir = utils.check_dir(res_dir)
        settings["res_dir"] = res_dir
    if args.name is not None:
        settings["name"] = args.name

    return settings
def read_input_file(filename):
    """
    Reads input from a given file.

    :type filename: String
    :param filename: Filename of a mission definition file.
    :returns: dict parsed from the JSON definition file.
    """
    # Read explicitly as UTF-8 so parsing does not depend on the locale's
    # default encoding.
    with open(str(filename), "r", encoding="utf-8") as def_file:
        settings = json.load(def_file)

    return settings
def parse_input(settings):
    """
    Parses settings from input file into correct data formats.

    :type settings: dict
    :param settings: String based description of settings.
    """
    # Warn (debug level) about top-level sections that were not supplied.
    for section in ("simulation", "compression", "reconstruction"):
        if section not in settings:
            logger.debug("No {} settings provided!".format(section))

    # Normalize path-like entries, then boolean flags.
    settings = _parse_paths(settings)
    settings = _parse_flags(settings)

    return settings
def _parse_paths(settings):
    """
    Recursively parses all settings with _dir suffix to a Path object.

    :type settings: dict
    :param settings: Dictionary containing settings
    """
    for key in settings.keys():
        if "dir" in key:
            # NOTE(review): presumably check_dir's second argument controls
            # whether a missing directory is created (results dirs yes,
            # others no) — confirm against utils.check_dir.
            if "res" in key:
                path = utils.check_dir(settings[key])
            else:
                path = utils.check_dir(settings[key], False)
            settings[key] = path
        elif "file" in key:
            # File entries must already exist on disk.
            file = Path(settings[key])
            file = file.resolve()
            if not file.is_file():
                raise RuntimeError(f"File {file} does not exist.")
            else:
                settings[key] = file
        elif isinstance(settings[key], dict):
            # Recurse into nested settings sections.
            settings[key] = _parse_paths(settings[key])
    return settings
def _parse_flags(settings):
"""
Recursively parses all settings containing with_ prefix to a bool.
:type settings: dict
:param settings: Dictionary containing settings
"""
for key in settings.keys():
if "with" in key:
settings[key] = bool(settings[key])
elif isinstance(settings[key], dict):
settings[key] = _parse_flags(settings[key])
return settings
def _parse_input_filepath(filepath):
"""
Parse input file path either from CLI argument or default file path.
"""
if filepath is None:
root_dir = Path(__file__).resolve().parent.parent
filename = root_dir / "data" / "input" / "definition.json"
else:
filename = Path(filepath).resolve()
if not filename.exists():
root_dir = Path(__file__).resolve().parent.parent
filename = root_dir / "data" / "input" / filepath.name
return filename
def serialize(o):
    """
    Serializes Path or Namespace objects into strings or dicts respectively.

    Intended as the ``default=`` hook for :func:`json.dumps`; raises
    TypeError for any unsupported type, as that protocol requires.
    """
    if isinstance(o, Path):
        return str(o)
    if isinstance(o, argparse.Namespace):
        return vars(o)
    raise TypeError(f"Object of type {type(o)} not serializable!")
def main():
    """
    Main function to run when executing file.

    Reads CLI/definition-file input, configures console and file logging,
    optionally profiles, and runs whichever of the simulation, plugin,
    rendering, compression and reconstruction steps are enabled.
    """
    settings = read_input()
    if settings is None:
        # Only the version number was requested; nothing more to do.
        return

    if settings["options"].verbose:
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.setFormatter(logger_formatter)
        logger.addHandler(stream_handler)

    if settings["options"].profile:
        pr = cProfile.Profile()

    now = datetime.now().strftime("%Y-%m-%dT%H%M%S%z")
    filename = (now + "_sispo.log")
    log_dir = settings["res_dir"]
    # Fix: is_dir must be CALLED; the bare bound method is always truthy,
    # so "not log_dir.is_dir" was always False and a missing log directory
    # was never created.
    if not log_dir.is_dir():
        Path.mkdir(log_dir)
    log_file = log_dir / filename
    file_handler = logging.FileHandler(str(log_file))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logger_formatter)
    logger.addHandler(file_handler)

    logger.debug("\n\n################### NEW SISPO LOG ###################\n")
    logger.debug("Settings:")
    logger.debug(f"{json.dumps(settings, indent=4, default=serialize)}")

    sim_settings = settings["simulation"]
    comp_settings = settings["compression"]
    recon_settings = settings["reconstruction"]

    if settings["options"].profile:
        logger.debug("Start Profiling")
        pr.enable()

    t_start = time.time()
    logger.debug("Run full pipeline")

    if settings["options"].with_sim or settings["options"].with_render:
        logger.debug("With either simulation or rendering")
        env = Environment(**sim_settings, ext_logger=logger)
        if settings["options"].with_sim:
            env.simulate()
        if settings["options"].with_plugins:
            plugins.try_plugins(settings["plugins"], settings, env)
        if settings["options"].with_render:
            env.render()

    if settings["options"].with_compression:
        logger.debug("With compression")
        comp = Compressor(**comp_settings, ext_logger=logger)
        comp.comp_decomp_series()

    if settings["options"].with_reconstruction:
        logger.debug("With reconstruction")
        recon = Reconstructor(**recon_settings, ext_logger=logger)
        recon.reconstruct()

    t_end = time.time()

    if settings["options"].profile:
        pr.disable()
        logger.debug("Stop Profile")
        s = io.StringIO()
        sortby = pstats.SortKey.CUMULATIVE
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        logger.debug("\n##################### Pstats #####################\n")
        logger.debug("\n" + s.getvalue() + "\n")
        logger.debug("\n##################################################\n")

    logger.debug(f"Total time: {t_end - t_start} s")
    logger.debug("Finished sispo main")
def run():
    """Alias for :py:func:`main` ."""
    return main()
if __name__ == "__main__":
    # Running this module directly only prints usage hints; the real entry
    # points are main()/run() above.
    print("SISPO is a Python package.")
    print("Either import in Python console or SISPO executable")
|
# Copyright 2016 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib.services.network import versions_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestNetworkVersionsClient(base.BaseServiceTest):
    """Unit tests for NetworkVersionsClient.list_versions.

    Checks the client returns the fake payload below for both str and
    bytes response bodies of a mocked 200 response.
    """

    # Canonical single-version payload used to build the fake responses.
    FAKE_INIT_VERSION = {
        "version": {
            "id": "v2.0",
            "links": [
                {
                    "href": "http://openstack.example.com/v2.0/",
                    "rel": "self"
                },
                {
                    "href": "http://docs.openstack.org/",
                    "rel": "describedby",
                    "type": "text/html"
                }
            ],
            "status": "CURRENT"
        }
    }

    # Expected body of the version-list endpoint.
    FAKE_VERSIONS_INFO = {
        "versions": [FAKE_INIT_VERSION["version"]]
    }

    # Expected body for a single version: the same entry plus media-types.
    # deepcopy keeps FAKE_INIT_VERSION unmodified.
    FAKE_VERSION_INFO = copy.deepcopy(FAKE_INIT_VERSION)
    FAKE_VERSION_INFO["version"]["media-types"] = [
        {
            "base": "application/json",
            "type": "application/vnd.openstack.network+json;version=2.0"
        }
    ]

    def setUp(self):
        super(TestNetworkVersionsClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        # NOTE(review): service type 'compute' looks unexpected for a
        # network versions client — confirm against the client registration.
        self.versions_client = (
            versions_client.NetworkVersionsClient
            (fake_auth, 'compute', 'regionOne'))

    def _test_versions_client(self, bytes_body=False):
        # Shared helper asserting list_versions returns FAKE_VERSIONS_INFO
        # for a mocked raw_request returning HTTP 200.
        self.check_service_client_function(
            self.versions_client.list_versions,
            'tempest.lib.common.rest_client.RestClient.raw_request',
            self.FAKE_VERSIONS_INFO,
            bytes_body,
            200)

    def test_list_versions_client_with_str_body(self):
        self._test_versions_client()

    def test_list_versions_client_with_bytes_body(self):
        self._test_versions_client(bytes_body=True)
|
from .return_class import AbstractApiClass
from .nested_feature import NestedFeature
from .point_in_time_feature import PointInTimeFeature
class Schema(AbstractApiClass):
    """
    A schema description for a feature.

    Args:
        client: API client handed to the AbstractApiClass base.
        name (optional): Name of the feature.
        featureMapping (optional): Mapping assigned to the feature.
        featureType (optional): Type of the feature.
        dataType (optional): Underlying data type of the feature.
    """

    def __init__(self, client, name=None, featureMapping=None, featureType=None, dataType=None):
        super().__init__(client, None)
        # Camel-case constructor arguments are stored as snake_case
        # attributes.
        self.name = name
        self.feature_mapping = featureMapping
        self.feature_type = featureType
        self.data_type = dataType

    def __repr__(self):
        return f"Schema(name={repr(self.name)}, feature_mapping={repr(self.feature_mapping)}, feature_type={repr(self.feature_type)}, data_type={repr(self.data_type)})"

    def to_dict(self):
        """Return the schema as a plain dict keyed by snake_case names."""
        return {'name': self.name, 'feature_mapping': self.feature_mapping, 'feature_type': self.feature_type, 'data_type': self.data_type}
|
import time
import pytest
from polog.core.utils.time_limit import time_limit
def test_integer():
    """Check that the limit is set and triggered when a number is the argument."""
    quant = 0.001
    sleeper = lambda count: [time.sleep(quant) for _ in range(count)]
    # A limit of one quant must fire while sleeping five quants.
    limited = time_limit(quant)(sleeper)
    with pytest.raises(TimeoutError):
        limited(5)
    # A ten-quant limit comfortably covers five quants of sleeping.
    relaxed = time_limit(quant * 10)(sleeper)
    relaxed(5)
def test_function_as_parameter():
    """Check that the limit is set and triggered when a callable is the argument."""
    quant = 0.001
    sleeper = lambda count: [time.sleep(quant) for _ in range(count)]
    # The limit can be supplied lazily as a zero-argument callable.
    limited = time_limit(lambda: quant)(sleeper)
    with pytest.raises(TimeoutError):
        limited(5)
    relaxed = time_limit(lambda: quant * 10)(sleeper)
    relaxed(5)
def test_error_signature_function():
    """Check that a callable with an invalid signature is rejected."""
    def one_positional(kek):
        pass
    # Limit providers must take no arguments; one parameter is an error.
    with pytest.raises(ValueError):
        time_limit(one_positional)
    with pytest.raises(ValueError):
        time_limit(lambda x: None)
def test_wrong_numbers():
    """Check that ValueError is raised when the decorator constructor receives an invalid number."""
    with pytest.raises(ValueError):
        @time_limit(-1)
        def negative_timeout():
            pass
    # A zero timeout is also classified as an error.
    with pytest.raises(ValueError):
        @time_limit(0)
        def zero_timeout():
            pass
def test_wrong_object():
    """Check that ValueError is raised when the decorator constructor receives neither a number nor a callable."""
    with pytest.raises(ValueError):
        @time_limit('kek')
        def decorated():
            pass
def test_wrong_number_in_action():
    """If the callable passed as the argument returns an invalid value, the
    wrapped function must simply run without a timeout.  Check that this is
    exactly what happens.
    """
    executed = False

    @time_limit(lambda: -1)
    def function():
        nonlocal executed
        executed = True

    function()
    assert executed is True
|
""" Starting point of the script is a saving of all singular values and vectors
in alex_save/
We perform the 100-optimization implemented in optim_nn_pca_greedy
"""
import math
import torch
import torchvision
import numpy as np
from lipschitz_utils import *
from max_eigenvalue import k_generic_power_method
from experiments.bruteforce_optim import optim_nn_pca_greedy
# Load a pretrained AlexNet and freeze it; only its weights are read.
alex = torchvision.models.alexnet(pretrained=True)
alex = alex.cuda()
for p in alex.parameters():
    p.requires_grad = False

compute_module_input_sizes(alex, [1, 3, 224, 224])

# Number of singular values/vectors kept per layer.
n_sv = 1

# Indices of convolutions and linear layers
convs = [0, 3, 6, 8, 10]
lins = [1, 4, 6]

lip_spectral = 1
lip = 1

##############
# Convolutions
##############
for i in range(len(convs) - 1):
    print('Dealing with convolution {}'.format(i))
    U = torch.load('alex_save/feat-left-sing-Conv2d-{}'.format(convs[i]))
    U = torch.cat(U[:n_sv], dim=0).view(n_sv, -1)
    su = torch.load('alex_save/feat-singular-Conv2d-{}'.format(convs[i]))
    su = su[:n_sv]

    V = torch.load('alex_save/feat-right-sing-Conv2d-{}'.format(convs[i+1]))
    V = torch.cat(V[:n_sv], dim=0).view(n_sv, -1)
    sv = torch.load('alex_save/feat-singular-Conv2d-{}'.format(convs[i+1]))
    sv = sv[:n_sv]

    print('Ratio layer i : {:.4f}'.format(float(su[0] / su[-1])))
    print('Ratio layer i+1: {:.4f}'.format(float(sv[0] / sv[-1])))

    U, V = U.cpu(), V.cpu()
    # First/last layers of the chain contribute their full spectrum; the
    # intermediate layers take the square root because each spectrum is
    # shared between the two pairwise products it appears in.
    if i == 0:
        sigmau = torch.diag(torch.Tensor(su))
    else:
        sigmau = torch.diag(torch.sqrt(torch.Tensor(su)))
    if i == len(convs) - 2:
        sigmav = torch.diag(torch.Tensor(sv))
    else:
        sigmav = torch.diag(torch.sqrt(torch.Tensor(sv)))

    expected = sigmau[0, 0] * sigmav[0, 0]
    print('Expected: {}'.format(expected))
    lip_spectral *= float(expected)

    try:
        curr, _ = optim_nn_pca_greedy(sigmav @ V, U.t() @ sigmau)
        print('Approximation: {}'.format(curr))
        lip *= float(curr)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # still escape.  Fall back to the spectral-norm bound.
        print('Probably something went wrong...')
        lip *= float(expected)

#########
# Linears
#########
for i in range(len(lins) - 1):
    print('Dealing with linear layer {}'.format(i))
    U = torch.load('alex_save/feat-left-sing-Linear-{}'.format(lins[i]))
    U = torch.cat(U[:n_sv], dim=0).view(n_sv, -1)
    su = torch.load('alex_save/feat-singular-Linear-{}'.format(lins[i]))
    su = su[:n_sv]

    V = torch.load('alex_save/feat-right-sing-Linear-{}'.format(lins[i+1]))
    V = torch.cat(V[:n_sv], dim=0).view(n_sv, -1)
    sv = torch.load('alex_save/feat-singular-Linear-{}'.format(lins[i+1]))
    sv = sv[:n_sv]

    print('Ratio layer i : {:.4f}'.format(float(su[0] / su[-1])))
    print('Ratio layer i+1: {:.4f}'.format(float(sv[0] / sv[-1])))

    U, V = U.cpu(), V.cpu()
    # Same endpoint/interior spectrum weighting as the convolution loop.
    # (Two unconditional sigmau/sigmav assignments that were immediately
    # overwritten here have been removed as dead code.)
    if i == 0:
        sigmau = torch.diag(torch.Tensor(su))
    else:
        sigmau = torch.diag(torch.sqrt(torch.Tensor(su)))
    if i == len(lins) - 2:
        sigmav = torch.diag(torch.Tensor(sv))
    else:
        sigmav = torch.diag(torch.sqrt(torch.Tensor(sv)))

    expected = sigmau[0, 0] * sigmav[0, 0]
    print('Expected: {}'.format(expected))
    lip_spectral *= float(expected)

    curr, _ = optim_nn_pca_greedy(sigmav @ V, U.t() @ sigmau)
    print('Approximation: {}'.format(curr))
    lip *= float(curr)

print('Lipschitz spectral: {}'.format(lip_spectral))
print('Lipschitz approximation: {}'.format(lip))
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2021 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Community PID provider."""
from invenio_pidstore.errors import PIDAlreadyExists, PIDDoesNotExistError
from invenio_pidstore.models import PIDStatus
from invenio_pidstore.providers.base import BaseProvider
class CommunitiesIdProvider(BaseProvider):
    """Community identifier provider.

    This is the recommended community id provider.
    It uses the value of the 'id' present in our data to generate the
    identifier.
    """

    pid_type = 'comid'
    """Type of persistent identifier."""

    pid_provider = None
    """Provider name."""

    object_type = 'rec'
    """Type of object."""

    default_status = PIDStatus.REGISTERED
    """Community IDs with an object are by default registered.

    Default: :attr:`invenio_pidstore.models.PIDStatus.REGISTERED`
    """

    @classmethod
    def create(cls, record, **kwargs):
        """Create a new community identifier.

        For more information about parameters,
        see :meth:`invenio_pidstore.providers.base.BaseProvider.create`.

        :param record: The community record.
        :param kwargs: dict to hold generated pid_value and status. See
            :meth:`invenio_pidstore.providers.base.BaseProvider.create` extra
            parameters.
        :returns: A :class:`CommunitiesIdProvider` instance.
        """
        # The PID value is taken verbatim from the record's own 'id' field.
        kwargs['pid_value'] = record['id']
        kwargs['status'] = cls.default_status
        kwargs['object_type'] = cls.object_type
        kwargs['object_uuid'] = record.model.id
        return super(CommunitiesIdProvider, cls).create(**kwargs)

    @classmethod
    def update(cls, pid, new_value):
        """Update the value of the Community identifier.

        :param pid: Persistent identifier instance to update.
        :param new_value: The new string value.
        :raises PIDAlreadyExists: if another PID already uses ``new_value``.
        :returns: A :class:`CommunitiesIdProvider` instance.
        """
        # Reject the rename when the target value is already registered.
        try:
            existing_pid = cls.get(new_value).pid
        except PIDDoesNotExistError:
            pass
        else:
            raise PIDAlreadyExists(
                existing_pid.pid_type,
                existing_pid.pid_value
            )
        pid.pid_value = new_value
        return cls(pid)
|
class Sensor(object):
    """A single sensor reading with optional alarm thresholds.

    Thresholds left at None are rendered as 'na' by __str__, which formats
    the sensor as one fixed-width table row.
    """

    def __init__(self, name, value, unit, status, lower_non_recoverable=None, lower_critical=None,
                 lower_non_critical=None, upper_non_critical=None, upper_critical=None, upper_non_recoverable=None):
        self.name = name
        self.value = value
        self.unit = unit
        self.status = status
        self.lower_non_recoverable = lower_non_recoverable
        self.lower_critical = lower_critical
        self.lower_non_critical = lower_non_critical
        self.upper_non_critical = upper_non_critical
        self.upper_critical = upper_critical
        self.upper_non_recoverable = upper_non_recoverable

    @staticmethod
    def _format_threshold(threshold):
        """Format a numeric threshold, or 'na' when it is None.

        Tests `is not None` rather than truthiness so a legitimate
        threshold of 0 / 0.0 is rendered as '0.000' instead of 'na'.
        """
        return '{:.3f}'.format(threshold) if threshold is not None else 'na'

    def __str__(self):
        """Render the sensor as one fixed-width, pipe-separated table row."""
        name = str(self.name)
        # Non-numeric readings (e.g. 'na') are passed through unformatted.
        value = '{:.3f}'.format(self.value) if isinstance(self.value, (int, float)) else str(self.value)
        unit = str(self.unit)
        status = str(self.status)
        fmt = self._format_threshold
        lower_non_recoverable = fmt(self.lower_non_recoverable)
        lower_critical = fmt(self.lower_critical)
        lower_non_critical = fmt(self.lower_non_critical)
        upper_non_critical = fmt(self.upper_non_critical)
        upper_critical = fmt(self.upper_critical)
        upper_non_recoverable = fmt(self.upper_non_recoverable)
        return f'{name.ljust(17)}| {value.ljust(11)}| {unit.ljust(11)}| {status.ljust(6)}| ' \
               f'{lower_non_recoverable.ljust(10)}| {lower_critical.ljust(10)}| {lower_non_critical.ljust(10)}| ' \
               f'{upper_non_critical.ljust(10)}| {upper_critical.ljust(10)}| {upper_non_recoverable}'
|
import random
import datetime

# Seed the PRNG from the current microsecond, as the original did.
seed = datetime.datetime.now().microsecond
random.seed(seed)

letters = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")

# Emit ten identifiers of the form <letter><letter><digit><digit><digit>,
# space-separated on one line.  The RNG call sequence (two choices, three
# randints per name) matches the original exactly.
for _ in range(10):
    first = random.choice(letters)
    second = random.choice(letters)
    digits = [random.randint(0, 9) for _ in range(3)]
    name = f"{first}{second}{digits[0]}{digits[1]}{digits[2]}"
    print(name, end=' ')
|
"""
Summary
-------
ASTRODF
Based on the sample average approximation, the solver makes the surrogate model within the trust region at each iteration k.
The sample sizes are determined adaptively.
Solve the subproblem and decide whether the algorithm take the candidate solution as next ieration center point or not.
Cannot handle stochastic constraints.
"""
from base import Solver
from numpy.linalg import inv
from numpy.linalg import norm
import numpy as np
import math
import warnings
warnings.filterwarnings("ignore")
class ASTRODF(Solver):
    """
    The ASTRO-DF solver: adaptive-sampling trust-region optimization for
    derivative-free stochastic problems.

    At each iteration k the solver builds a quadratic interpolation model of
    the sample-average objective within the trust region, takes a Cauchy step
    on the subproblem, and accepts or rejects the candidate based on the
    achieved-versus-predicted reduction ratio.  Sample sizes are determined
    adaptively.  Cannot handle stochastic constraints.

    Attributes
    ----------
    name : string
        name of solver
    objective_type : string
        description of objective types:
            "single" or "multi"
    constraint_type : string
        description of constraints types:
            "unconstrained", "box", "deterministic", "stochastic"
    variable_type : string
        description of variable types:
            "discrete", "continuous", "mixed"
    gradient_needed : bool
        indicates if gradient of objective function is needed
    factors : dict
        changeable factors (i.e., parameters) of the solver
    specifications : dict
        details of each factor (for GUI, data validation, and defaults)
    rng_list : list of rng.MRG32k3a objects
        list of RNGs used for the solver's internal purposes

    Arguments
    ---------
    name : str
        user-specified name for solver
    fixed_factors : dict
        fixed_factors of the solver

    See also
    --------
    base.Solver
    """
    def __init__(self, name="ASTRODF", fixed_factors=None):
        # Default to None rather than a mutable {} so the dict is created per
        # call and never shared across instances (mutable-default pitfall).
        if fixed_factors is None:
            fixed_factors = {}
        self.name = name
        self.objective_type = "single"
        self.constraint_type = "deterministic"
        self.variable_type = "continuous"
        self.gradient_needed = False
        self.specifications = {
            "crn_across_solns": {
                "description": "Use CRN across solutions?",
                "datatype": bool,
                "default": True
            },
            "delta_max": {
                "description": "maximum value of the radius",
                "datatype": float,
                "default": 200
            },
            "eta_1": {
                "description": "threshhold for success at all",
                "datatype": float,
                "default": 0.1
            },
            "eta_2": {
                "description": "threshhold for good success",
                "datatype": float,
                "default": 0.5
            },
            "gamma_0": {
                "description": "shrinkage/expansion ratio for delta_0 in parameter tuning",
                "datatype": float,
                "default": 0.5
            },
            "gamma_1": {
                "description": "very successful step radius increase",
                "datatype": float,
                "default": 1.25
            },
            "gamma_2": {
                "description": "unsuccessful step radius decrease",
                "datatype": float,
                "default": 0.8
            },
            "w": {
                "description": "decreasing rate for delta in contracation loop",
                "datatype": float,
                "default": 0.9
            },
            "mu": {
                "description": "the constant to make upper bound for delta in contraction loop",
                "datatype": float,
                "default": 100
            },
            "beta": {
                "description": "the constant to make the delta in main loop not too small",
                "datatype": float,
                "default": 50
            },
            "c_lambda": {
                "description": "hyperparameter (exponent) to determine minimum sample size",
                "datatype": float,
                "default": 0.1
            },
            "epsilon_lambda": {
                "description": "hyperparameter (coefficient) to determine minimum sample size",
                "datatype": float,
                "default": 0.5
            },
            "kappa": {
                "description": "hyperparameter in adaptive sampling in outer/inner loop",
                "datatype": float,
                "default": 100
            }
        }
        self.check_factor_list = {
            "crn_across_solns": self.check_crn_across_solns,
            # NOTE(review): "sample_size" is not declared in `specifications`
            # above; presumably it is supplied/validated elsewhere -- confirm.
            "sample_size": self.check_sample_size
        }
        super().__init__(fixed_factors)

    def check_sample_size(self):
        """Validate that the sample-size factor is strictly positive."""
        return self.factors["sample_size"] > 0

    def standard_basis(self, size, index):
        """Return the standard basis vector e_index of length `size`."""
        arr = np.zeros(size)
        arr[index] = 1.0
        return arr

    def local_model_evaluate(self, x_k, q):
        """Evaluate the diagonal quadratic model [1, x, x**2] @ q at x_k."""
        X = [1]
        X = np.append(X, np.array(x_k))
        X = np.append(X, np.array(x_k) ** 2)
        return np.matmul(X, q)

    def samplesize(self, k, sig2, delta):
        """
        Compute the adaptive sample size at iteration k.

        Arguments
        ---------
        k : int
            iteration number; callers increment before use, so k >= 1
            (math.log(1, 10) == 0 is absorbed by the max(2, ...) floor)
        sig2 : float
            current sample variance of the objective estimate
        delta : float
            current trust-region radius

        Returns
        -------
        int
            number of replications required (at least 2)
        """
        c_lambda = self.factors["c_lambda"]
        epsilon_lambda = self.factors["epsilon_lambda"]
        kappa = self.factors["kappa"]
        # lambda_k grows slowly with the iteration count.
        lambda_k = (10 + c_lambda) * math.log(k, 10) ** (1 + epsilon_lambda)
        # Sample size N_k ~ lambda_k * sig2 / (kappa**2 * delta**4).
        # Bug fix: the original used `kappa ^ 2`, which is bitwise XOR in
        # Python (a TypeError for float kappa); the formula calls for
        # kappa squared.
        N_k = math.ceil(max(2, lambda_k, lambda_k * sig2 / ((kappa ** 2) * delta ** 4)))
        return N_k

    def model_construction(self, x_k, delta, k, problem, expended_budget):
        """
        Build a local quadratic model around x_k (contraction loop).

        The trust-region radius is shrunk geometrically (factor w) until the
        model gradient is large enough relative to the radius; every
        interpolation point is sampled adaptively via `samplesize`.

        Returns
        -------
        tuple
            (fval, Y, q, grad, Hessian, delta_k, expended_budget)
        """
        w = self.factors["w"]
        mu = self.factors["mu"]
        beta = self.factors["beta"]
        j = 0
        d = problem.dim
        while True:
            fval = []
            j = j + 1
            # Shrink the radius geometrically on every contraction pass.
            delta_k = delta * w ** (j - 1)
            # make the interpolation set
            Y = self.interpolation_points(x_k, delta_k, problem)
            for i in range(2 * d + 1):
                new_solution = self.create_new_solution(Y[i][0], problem)
                # need to check there is existing result
                problem.simulate(new_solution, 1)
                expended_budget += 1
                sample_size = 1
                # Adaptive sampling: replicate until the sample size satisfies
                # the adaptive rule for the current variance estimate.
                while True:
                    problem.simulate(new_solution, 1)
                    expended_budget += 1
                    sample_size += 1
                    sig2 = new_solution.objectives_var
                    if sample_size >= self.samplesize(k, sig2, delta_k):
                        break
                # Negate for maximization problems so the model always minimizes.
                fval.append(-1 * problem.minmax[0] * new_solution.objectives_mean)
            # Fit the model on the set shifted to be centered at the origin.
            Z = self.interpolation_points(np.array(x_k) - np.array(x_k), delta_k, problem)
            # make the model and get the model parameters
            q, grad, Hessian = self.coefficient(Z, fval, problem)
            # Stop contracting once the gradient is non-negligible, or once
            # the radius is small relative to the gradient norm.
            if norm(grad) > 0.1:
                break
            if delta_k <= mu * norm(grad):
                break
        # Keep delta_k within [beta * ||grad||, delta].
        delta_k = min(max(beta * norm(grad), delta_k), delta)
        return fval, Y, q, grad, Hessian, delta_k, expended_budget

    def coefficient(self, Y, fval, problem):
        """
        Fit the diagonal quadratic model to the interpolation data (Y, fval).

        Returns
        -------
        tuple
            (q, grad, Hessian): full coefficient vector, its gradient slice
            q[1:d+1], and the diagonal Hessian built from q[d+1:2d+1].
        """
        M = []
        d = problem.dim
        # Each row of M is [1, y, y**2] for one interpolation point, giving a
        # quadratic model with a diagonal Hessian.
        for i in range(0, 2 * d + 1):
            M.append(1)
            M[i] = np.append(M[i], np.array(Y[i]))
            M[i] = np.append(M[i], np.array(Y[i]) ** 2)
        q = np.matmul(inv(M), fval)
        Hessian = np.diag(q[d + 1:2 * d + 1])
        return q, q[1:d + 1], Hessian

    def interpolation_points(self, x_k, delta, problem):
        """Return the 2d+1 coordinate-basis interpolation points around x_k."""
        Y = [[x_k]]
        d = problem.dim
        epsilon = 0.01
        for i in range(0, d):
            plus = Y[0] + delta * self.standard_basis(d, i)
            minus = Y[0] - delta * self.standard_basis(d, i)
            if sum(x_k) != 0:
                # block constraints: clamp points that fall outside the box.
                if minus[0][i] < problem.lower_bounds[i]:
                    minus[0][i] = problem.lower_bounds[i] + epsilon
                if plus[0][i] > problem.upper_bounds[i]:
                    plus[0][i] = problem.upper_bounds[i] - epsilon
            Y.append(plus)
            Y.append(minus)
        return Y

    def parameter_tuning(self, delta, problem):
        """
        Run a short trial (1% of the budget) of the main loop with a given
        initial radius, so that `solve` can pick the best-performing radius.

        Returns
        -------
        tuple
            (final_ob, k, delta_k, recommended_solns, intermediate_budgets,
             expended_budget, new_x)
        """
        recommended_solns = []
        intermediate_budgets = []
        expended_budget = 0
        # default values
        delta_max = self.factors["delta_max"]
        eta_1 = self.factors["eta_1"]
        eta_2 = self.factors["eta_2"]
        gamma_1 = self.factors["gamma_1"]
        gamma_2 = self.factors["gamma_2"]
        k = 0  # iteration number
        # Start with the initial solution
        new_x = problem.factors["initial_solution"]
        new_solution = self.create_new_solution(new_x, problem)
        recommended_solns.append(new_solution)
        intermediate_budgets.append(expended_budget)
        # Spend at most 1% of the total budget on this trial.
        while expended_budget < problem.factors["budget"] * 0.01:
            k += 1
            fval, Y, q, grad, Hessian, delta_k, expended_budget = self.model_construction(new_x, delta, k, problem, expended_budget)
            # Cauchy reduction
            if np.matmul(np.matmul(grad, Hessian), grad) <= 0:
                tau = 1
            else:
                tau = min(1, norm(grad) ** 3 / (delta * np.matmul(np.matmul(grad, Hessian), grad)))
            grad = np.reshape(grad, (1, problem.dim))[0]
            candidate_x = new_x - tau * delta * grad / norm(grad)
            candidate_solution = self.create_new_solution(tuple(candidate_x), problem)
            # Adaptive sampling of the candidate solution.
            problem.simulate(candidate_solution, 1)
            expended_budget += 1
            sample_size = 1
            while True:
                problem.simulate(candidate_solution, 1)
                expended_budget += 1
                sample_size += 1
                sig2 = candidate_solution.objectives_var
                if sample_size >= self.samplesize(k, sig2, delta_k):
                    break
            # calculate success ratio
            fval_tilde = -1 * problem.minmax[0] * candidate_solution.objectives_mean
            # replace the candidate x if the interpolation set has lower objective function value
            if min(fval) < fval_tilde:
                minpos = fval.index(min(fval))
                fval_tilde = min(fval)
                candidate_x = Y[minpos][0]
            # Guard against a zero predicted reduction (division by zero).
            if (self.local_model_evaluate(np.zeros(problem.dim), q) - self.local_model_evaluate(
                    np.array(candidate_x) - np.array(new_x), q)) == 0:
                rho = 0
            else:
                rho = (fval[0] - fval_tilde) / (
                        self.local_model_evaluate(np.zeros(problem.dim), q) - self.local_model_evaluate(
                            candidate_x - new_x, q))
            if rho >= eta_2:  # very successful
                new_x = candidate_x
                final_ob = candidate_solution.objectives_mean
                delta_k = min(gamma_1 * delta_k, delta_max)
                recommended_solns.append(candidate_solution)
                intermediate_budgets.append(expended_budget)
            elif rho >= eta_1:  # successful
                new_x = candidate_x
                final_ob = candidate_solution.objectives_mean
                delta_k = min(delta_k, delta_max)
                recommended_solns.append(candidate_solution)
                intermediate_budgets.append(expended_budget)
            else:  # unsuccessful: shrink the trust region
                delta_k = min(gamma_2 * delta_k, delta_max)
                final_ob = fval[0]
        return final_ob, k, delta_k, recommended_solns, intermediate_budgets, expended_budget, new_x

    def solve(self, problem):
        """
        Run a single macroreplication of a solver on a problem.

        Arguments
        ---------
        problem : Problem object
            simulation-optimization problem to solve

        Returns
        -------
        recommended_solns : list of Solution objects
            list of solutions recommended throughout the budget
        intermediate_budgets : list of ints
            list of intermediate budgets when recommended solutions changes
        """
        recommended_solns = []
        intermediate_budgets = []
        expended_budget = 0
        delta_max = self.factors["delta_max"]
        gamma_0 = self.factors["gamma_0"]
        # Three candidate initial radii around delta_max.
        delta_candidate = [gamma_0 * delta_max, delta_max, delta_max / gamma_0]
        # default values
        eta_1 = self.factors["eta_1"]
        eta_2 = self.factors["eta_2"]
        gamma_1 = self.factors["gamma_1"]
        gamma_2 = self.factors["gamma_2"]
        k = 0  # iteration number
        # Start with the initial solution
        new_x = problem.factors["initial_solution"]
        new_solution = self.create_new_solution(new_x, problem)
        recommended_solns.append(new_solution)
        intermediate_budgets.append(expended_budget)
        # Parameter tuning: try each candidate radius on 1% of the budget and
        # keep the run whose final objective is best.
        tp_final_ob_pt, k, delta, recommended_solns, intermediate_budgets, expended_budget, new_x = self.parameter_tuning(
            delta_candidate[0], problem)
        for i in range(1, 3):
            final_ob_pt, k_pt, delta_pt, recommended_solns_pt, intermediate_budgets_pt, expended_budget_pt, new_x_pt = self.parameter_tuning(
                delta_candidate[i], problem)
            expended_budget += expended_budget_pt
            if -1 * problem.minmax[0] * final_ob_pt < -1 * problem.minmax[0] * tp_final_ob_pt:
                k = k_pt
                delta = delta_pt
                recommended_solns = recommended_solns_pt
                intermediate_budgets = intermediate_budgets_pt
                new_x = new_x_pt
        # Shift the recorded budgets to account for the two discarded tuning runs.
        intermediate_budgets = (
                intermediate_budgets + 2 * np.ones(len(intermediate_budgets)) * problem.factors["budget"] * 0.01).tolist()
        intermediate_budgets[0] = 0
        while expended_budget < problem.factors["budget"]:
            k += 1
            fval, Y, q, grad, Hessian, delta_k, expended_budget = self.model_construction(new_x, delta, k, problem,
                                                                                          expended_budget)
            # Cauchy reduction
            if np.matmul(np.matmul(grad, Hessian), grad) <= 0:
                tau = 1
            else:
                tau = min(1, norm(grad) ** 3 / (delta * np.matmul(np.matmul(grad, Hessian), grad)))
            grad = np.reshape(grad, (1, problem.dim))[0]
            candidate_x = new_x - tau * delta * grad / norm(grad)
            # Clamp the candidate into the box constraints.
            for i in range(problem.dim):
                if candidate_x[i] < problem.lower_bounds[i]:
                    candidate_x[i] = problem.lower_bounds[i] + 0.01
                elif candidate_x[i] > problem.upper_bounds[i]:
                    candidate_x[i] = problem.upper_bounds[i] - 0.01
            candidate_solution = self.create_new_solution(tuple(candidate_x), problem)
            # Adaptive sampling of the candidate solution.
            problem.simulate(candidate_solution, 1)
            expended_budget += 1
            sample_size = 1
            while True:
                problem.simulate(candidate_solution, 1)
                expended_budget += 1
                sample_size += 1
                sig2 = candidate_solution.objectives_var
                if sample_size >= self.samplesize(k, sig2, delta_k):
                    break
            # calculate success ratio
            fval_tilde = -1 * problem.minmax[0] * candidate_solution.objectives_mean
            # replace the candidate x if the interpolation set has lower objective function value
            if min(fval) < fval_tilde:
                minpos = fval.index(min(fval))
                fval_tilde = min(fval)
                candidate_x = Y[minpos][0]
            # Guard against a zero predicted reduction (division by zero).
            if (self.local_model_evaluate(np.zeros(problem.dim), q) - self.local_model_evaluate(
                    np.array(candidate_x) - np.array(new_x), q)) == 0:
                rho = 0
            else:
                rho = (fval[0] - fval_tilde) / (
                        self.local_model_evaluate(np.zeros(problem.dim), q) - self.local_model_evaluate(
                            candidate_x - new_x, q))
            if rho >= eta_2:  # very successful
                new_x = candidate_x
                delta_k = min(gamma_1 * delta_k, delta_max)
                recommended_solns.append(candidate_solution)
                intermediate_budgets.append(expended_budget)
            elif rho >= eta_1:  # successful
                new_x = candidate_x
                delta_k = min(delta_k, delta_max)
                recommended_solns.append(candidate_solution)
                intermediate_budgets.append(expended_budget)
            else:  # unsuccessful: shrink the trust region
                delta_k = min(gamma_2 * delta_k, delta_max)
        return recommended_solns, intermediate_budgets
|
"""
O Pandas fornece muitas "funções de resumo" simples que reestruturam os dados de alguma maneira útil.
"""
import pandas as pd
reviews = pd.read_csv("./summary_maps/summary_functions_1/census.csv")
print(reviews.describe())
# print(reviews.education.describe()) # faz sentido só com dados do tipo númerico
print(reviews.hour_per_week.describe())
# média
print(reviews.hour_per_week.mean())
# mediana
print(reviews.hour_per_week.median())
# ver lista(array) de valores possíveis
print(reviews.education.unique())
print(reviews.hour_per_week.unique())
# ver uma lista de valores possíveis e com que frequência eles ocorrem no conjunto de dados
print(reviews.education.value_counts())
# retorna o indice do valor máximo
print("Idxmax encontrado:")
print(reviews.iloc[reviews["final_weight"].idxmax()])
print("Testando:")
# usando uma proporção pegar indice do valor máximo
bargain_idx = (reviews.final_weight/ reviews.age).idxmax()
print(bargain_idx)
print('-'*20)
print(reviews.loc[bargain_idx])
bargain_wine = reviews.loc[bargain_idx, 'occupation']
print(bargain_wine)
|
# Reverse a list in place without using built-in functions or a second list.
a = [1, 2, 3, 4, 5]
i = 0
j = len(a) - 1
while i < j:
    # Tuple unpacking swaps the two elements without a temporary variable.
    a[i], a[j] = a[j], a[i]
    i += 1
    j -= 1
# Fixed typo in the output message ("revese" -> "reverse").
print("reverse of a= ", a)
# Alternatively, slicing with a negative step yields the reversed list.
a = a[len(a) - 1::-1]
print(a)
##############################################################
### reverse list taking aother list using swapping # need to right
# a=[1,2,3,4,5]
# b=[]
# i=len(a)-1
# j=0
# while i>=0:
# b[j]=a[i]
# j+=1
# i-=1
# print("revese of a= ",b)
##########################################
# a=[1,2,3,4,5]
# b=[]
# i=0
# j=len(a)-1
# while i>=0:
# t=a[i]
# b[j]=a[i]
# a[j]=t
# i+=1
# j-=1
# print("revese of a= ",b)
##shifting each element one step left or right in list
#############################################################
# string can be directly converted into list by using string
#string method 'splilt'
# a='pradeep'
# b=a.split()
# print(b)
#print(type(a))
##############################################################
# or using list we can do same
# a='pradeep'
# b=list(a)
# print(b)
##################### or #################################
# import ast
# color ="['Red', 'Green', 'White']"
# print(ast.literal_eval(color))
##########################################################
#revese list without using built in function i have to do
# size=int(input('enter the size of list: '))
# a=[]
# for i in range(size):
# num=int(input('enter the number: '))
# a.append(num)
# print(a)
# i=0
# j=size-1
# for i in range(len(a)):
# t=a[i]
# a='pradeep'
# b=list(a)
# print(b)
######################################################
###########################################
## USING REMOVE method in list IN A DIFFERENT WAY
# a=[1,2,3,4,5]
# b=a
# for i in b:
# b.remove(i)
# print(a)
# print(b)
## OUTPUT:-
#[2,4]
###########################################
# a=[1,2,3,4,5]
# b=a
# #b=a.copy()
# for i in a:
# a.remove(i)
# print(a)
# print(b)
# a=12_1.2
# b=1.56
# print(a+b)
## OUTPUT:-122.76
##################################################################
## without using print after input we need add list with input
# a=['pradeep','vikas','roshan']
# # # b=a
# # b=a.copy()
# # b=a
# for i in a:
# print(i,end='')
# age=input(' age is:')
####################################################################
####################################################################
##SLICE ASSIGNMENT IN LIST
# l=[10,2,4,7,3,8,910,20]
# l[1:3]=[100,100]
# print(l)
##OUTPUT:-[10, 100, 100, 7, 3, 8, 910, 20]
##REMOVE MORE THAN ONE ELEMENT IN ONCE by using slice assignment
# l=[10,20,30,40,50]
# l[1:3]=[]
# print(l)
##replace more than one element at once
# l=[10,2,4,7,3,8,910,20]
# l[1:3]=[100,100]
# print(l)
##output:-[10, 100, 100, 7, 3, 8, 910, 20]
#################################################################
################################################################
##COPY OR DEEP COPY OF LIST
# c=[1,2,3]
# d=list(c)
# c[0]=[30]
# print(c)
# a=[[1,2,3],[21,23]
###########################################################
#BUBBLE SORT
# a=[10,5,8,20,9,7,34,22]
# for i in range(len(a)):
# for j in range((len(a)-i-1)):
# if a[j]>a[j+1]:
# t=a[j]
# a[j]=a[j
# a[j+1]=t
# print(a)
#######################################################
# UNIVERSAL CODE / dynamic code for BUBBLE SORT
# size=int(input('enter the size of the list:'))
# a=[]
# #a=[10,5,8,20,9,7]
# for i in range(size):
# val=int(input('enter number: '))
# a.append(val)
# for i in range(len(a)):
# for j in range(0,(len(a)-i-1)):
# if a[j]>a[j+1]:
# t=a[j]
# a[j]=a[j+1]
# a[j+1]=t
# print('sorted list:',a)
########################################################
# how to take nested list as input
# t=[]
# n=int(input('enter size :'))
# for i in range (n):
# ta=[]
# for j in range(n):
# val=int(input('value :'))
# ta.append(val)
# t.append(ta)
# print(t)
#########################################################
# # ATM USING LIST
# password=12345
# bal=20000
# language_option=['1.ENGLISH\n','2.HINDI\n']
# #choose_transaction=['1.balance inquiry\n','2.cash withdrawl\n','3.cash deposit\n','4.exit transaction\n']
# choose_transaction=['1.balane inquiry\n,2.cash withdrwl\n,3.cash deposit\n,4.exit transaction\n']
# select_option2=[1,2]
# select_option=[1,2,3,4]
# print('WELCOME TO THE ATM MACHINE\nplease swipe your card')
# print('please choose your lenguage option\n1.ENGLISH\n2.HINDI\n')
# count=0
# iput=int(input('choose your language_option :'))
# if iput==select_option2[0]:
# while True:
# #print('WELCOME TO THE ATM MACHINE\nplease swipe your card')
# # print('please choose your lenguage option\n1.ENGLISH\n2.HINDI\n')
# print('1.balane inquiry\n,2.cash withdrwl\n,3.cash deposit\n,4.exit transaction\n')
# if count==0:
# iput=int(input('choose your language_option :'))
# if iput==select_option2[0]:
# print('1.balane inquiry\n2.cash withdrwl\n3.cash deposit\n4.exit transaction\n')
# print(for i in ['1.balane inquiry\n,2.cash withdrwl\n,3.cash deposit\n,4.exit transaction\n'])
#for i in (choose_transaction):
#print(choose_transaction[i])
# print(['1.balane inquiry\n,2.cash withdrwl\n,3.cash deposit\n,4.exit transaction\n'])
# #print(['1.balance inquiry\n2.cash withdrawl\n3.cash deposit\n4.exit transaction\n'])
# iput=int(input('please enter your transaction: '))
# iput=int(input('chose your transaction: '))
# if iput==1:
# pas=int(input('enter your password:'))
# if password == pas:
# print('your current balance is',bal)
# another_tran=input('do want to another transaction YES/NO press y or n: ')
# if another_tran == 'y':
# continue
# else:
# print('exited from the transaction\n thanks for using atm')
# break
# else:
# print('wrong password')
# elif iput==2:
# pas=int(input('enter your password:'))
# if pas==password:
# amout=int(input('enter amount to withdrawl: '))
# if amout<=bal:
# print('collect your cash\nyour remaining balance is',bal-amout)
# else:
# print('insufficient balance in your account')
# another_tran=input('do want to another transaction YES/NO press y or n: ')
# if another_tran == 'y':
# continue
# else:
# print('you exited from the transaction\n thanks for using atm')
# break
# else:
# print('wrong password')
# elif iput==3:
# pas=int(input('enter your password:'))
# if pas==password:
# dipos=int(input('enter amount to deposit: '))
# print('cash is diposited\nyour new balance is',bal+dipos)
# another_tran=input('do want to another transaction YES/NO press y or n: ')
# if another_tran == 'y':
# continue
# else:
# print('you exited from the transaction\n thanks for using atm')
# break
# else:
# print('wrong password')
# elif iput==4:
# print('successfully exited from the transaction\n thanks for using atm')
# #pas=int(input('enter your password:')
# #if pas==password:
# #dipos=int(input('enter amount to deposit: '))
# #print('cash is diposited\nyour new balane is',bal+dipos)
# else:
# print('please enter correct option')
# else:
# print('sorry, this language is not available.')
####################################################################################################
#this is from ,meraki more questions series
# Python mein hum ek loop ke andar loop bhi chala sakte hain.
#Sochiye humare paas ek list hai jisme aur list hain jinme integers hain. Kuch aise:
# big_list = [[1,2,3], [5,8,9], [4,3,77,521,31,311]]
# #Iss list se agar humne saare numbers ko ek ek kar ke print karna hai.
#toh hum kuch aisa code likh sakte hain:
# big_list = [[1,2,3], [5,8,9], [4,3,77,521,31,311]]
# count1 = 0
# while count1 < len(big_list): #3
# small_list_length = len(big_list[counter1])
# count2 = 0
# while count2 < small_list_length: #6
# print (big_list[counter1][counter2])
# count2 = count2 + 1
# count1 = count1 + 1
# print ('-----')
## print( len(big_list))
# #print( small_list_length)
# #print(len(big_list[counter1]))
# it is iterating each item below questions
# a = [[1,2,3], [5,8,9], [4,3,77,521,31,311]]
# count1 =0
# while count1 < len(a): #3
# b = len(a[count1])
# count2 = 0
# while count2 < b: #6
# print (a[count1][count2]) # count1=0 01 02 03,count1=1 10 11 13
# count2 = count2 + 1 # count1=3 30 31 32 33 34 35
# count1 = count1 + 1
# print ('------')
###################################################
# #USING FOR LOOP
# a = [[1,2,3], [5,8,9], [4,3,77,521,31,311]]
# count1 = 0
# for i in range (len(a)):
# b = len(a[count1])
# count2 = 0
# for j in range(count2,b):
# print (a[count1][count2]) # count1=0 01 02 03,count1=1 10 11 13
# count2 = count2 + 1 # count1=3 30 31 32 33 34 35
# count1 = count1 + 1
# print ('------')
##########################################################
#code dope 11
#remove dublicate item without using other list
# a = [1,2,3,2,1,3,12,12,32]
# i = 0
# while i < len(a):
# j = i+1
# while j < len(a):
# if a[i] == a[j]:
# del(a[j])
# j=j+1
# i = i+1
# print(a)
##python way
# a = [1,2,3,2,1,3,12,12,32]
# a = list(set(a))
# print a
# ########################################################
# a=[[1,2,3],[4,5,6]]
# i = 0
# while i<len(a):
# j = 0
# while j < len(a[i]):
# print(a[i][j])
# j = j+1
# i = i+1
##############################################################
# for i in range (len(a)):
# for j in range (len(a[i])):
# print(a[i][j])
################################################################
# #W3RESOUrces
# Write a Python program to count the number of strings.
# where the string length is 2 or more and the first and last character are same from a given list of strings.
# words=['abc', 'xyz', 'aba', '1221']
# ctr = 0
# for word in words:
# if len(word) > 1 and word[0] == word[-1]:
# ctr += 1
# print(ctr)
#################################################################
##QN- TABLE IN NESTED LIST ACCORDING TO USER
#a=int(input('enter no upto u want table'))
# # nested list input how to take
# t=[]
# n=int(input('enter no upto u want table:'))
# for i in range (1,n+1):
# ta=[]
# b=1
# for j in range(1,11):
# c=i*j
# ta.append(c)
# t.append(ta)
# print(t)
## HOW TO CONVERT NESTED LIST TO FLATTERN LIST
# a=[6,[4,3,[2,1,[49]]]] # have to do this
# b=[]
# for i in a:
# b.append(i)
# print(b)
###################################################
## FIND HCF OF a list or a value
# a=int(input('enter first num: '))
# b=int(input('enter second num: '))
# if a>b:
# mn=b
# else:
# mn=a
# for i in range(1,mn+1):
# if a%i==0 and b%i==0:
# HCF=i
# print(f'hcf of 1st and 2nd is {HCF}')
############################################
# # #LCM OF NUMBER
# a=int(input('enter first num: '))
# b=int(input('enter second num:'))
# if a>b:
# maxn=a
# else:
# maxn=b
# value=maxn
# while (True):
# if maxn%a==0 and maxn%b==0:
# break
# else:
# maxn=maxn+value
# print(f'lcm of {a} and {b} is {maxn}')
###############################################
#THIRD MAX FROM THE LIST
# a=[2,3,34,50,100,21,10,23,49]
# for i in range (len(a)):
# for j in range(len(a)-i-1):
# if a[j]>a[j+1]:
# t=a[j]
# a[j]=a[j+1]
# a[j+1]=t
# print(a)
# print(f'third highest is {a[-3]}')
#####################################################
##FLATTEN LIST FROM NESTED
# multiple levels of nesting allowed.
# input list
# l = [1 k=[]
# l = [1, 2, [3, 4, [5, 6]], 7, 8, [9, [10]]]
# for i in range (len(l)):
# for j in range (len(l[i])):
# for k in range (len[i][j]):
# k.append(k[i][j][k])
# print(f'originonal list is {l}')
# print(f'flatten list is {k}')
# if type(i)==list:
# for j in i:
# k.append(j)
# else:
# k.append(i)
##############################################################
# a=[1,2,4,56,3,[3,[3,44],4,53,2,[3,5,[2,5,67,8],23]]]
# main_list = []
# while True:
# c = True
# for i in a:
# if type(i) == list:
# main_list.extend(i)
# c = False
# else:
# main_list.append(i)
# if c:
# break
# else:
# a = main_list
# main_list = []
# print(main_list)
##################################################################
# nested_list = [[1,[2, [2, 3, 4], [5, [6,11,12,13], 7], [8, 9, 10]]]]
# #nested_list=[[1, 2, [3, 4, [5, 6]], 7, 8, [9, [10]],34]]
# nested_list=[1, 2, [3, 4, [5, 6]],0,[9],8, [9, [10]]]
# flat=[]
# le=len(nested_list)
# print(le)
# s=0
# while le>s:
# for i in nested_list:
# if type(i)==list:
# flat.extend(i)
# else:
# flat.append(i)
# s+=1
# #print(flat)
# #print(flat)
# if le>s:
# nested_list=flat
# flat=[]
# #print(flat)
# print(flat)
##########################################
# a=9
# v=a
# a=0
# print(v)
#output:v=9
############################################################
##2nd SECOND METHOD TO FLATTEN LIST
# nested_list=[1,[2,[3,[4,[5,[6,[7,8,9,10]]]]]]]
# emptyList=[]
# while nested_list:
# copyItem=nested_list.pop()
# #print(copyItem)
# if type(copyItem)==list:
# nested_list.extend(copyItem)
# else:
# emptyList.append(copyItem)
# emptyList.reverse()
# print(emptyList)
# #HANGMAN GAME 1 ###############################################
# import random
# hangman_f=[' +--------------+------\n | |\n | |\n | |\n | O\n |\n |\n |\n |\n |\n |',' +--------------+------\n | |\n | |\n | |\n | O\n | |\n | |\n |\n |\n |\n |',' +--------------+------\n | |\n | |\n | |\n | O\n | |\n | |\n | / \\\n |\n |\n |',' +--------------+------\n | |\n | |\n | |\n | O\n | |\n | / \\\n | |\n | |\n |\n |\n |',' +--------------+------\n | |\n | |\n | |\n | O\n | |\n | / \\\n | |\n | |\n | / \\\n |\n |']
# i=0
# while i<=5:
# x=random.randint(1,10)
# guess=input('enter your guessing no between 1 to 10.: ')
# if guess == x:
# print('you are the WINNNER!')
# break
# else:
# #for i in hangman_f:
# if i==1:
# print(hangman_f[0])
# elif i==2:
# print(hangman_f[1])
# elif i==3:
# print(hangman_f[2])
# elif i==4:
# print(hangman_f[3])
# elif i==4:
# print(hangman_f[4])
# i+=1
##second method is by vikash for figure part only
# while len(hangman_f)>i:
# x=random.randint(1,11)
# inp=int(input('Guess any num between 1 to 10: '))
# if inp==x:
# print('congratulation, you won the game!!')
# break
# else:
# print(hangman_f[i])
# i+=1
#############################################################
|
from .utils.dataIO import fileIO
from .utils import checks
from __main__ import send_cmd_help
from __main__ import settings as bot_settings
# Sys.
import discord
from discord.ext import commands
from operator import itemgetter, attrgetter
from copy import deepcopy
import random
import os
import sys
import time
import logging
#
# A Four in a row game for the Red-DiscordBot.
#
# https://github.com/Canule/Red-DiscordBot
#
#
__author__ = "Mash"
__version__ = "1.0.0"
#TODO:
# Finish addbot player: Disabled by default.
# Cleanup some more, look for inefficient code, double checks etc..
DIR_DATA = "data/fourinarow"
GAMES = DIR_DATA+"/games.json"
SETTINGS = DIR_DATA+"/settings.json"
PLAYERS = DIR_DATA+"/players.json"
STATS = DIR_DATA+"/stats.json"
LOGGER = DIR_DATA+"/fourinarow.log"
BACKUP = DIR_DATA+ "/players.backup"
class FourInARow:
"""Four in a row
Dominate the board!"""
    def __init__(self, bot):
        """Load all persisted cog state from the data/fourinarow JSON files."""
        self.bot = bot
        # Per-channel games, cog settings, player accounts and global stats
        # are each persisted in their own JSON file.
        self.game = fileIO(GAMES, "load")
        self.settings = fileIO(SETTINGS, "load")
        self.players = fileIO(PLAYERS, "load")
        self.stats = fileIO(STATS, "load")
        # Cache frequently used settings as attributes.
        self.BOARD_HEADER = self.settings["BOARD_HEADER"]
        self.ICONS = self.settings["ICONS"]
        self.TOKENS = self.settings["TOKENS"]
        # The first icon doubles as the "empty cell" marker on the board.
        self.EMPTY = self.settings["ICONS"][0][0]
        self.PREFIXES = bot_settings.prefixes
@commands.group(name="4row", pass_context=True)
async def _4row(self, ctx):
"""Four in a row game operations."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
return
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Game Operations
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@_4row.command(pass_context=True, no_pm=True)
async def register(self, ctx):
"""Registers an 'Four in a row' account."""
user = ctx.message.author
if self.account_check(user.id) is False:
registerData = await self.register_player(ctx, user)# returns {"showMsg": bool, "msg": str}
if registerData["showMsg"]:
await self.bot.say(registerData["msg"])
else:
await self.bot.say("{}` You are already registered!` ".format(user.mention))
    @_4row.command(pass_context=True, no_pm=True)
    async def new(self, ctx):
        """Create a new game."""
        user = ctx.message.author
        if self.account_check(user.id):
            # One game per channel: the channel id keys the game table.
            if ctx.message.channel.id not in self.game["CHANNELS"]:
                now = round(time.time())
                # Set-up a game.  Timestamps (gameStarted/lastActivity) drive
                # the expiry and vote-unlock logic in `stop`.
                self.game["CHANNELS"][ctx.message.channel.id] = {"board": self.empty_board(0),
                                                                 "boardSize": 0,
                                                                 "activePlayers": 0 ,
                                                                 "PLAYERS": {"IDS": [], "NAMES": [], "TOKENS": []},
                                                                 "VOTES_STP": {"votes": 0, "voteIds": []},
                                                                 "turnIds": [],
                                                                 "skipIds": [],
                                                                 "inQue": "yes",
                                                                 "deleteMsg": True,
                                                                 "gameStarted": now,
                                                                 "lastActivity": now,
                                                                 "botDifficulty": self.settings["BOT_SETTINGS"]["DEFAULT_DIFFICULTY"],
                                                                 "winner": "unknown"}
                fileIO(GAMES, "save", self.game)
                # The creator immediately joins the freshly created game.
                joinData = await self.join_game(ctx, user)# returns {"delMsg": bool, "showMsg": bool, "drawBoard": bool, "msg": str}
                if joinData["delMsg"]:
                    await self.delete_message(ctx)
                if joinData["showMsg"] and not joinData["drawBoard"]:
                    await self.bot.say(joinData["msg"])
                elif joinData["showMsg"] and joinData["drawBoard"]:
                    await self.draw_board(ctx, joinData["msg"])
            else:
                await self.bot.say("{}` There is already a new game set!\nTry: '{}4row join'` ".format(user.mention, self.PREFIXES[0]))
        else:
            await self.bot.say( "{} ` You need an account in order to use this command.\nType:'{}4row register' to create one`".format(user.mention, self.PREFIXES[0]))
    @_4row.command(pass_context=True, no_pm=True)
    async def start(self, ctx):
        """Start the game."""
        user = ctx.message.author
        BOARDWIDTH = self.settings["BOARDWIDTH"]
        # EAFP: a missing channel entry means no game is set up here.
        try:
            BOARDSIZE = self.game["CHANNELS"][ctx.message.channel.id]["boardSize"]
            activePlayers = self.game["CHANNELS"][ctx.message.channel.id]["activePlayers"]
            data = True
        except Exception as e:
            data = False
        if data:
            # Only a player in the game may start it, and at least two
            # players are required.
            if not self.ingame_check(ctx, user.id):
                await self.bot.say( "{} ` You need to be in a game to start.`".format(user.mention))
                return
            elif activePlayers <= 1:
                await self.bot.say( "{} ` There must be at least on more player to start this game.`".format(user.mention))
                return
            elif self.ingame_check(ctx, user.id):
                await self.start_game(ctx, user.id)
                await self.delete_message(ctx)
                await self.draw_board(ctx, "\n` Game started\nIf it's your turn use '{}token [number 1/{}]`".format(self.PREFIXES[0], BOARDWIDTH[BOARDSIZE]))
        else:
            await self.bot.say( "{} ` No game to start.`".format(user.mention))
    @_4row.command(pass_context=True, no_pm=True)
    async def stop(self, ctx):
        """Stops the game by voting or after a game has been expired."""
        user= ctx.message.author
        # NOTE(review): `Allowed` is assigned but never read in this method.
        Allowed = False
        if self.account_check(user.id):
            # Check for a game @ channel.
            # EAFP: a missing channel entry means no game exists here.
            try:
                now = round(time.time())
                CH_GAME = self.game["CHANNELS"][ctx.message.channel.id]
                activePlayers = CH_GAME["activePlayers"]
                chGamestarted = CH_GAME["gameStarted"]
                chLastActivity = CH_GAME["lastActivity"]
                CH_VOTES_STP = CH_GAME["VOTES_STP"]
                gameVoteUnlocks = self.settings["VOTE_UNLOCK_TIME"]
                gameExpires = self.settings["EXPIRE_TIME"]
                minVotesToUnlock = self.settings["MIN_VOTES_TO_UNLOCK"]
                differenceStarted = now-chGamestarted
                differenceLastActivity = now-chLastActivity
                data = True
            except Exception as e:
                # NOTE(review): `logger` is not defined in this part of the
                # file (the LOGGER constant above is only a path); presumably
                # a module-level logger is created elsewhere -- confirm.
                logger.info(e)
                data = False
            if data:
                # User has voted already?
                if user.id not in CH_VOTES_STP["voteIds"] or differenceStarted >= gameExpires:
                    # Check if game is expired of stop voted.
                    if differenceStarted >= gameExpires:
                        # Game exceeded its maximum lifetime: stop unconditionally.
                        await self.stop_game(ctx)
                        self.stats["gamesTimedOut"] += 1
                        fileIO(STATS, "save", self.stats)
                        await self.bot.say("{} ` Game stopped`".format(user.mention))
                        return
                    elif activePlayers <= 1: # If for any reason one player is left behind in an active game, allow a stop.
                        await self.stop_game(ctx)
                        self.stats["gamesStopped"] += 1
                        fileIO(STATS, "save", self.stats)
                        await self.bot.say("{} ` Game stopped`".format(user.mention))
                        return
                    else:# Not expired yet so check unlock votes.
                        # Game is unlocked?
                        if differenceLastActivity >= gameVoteUnlocks:
                            # Record this user's stop vote.
                            CH_VOTES_STP["votes"] += 1
                            CH_VOTES_STP["voteIds"].append(user.id)
                            await self.delete_message(ctx)
                            await self.draw_board(ctx, "\n` Votes to stop this game: {}/{}`".format(CH_VOTES_STP["votes"], minVotesToUnlock))
                            # Save vote.
                            self.game["CHANNELS"][ctx.message.channel.id]["VOTES_STP"] = CH_VOTES_STP
                            fileIO(GAMES, "save", self.game)
                        # Game is locked for vote?
                        elif differenceLastActivity < gameVoteUnlocks:
                            timeLeft = gameVoteUnlocks-differenceLastActivity
                            #await self.delete_message(ctx)
                            await self.bot.say("{} ` Game is locked by a last activity cool-down, please wait {}sec. to stop(vote) this game down.`"
                                               .format(user.mention, timeLeft))
                        # Game has no lock conditions so output not expired message.
                        elif differenceStarted < gameExpires:
                            timeLeft = gameExpires-differenceStarted
                            #await self.delete_message(ctx)
                            await self.bot.say("{} ` Game is not expired yet, please wait {}sec. to stop this game, or start a game in another channel`"
                                               .format(user.mention, timeLeft))
                        # Check votes to stop game before expire.
                        if CH_VOTES_STP["votes"] >= minVotesToUnlock:
                            # Enough votes: stop the game early.
                            await self.stop_game(ctx)
                            #await self.delete_message(ctx)
                            self.stats["gamesUnlocked"] += 1
                            fileIO(STATS, "save", self.stats)
                            await self.bot.say("` Game stopped\nWell done {}, you ruined the game...`".format(user))
                else: # user.id in CH_VOTES_STP["voteIds"]:
                    await self.bot.say( "{} ` You already voted.`".format(user.mention))
            else:
                await self.bot.say( "{} ` No game to stop.`".format(user.mention))
        else:
            await self.bot.say( "{} ` You need an account in order to use this command.\nType:'{}4row register' to create one`".format(user.mention, self.PREFIXES[0]))
@_4row.command(pass_context=True, no_pm=True)
async def join(self, ctx):
"""Join a new game."""
user = ctx.message.author
try:
inQue = self.game["CHANNELS"][ctx.message.channel.id]["inQue"]
data = True
except Exception as e:
logger.info(e)
data = False
if data and inQue == "yes":
joinData = await self.join_game(ctx, user)# returns {"delMsg": bool, "showMsg": bool, "drawBoard": bool, "msg": str}
if joinData["delMsg"]:
await self.delete_message(ctx)
if joinData["showMsg"] and not joinData["drawBoard"]:
await self.bot.say(joinData["msg"])
elif joinData["showMsg"] and joinData["drawBoard"]:
await self.draw_board(ctx, joinData["msg"])
else:
await self.bot.say("{} ` Nothing to join...`".format(user.mention))
@_4row.command(pass_context=True, no_pm=True)
async def leave(self, ctx):
"""Leave a game."""
user = ctx.message.author
try:
inQue = self.game["CHANNELS"][ctx.message.channel.id]["inQue"]
player = self.players["PLAYERS"][user.id]
data = True
except Exception as e:
logger.info(e)
data = False
msg = ""
if data and self.ingame_check(ctx, user.id):
leaveData = await self.leave_game(ctx, user)# returns {"delMsg": bool, "showMsg": bool, "drawBoard": bool, "msg": str, "stopGame": bool}
if leaveData["delMsg"]:
await self.delete_message(ctx)
if leaveData["showMsg"] and not leaveData["drawBoard"]:
await self.bot.say(leaveData["msg"])
elif leaveData["showMsg"] and leaveData["drawBoard"]:
await self.draw_board(ctx, leaveData["msg"])
if leaveData["stopGame"]:
self.stats["gamesRuined"] += 1
self.players["PLAYERS"][user.id]["STATS"]["wasted"] += 1
self.players["PLAYERS"][user.id]["STATS"]["points"] += self.settings["REWARDS"]["RUIENING"]
fileIO(PLAYERS, "save", self.players)
fileIO(STATS, "save", self.stats)
await self.stop_game(ctx)
else:
await self.bot.say("{} ` No game to leave from...`".format(user.mention))
@_4row.command(pass_context=True, no_pm=True)
async def board(self, ctx):
"""Displays the play field."""
user = ctx.message.author
await self.delete_message(ctx)
await self.draw_board(ctx, "")
    @_4row.command(pass_context=True)
    async def score(self, ctx):
        """Shows your score.

        Builds a three-part summary (game counts + ratio, move timing, rank)
        from the caller's stored STATS and the current rankings.
        """
        user = ctx.message.author
        # Pull the caller's stats; a KeyError means no account exists.
        try:
            player = self.players["PLAYERS"][user.id]
            stats = player["STATS"]
            won = stats["won"] + stats["draw"]   # Draws count toward the win side of the ratio.
            lost = stats["loss"] + stats["wasted"]
            data = True
        except Exception as e:
            logger.info(e)
            data = False
        if data and self.account_check(user.id):
            total = won+lost
            cTotal = total
            # Make cTotal = 1 for calc. if player is new/no games.
            if total == 0: cTotal = 1
            ratio = float(won)/(cTotal)
            resultRankings = await self.get_rankings(ctx, user.id)# Returns{"topScore": array, "userIdRank": string(userId)}
            userIdRank = resultRankings["userIdRank"]
            msg = "{}```\n".format(user.mention)
            msg ="{}You have played ({}) games, of those you won ({}), lost ({}), played ({}) even, and ruined ({}).\nThat gives you a win/loss ratio of: ({})\n\n".format(msg,
                                                                                                                                                                          str(total),
                                                                                                                                                                          str(stats["won"]),
                                                                                                                                                                          str(stats["loss"]),
                                                                                                                                                                          str(stats["draw"]),
                                                                                                                                                                          str(stats["wasted"]),
                                                                                                                                                                          str(round(ratio, 2)))
            # NOTE: "avarageTimeGame" is the established (misspelled) JSON key.
            msg = "{}With an total of ({}) moves, the average time you need to make a move is ({}) seconds.\nThe average duration of a game you're part of is ({}) Minutes.\n\n".format(msg,
                                                                                                                                                                                       str(stats["totalMoves"]),
                                                                                                                                                                                       str(round(stats["averageTimeTurn"], 2)),
                                                                                                                                                                                       str(round(stats["avarageTimeGame"]/60, 1)))
            msg = "{}That makes you “{}” with ({}) points, and places you on #({}) of in total ({}) registered players.```".format(msg,
                                                                                                                                  str(player["MSG"]["joiningMsg"]),
                                                                                                                                  str(stats["points"]),
                                                                                                                                  str(userIdRank),
                                                                                                                                  len(self.players["PLAYERS"]))
            await self.bot.say(msg)
        else:
            await self.bot.say( "{} ` You need an account in order to use this command.\nType:'{}4row register' to create one`".format(user.mention, self.PREFIXES[0]))
    @_4row.command(pass_context=True, no_pm=True)
    async def addbot(self, ctx):
        """Add a bot to the game in queue.

        Registers a player account for the server's bot member on first use,
        then joins it to the queued game like a normal player.
        """
        user = ctx.message.author
        bot = ctx.message.server.me
        # Probe for a queued game in this channel.
        try:
            inQue = self.game["CHANNELS"][ctx.message.channel.id]["inQue"]
            data = True
        except Exception as e:
            logger.info(e)
            data = False
        # Check permission.
        if self.account_check(user.id):
            if self.settings["BOT_SETTINGS"]["ENABLED"]:
                if data and inQue == "yes":
                    # Check is bot exist in players.
                    if not self.account_check(bot.id):
                        # Register bot. NOTE: register_player returns None (it only
                        # logs) when called for the bot, so `msg` stays unused here.
                        msg = await self.register_player(ctx, bot)# returns {"showMsg": bool, "msg": str}
                    # Check if account is made.
                    if self.account_check(bot.id):
                        joinData = await self.join_game(ctx, bot)# returns {"delMsg": bool, "showMsg": bool, "drawBoard": bool, "msg": str}
                        if joinData["delMsg"]:
                            await self.delete_message(ctx)
                        if joinData["showMsg"] and not joinData["drawBoard"]:
                            await self.bot.say(joinData["msg"])
                        elif joinData["showMsg"] and joinData["drawBoard"]:
                            await self.draw_board(ctx, joinData["msg"])
                    else:
                        await self.bot.say("{} ` Failed to add bot...`".format(user.mention))
                else:
                    await self.bot.say( "{} ` No game to join`".format(user.mention))
            else:
                await self.bot.say( "{} ` The use of a bot player is disabled`".format(user.mention))
        else:
            await self.bot.say( "{} ` You need an account in order to use this command`".format(user.mention))
    @_4row.command(pass_context=True, no_pm=True)
    async def kickbot(self, ctx):
        """Removes a bot from the game in queue."""
        user = ctx.message.author
        bot = ctx.message.server.me
        # Probe for a queued game in this channel.
        try:
            inQue = self.game["CHANNELS"][ctx.message.channel.id]["inQue"]
            data = True
        except Exception as e:
            logger.info(e)
            data = False
        # Check permission.
        if self.account_check(user.id):
            if self.settings["BOT_SETTINGS"]["ENABLED"]:
                if data and inQue == "yes":
                    # Check is bot exist in players.
                    if not self.account_check(bot.id):
                        # NOTE(review): this mirrors addbot, but calls leave_game where
                        # addbot calls register_player -- looks like a copy-paste slip
                        # (leaving a game for a bot with no account); confirm intent.
                        msg = await self.leave_game(ctx, bot)# returns {"msg": str}
                    # Check if account is made.
                    if self.account_check(bot.id):
                        leaveData = await self.leave_game(ctx, bot)# returns {"delMsg": bool, "showMsg": bool, "drawBoard": bool, "msg": str, "stopGame": bool}
                        if leaveData["delMsg"]:
                            await self.delete_message(ctx)
                        if leaveData["showMsg"] and not leaveData["drawBoard"]:
                            await self.bot.say(leaveData["msg"])
                        elif leaveData["showMsg"] and leaveData["drawBoard"]:
                            await self.draw_board(ctx, leaveData["msg"])
                        if leaveData["stopGame"]:
                            await self.stop_game(ctx)
                    else:
                        await self.bot.say("{} ` Failed to remove bot...`".format(user.mention))
                else:
                    await self.bot.say( "{} ` No queue to leave from`".format(user.mention))
            else:
                await self.bot.say( "{} ` The use of a bot player is disabled`".format(user.mention))
        else:
            await self.bot.say( "{} ` You need an account in order to use this command.\nType:'{}4row register' to create one`".format(user.mention, self.PREFIXES[0]))
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Direct Commands
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
    @commands.command(pass_context=True, no_pm=True, aliases=["t"])
    async def token(self, ctx, someToken: int):
        """Insert token at given position.

        Validates that a started game exists, that the caller has not left it
        and that it is their turn, then drops the token into column
        *someToken* (1-based) and checks for a draw or a win.
        """
        user= ctx.message.author
        # Load the channel's game state; a missing key means no game here.
        try:
            CH_GAME = self.game["CHANNELS"][ctx.message.channel.id]
            inQue = CH_GAME["inQue"]
            BOARDWIDTH = self.settings["BOARDWIDTH"]
            BOARDSIZE = CH_GAME["boardSize"]
            CH_PLAYERS = CH_GAME["PLAYERS"]
            skipIds = CH_GAME["skipIds"]
            data = True
        except Exception as e:
            logger.info(e)
            data = False
        if data and inQue == "no":  # "no" == the game has been started.
            if user.id not in skipIds:  # Players who left mid-game are skipped.
                if await self.my_turn(ctx, user.id) == True:
                    tokenRow = None
                    freePos = None
                    stopGame = False
                    tokenRow = int(someToken)
                    tokenRow -= 1 # Index = 0/BOARDWIDTH
                    # Check if token is in range.
                    if tokenRow >= 0 and tokenRow <= BOARDWIDTH[BOARDSIZE]-1:
                        board = self.game["CHANNELS"][ctx.message.channel.id]["board"]  # NOTE(review): assigned but never used.
                        freePos = self.lowest_empty_space(ctx, tokenRow)
                        if freePos == -1:  # Column is full.
                            await self.bot.say("\n{} ` Try another row.`".format(user.mention))
                        else:
                            await self.make_move(ctx, user, tokenRow, freePos)
                            commentList = ["\n ` Type !listtokens to pick your favourite one.`",
                                           "\n ` Type !token to pick where to place your tokens.`"]
                            comment = random.choice(commentList)
                            # Check end-of-game conditions against every player's token.
                            if len(CH_PLAYERS["IDS"]) >= 1:
                                for usr in range (len(CH_PLAYERS["IDS"])):
                                    if self.board_full(ctx):
                                        comment = ("\n{} ` It's a tie! `".format(user.mention))
                                        self.game["CHANNELS"][ctx.message.channel.id]["winner"] = "draw"# Needed for update_score.
                                        await self.update_score(ctx)# Update score of all players.
                                        stopGame = True
                                    elif self.is_winner(ctx, self.TOKENS[CH_PLAYERS["TOKENS"][usr]][0]):
                                        comment = ("\n{} Owns this game with his {}'s{}".format(user.mention,
                                                                                               self.TOKENS[CH_PLAYERS["TOKENS"][usr]][0],
                                                                                               self.TOKENS[CH_PLAYERS["TOKENS"][usr]][1]))
                                        self.game["CHANNELS"][ctx.message.channel.id]["winner"] = user.id# Needed for update_score.
                                        await self.update_score(ctx)# Update score of all players.
                                        stopGame = True
                            if not stopGame:
                                self.next_turn(ctx, user)
                            fileIO(GAMES, "save", self.game)
                            await self.delete_message(ctx)
                            await self.draw_board(ctx, comment)
                            # If game needs to be stopped by above conditions.
                            if stopGame == True:
                                # NOTE(review): the 2500-credit amount is hard-coded in this
                                # message; presumably it should reflect the REWARDS settings -- confirm.
                                comment = "\nCongratulations, you won 2500 credits."
                                await self.draw_board(ctx, comment, True)# Dm board to user.
                                await self.bot.say("` Game ended`")
                                await self.stop_game(ctx)
                    else:
                        await self.bot.say("{} ` '{}token [number 1/{}]'`".format(user.mention, self.PREFIXES[0], BOARDWIDTH[BOARDSIZE]))
                else:
                    await self.bot.say("{} ` Wait for your turn!`".format(user.mention))
            else:
                await self.bot.say("{} ` You left the game, you idiot!`".format(user.mention))
        else:
            await self.bot.say("{} ` Game not available or started.`".format(user.mention))
@commands.command(pass_context=True, no_pm=True)
async def setmytoken(self, ctx, newToken : int):
"""Change your preferred token."""
user= ctx.message.author
if self.account_check(user.id):
max = len(self.settings["TOKENS"])-1# 0 is reserved for sys
if newToken >= 1 and newToken <= max:
msg = await self.token_switch(ctx, user, newToken)
try:
inQue = self.game["CHANNELS"][ctx.message.channel.id]["inQue"]
data = True
except Exception as e:
logger.info(e)
data = False
if data and inQue == "yes" and user.id in self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]["IDS"]:
await self.delete_message(ctx)
await self.draw_board(ctx, msg)
else:
await self.bot.say("{}".format(msg))
else:
await self.bot.say("{} ` Choose a token number between 0 and {}, check DM for available tokens`".format(user.mention, max))
msg = await self.msg_available_tokens()
await self.bot.send_message(ctx.message.author, msg)
else:
await self.bot.say( "{} ` You need an account in order to use this command.\nType:'{}4row register' to create one`".format(user.mention, self.PREFIXES[0]))
@commands.command(pass_context=True, no_pm=True)
async def listtokens(self, ctx):
"""DM's a list of available tokens."""
user= ctx.message.author
await self.bot.say("{} ` Check DM for available tokens.`".format(user.mention))
msg = await self.msg_available_tokens()
await self.bot.send_message(ctx.message.author, msg)
    @_4row.command(name="leaderboard", pass_context=True)#Conflict's with Economy, so it became sub command.
    async def _leaderboard(self, ctx, page: int=-1):
        """Shows the 'Four in a row' leaderboard.

        Splits the ranking into pages of ten entries; with no page argument
        the page containing the caller is shown, and the caller's row is
        marked with a chevron.
        """
        user = ctx.message.author
        page -= 1  # Convert the 1-based user argument to a 0-based page index.
        try:
            resultRankings = await self.get_rankings(ctx, user.id)# Returns{"topScore": array(userId/Score), "userIdRank": string(userId)}
            topScore = resultRankings["topScore"]
            userIdRank = resultRankings["userIdRank"]
            playerAmount = len(self.players["PLAYERS"])
            data = True
        except Exception as e:
            logger.info(e)
            data = False
        # Put players and their earned points in to a table.
        msgHeader = "{}\n```erlang\nPosition | Username | Score\n---------------------------------------------------------\n".format(user.mention)
        if data and playerAmount >= 1:
            await self.delete_message(ctx)
            pages = []
            totalPages = 0
            usr = 0
            userFound = False
            # NOTE(review): initialized to False; if the caller is never found,
            # pages[False] below resolves to pages[0] only by accident.
            userFoundPage = False
            msg = ""
            while (usr < playerAmount):
                w=usr+10  # One page holds up to ten entries.
                while (w > usr):
                    if usr >= playerAmount:
                        break
                    # NOTE(review): these padding strings are all a single space;
                    # the "max length = 32 +1" comment suggests the original widths
                    # were lost to whitespace mangling -- confirm column alignment.
                    ul = len(topScore[usr][2])
                    sp = ' '# Discord username max length = 32 +1
                    sp = sp[ul:]
                    sn = ' '
                    if usr+1 >= 10: sn = ' '
                    if usr+1 >= 100: sn = ' '
                    if usr+1 >= 1000: sn = ' '
                    if user.id == topScore[usr][0]:
                        # Caller's own row gets the » marker.
                        msg = msg+"#({}){}| » {} | ({})\n".format(usr+1, sn, topScore[usr][2]+sp, topScore[usr][1])
                        userFound = True
                        userFoundPage = totalPages
                    else:
                        msg = msg+"#({}){}| {} | ({})\n".format(usr+1, sn, topScore[usr][2]+sp, topScore[usr][1])
                    usr += 1
                pages.append(msg)
                totalPages += 1
                msg = ""
                # NOTE(review): suspected off-by-one -- the inner loop already left
                # `usr` past its last entry, so this extra increment skips one
                # player between consecutive pages; confirm against get_rankings.
                usr += 1
            # Determine what page to show.
            if page <= -1:# Show page with user.
                selectPage = userFoundPage
            elif page >= totalPages:
                selectPage = totalPages-1# Flood -1
            elif page in range(0, totalPages):
                selectPage = page
            else:# Show page 0
                selectPage = 0
            await self.bot.say( "{}{}\nTotal players:({})\nPage:({}/{})```".format(msgHeader, pages[selectPage], playerAmount, selectPage+1, totalPages))
        else:
            await self.bot.say( "`No accounts in the Four in a row register`".format(user.mention))
            logger.info("Error @ _leaderboard, players < 1")
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Moderator Commands @ 4row
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@_4row.command(name="stpg", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _stpg(self, ctx):
"""Force stop/delete a game from the channel.
Admin/owner restricted."""
user= ctx.message.author
server = ctx.message.server
await self.stop_game(ctx)
logger.info("{}({}) has removed the Game from {}({})".format(user, user.id, ctx.message.channel, ctx.message.channel.id))
await self.bot.say("{} ` Game stopped. `".format(user.mention))
@_4row.command(name="maxplayers", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _maxplayers(self, ctx, maxp: int):
"""Changes the maximum amount of players that can join a game.
Admin/owner restricted."""
user= ctx.message.author
if maxp <= 4:
self.settings["MAX_PLAYERS"] = maxp
await self.bot.say("{} ` The maximum amount of players in game is now {}. `".format(user.mention, str(maxp)))
logger.info("{}({}) has set MAX_PLAYERS = {}".format(user, user.id, str(maxp)))
fileIO(SETTINGS, "save", self.settings)
else:
await self.bot.say("{} ` Game is limited to 4 players max. `".format(user.mention))
@_4row.command(name="expiretime", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _expiretime(self, ctx, expireTime : int):
"""Changes the expire time for a game.
Admin/owner restricted."""
user= ctx.message.author
if expireTime == 0:
expireTime = 1
self.settings["EXPIRE_TIME"] = expireTime
await self.bot.say("{} ` Game expires after {} seconds.`".format(user.mention, str(expireTime)))
logger.info("{}({}) has set EXPIRE_TIME = {}".format(user, user.id, str(expireTime)))
fileIO(SETTINGS, "save", self.settings)
@_4row.command(name="unlocktime", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _unlocktime(self, ctx, unlockTime : int):
"""Changes the time to unlock voting.
Admin/owner restricted."""
user= ctx.message.author
if unlockTime == 0:
unlockTime = 1
self.settings["VOTE_UNLOCK_TIME"] = unlockTime
await self.bot.say("{} ` Game voting unlocks at {} seconds.`".format(user.mention, str(unlockTime)))
logger.info("{}({}) has set VOTE_UNLOCK_TIME = {}".format(user, user.id, str(unlockTime)))
fileIO(SETTINGS, "save", self.settings)
@_4row.command(name="unlockvotes", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _unlockvotes(self, ctx, minVotes : int):
"""Changes the amount of votes to unlock a game for stop.
Admin/owner restricted."""
user= ctx.message.author
self.settings["MIN_VOTES_TO_UNLOCK"] = minVotes
await self.bot.say("{} ` Game now stops after {} votes.`".format(user.mention, str(minVotes)))
logger.info("{}({}) has set MIN_VOTES_TO_UNLOCK = {}".format(user, user.id, str(minVotes)))
fileIO(SETTINGS, "save", self.settings)
@_4row.command(name="togglebot", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _togglebot(self, ctx):
"""Enable/Disables the use of a bot player *you should leave this disabled for now.
Admin/owner restricted."""
user= ctx.message.author
if self.settings["BOT_SETTINGS"]["ENABLED"]:
self.settings["BOT_SETTINGS"]["ENABLED"] = False
allowBot = "Disabled"
elif not self.settings["BOT_SETTINGS"]["ENABLED"]:
self.settings["BOT_SETTINGS"]["ENABLED"] = True
allowBot = "Enabled"
await self.bot.say("`Work in progress. Be aware that enabling and using this may cause strange behaviour`")#deleteme
await self.bot.say("{} ` The in-game bot is now: {}.`".format(user.mention, allowBot))
logger.info("{}({}) has {} the in-game bot.".format(user, user.id, allowBot.upper()))
fileIO(SETTINGS, "save", self.settings)
@_4row.command(name="toggleqmsg", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _toggleqmsg(self, ctx):
"""Enable/Disables player comments.
Admin/owner restricted."""
user= ctx.message.author
if self.settings["ENA_QUEUE_MSG"]:
self.settings["ENA_QUEUE_MSG"] = False
allowMsg = "Disabled"
elif not self.settings["ENA_QUEUE_MSG"]:
self.settings["ENA_QUEUE_MSG"] = True
allowMsg = "Enabled"
await self.bot.say("{} ` The in-game user comments are now: {}.`".format(user.mention, allowMsg))
logger.info("{}({}) has {} the in-game user comments.".format(user, user.id, allowMsg.upper()))
fileIO(SETTINGS, "save", self.settings)
@_4row.command(name="botdifficulty", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _botdifficulty(self, ctx, difficulty : str):
"""Changes the default l33tnes of the bot.
Admin/owner restricted."""
user= ctx.message.author
if difficulty in self.settings["BOT_SETTINGS"]["DIFFICULTY"]:
if difficulty == "EASY":
difficultySet = self.settings["BOT_SETTINGS"]["DIFFICULTY"]["EASY"]
elif difficulty == "NOVICE":
difficultySet = self.settings["BOT_SETTINGS"]["DIFFICULTY"]["NOVICE"]
elif difficulty == "HARD":
difficultySet = self.settings["BOT_SETTINGS"]["DIFFICULTY"]["HARD"]
self.settings["BOT_SETTINGS"]["DEFAULT_DIFFICULTY"] = difficultySet
await self.bot.say("{} ` Game bot difficulty is now {}.`".format(user.mention, difficulty))
logger.info("{}({}) has set DEFAULT_DIFFICULTY = {}".format(user, user.id, str(difficulty)))
fileIO(SETTINGS, "save", self.settings)
else:
await self.bot.say("{} ` Choose between EASY, NOVICE , HARD.`")
fileIO(SETTINGS, "save", self.settings)
@_4row.command(name="backup", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _backup(self, ctx):
"""Backup a copy of the user database.
Admin/owner restricted."""
user= ctx.message.author
if not fileIO(BACKUP, "check"):
logger.info("Writing a Backup ...")
fileIO(BACKUP, "save", self.players)
await self.bot.say("{} ` Backup done.`".format(user.mention, BACKUP))
logger.info("{}({}) has made a BACKUP ({})".format(user, user.id, BACKUP))
elif fileIO(BACKUP, "check"):
await self.bot.say("` Backup found, overwrite it? yes/no`")
response = await self.bot.wait_for_message(author=ctx.message.author)
if response.content.lower().strip() == "yes":
logger.info("Overwriting Backup")
fileIO(BACKUP, "save", self.players)
await self.bot.say("{} ` Backup done.`".format(user.mention))
logger.info("{}({}) has made a BACKUP ({})".format(user, user.id, BACKUP))
else:
await self.bot.say("`Backup cancled.`")
@_4row.command(name="restore", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _restore(self, ctx):
"""Restore a copy of the user database.
Admin/owner restricted."""
user= ctx.message.author
if not fileIO(BACKUP, "check"):
logger.info("No backup Found!")
await self.bot.say("{} ` No backup Found!`".format(user.mention))
logger.info("{}({}) Restoring backup FAILED ({})".format(user, user.id, BACKUP))
elif fileIO(BACKUP, "check"):
if fileIO(PLAYERS, "check"):
await self.bot.say("` a players.json is found, overwrite it with the backup data? yes/no`")
response = await self.bot.wait_for_message(author=ctx.message.author)
if response.content.lower().strip() == "yes":
logger.info("Restoring players.json ...")
backup = fileIO(BACKUP, "load")
fileIO(PLAYERS, "save", backup)
await self.bot.say("{} ` Backup restored.`".format(user.mention))
logger.info("{}({}) Has RESTORED the backup ({})".format(user, user.id, BACKUP))
else:
await self.bot.say("` Restore cancled.`")
else:
logger.info("Restoring players.json ...")
backup = fileIO(BACKUP, "load")
fileIO(PLAYERS, "save", backup)
await self.bot.say("{} ` Backup restored.`".format(user.mention))
logger.info("{}({}) Has RESTORED the backup ({})".format(user, user.id, BACKUP))
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Various Functions
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Check if there is an account made.
def account_check(self, id):
try:
REG_PLAYERS = self.players["PLAYERS"]
data = True
except:
data = False
if data:
if id in REG_PLAYERS:
return True
else:
return False
else:
return False
# Check if user.id is currently in channel.id game.
def ingame_check(self, ctx, userId):
try:
activePlayers = self.game["CHANNELS"][ctx.message.channel.id]["activePlayers"]
data = True
except:
data = False
if data:
for usr in range(activePlayers):
if userId == self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]["IDS"][usr]:
return True
return False
else:
return False
# Generate an empty board.
def empty_board(self, size):
board = []
for x in range(self.settings["BOARDHEIGHT"][size]):
board.append([self.EMPTY] * self.settings["BOARDWIDTH"][size])
return board
# Register a player.
async def register_player(self, ctx, user):
if user == ctx.message.server.me:
preferred = self.settings["BOT_SETTINGS"]["TOKEN"]
print(preferred)
playerMsg = "nomsg"
joiningMsg = "Initializing cheats..."
victoryMsg = "Next..."
else:
preferred = 0
playerMsg = "nomsg"
joiningMsg = "Newbie"
victoryMsg = "nomsg"
self.players["PLAYERS"][user.id] = {"boardId": "noGame",
"tokenPreferred": preferred,
"tokenAssinged": 0,
"playerId": user.id,
"playerName": user.display_name,
"MSG": {"playerMsg": playerMsg, "victoryMsg": victoryMsg, "joiningMsg": joiningMsg},
"STATS": {"won": 0, "loss": 0, "draw": 0, "wasted": 0, "totalMoves": 0, "points" : 10,"averageTimeTurn": 0, "avarageTimeGame": 0}}
fileIO(PLAYERS, "save", self.players)
if user == ctx.message.server.me:
logger.info("Four in a row bot account created by channel: {}".format(ctx.message.channel.id))
else:
msg = ("{} `Account created`".format(user.mention))
return {"showMsg": True, "msg": msg}
    # Join Game.
    async def join_game(self, ctx, user):
        """Add *user* to this channel's pending game.

        Returns {"delMsg": bool, "showMsg": bool, "drawBoard": bool,
        "msg": str} telling the caller how to present the result.
        """
        msg = ""
        CH_PLAYERS = self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]
        if user.id not in self.players["PLAYERS"]:
            msg = ("{} ` You need to be registered to join or start a game.\nType: '{}4row register'`".format(user.mention, self.PREFIXES[0]))
            return {"delMsg": False, "showMsg": True, "drawBoard": False, "msg": msg}
        elif user.id in self.players["PLAYERS"]:
            if ctx.message.channel.id in self.game["CHANNELS"]:
                activePlayers = self.game["CHANNELS"][ctx.message.channel.id]["activePlayers"]
                if activePlayers >= 0 and activePlayers <= self.settings["MAX_PLAYERS"]-1: # At least one free slot.
                    # Check if user is already in game.
                    if user.id in CH_PLAYERS["IDS"]:
                        msg = ("{} `You already joined...`".format(user.mention))
                        return {"delMsg": False, "showMsg": True, "drawBoard": False, "msg": msg}
                    elif user.id not in CH_PLAYERS["IDS"]:
                        # Add user to game.
                        CH_PLAYERS["NAMES"].append(user.display_name)
                        CH_PLAYERS["IDS"].append(user.id)
                        CH_PLAYERS["TOKENS"].append(0)
                        self.game["CHANNELS"][ctx.message.channel.id]["turnIds"].append(user.id)
                        self.players["PLAYERS"][user.id]["playerName"] = user.display_name
                        # NOTE(review): this compares an id string against a Member
                        # object, so it is always True and the reset also hits the
                        # bot; presumably ctx.message.server.me.id was meant -- confirm.
                        if user.id != ctx.message.server.me: # Escape bot.
                            self.players["PLAYERS"][user.id]["MSG"]["playerMsg"] = "nomsg"
                        self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"] = CH_PLAYERS
                        fileIO(GAMES, "save", self.game)
                        # User is now a part of total players.
                        activePlayers += 1
                        self.game["CHANNELS"][ctx.message.channel.id]["activePlayers"] = activePlayers
                        # Attach game to user.
                        self.players["PLAYERS"][user.id]["boardId"] = ctx.message.channel.id
                        # Assign token (switch from none/:x: to preferred OR available).
                        msg = await self.token_switch(ctx, user)
                        # Scale Board to the new player count (board is reset).
                        if activePlayers <= 2:#2
                            self.game["CHANNELS"][ctx.message.channel.id]["boardSize"] = 0
                            self.game["CHANNELS"][ctx.message.channel.id]["board"] = self.empty_board(0)
                        elif activePlayers == 3:#3
                            self.game["CHANNELS"][ctx.message.channel.id]["boardSize"] = 1
                            self.game["CHANNELS"][ctx.message.channel.id]["board"] = self.empty_board(1)
                        elif activePlayers == 4:#4
                            self.game["CHANNELS"][ctx.message.channel.id]["boardSize"] = 2
                            self.game["CHANNELS"][ctx.message.channel.id]["board"] = self.empty_board(2)
                        else:
                            self.game["CHANNELS"][ctx.message.channel.id]["boardSize"] = 0
                            self.game["CHANNELS"][ctx.message.channel.id]["board"] = self.empty_board(0)
                        await self.reset_voting(ctx)
                        # Save it all.
                        fileIO(GAMES, "save", self.game)
                        fileIO(PLAYERS, "save", self.players)
                        # Output msg.
                        if activePlayers <= 1:
                            msg = ("\n I need at least one more player. \nType: '{}4row join' to join this game...\n{}".format(self.PREFIXES[0], msg))
                            return {"delMsg": True, "showMsg": True, "drawBoard": True, "msg": msg}
                        elif activePlayers >= 2:
                            msg = ("\n` Type '{}4row start' to play`{}".format(self.PREFIXES[0], msg))
                            return {"delMsg": True, "showMsg": True, "drawBoard": True, "msg": msg}
                        # NOTE(review): unreachable -- the two branches above already
                        # cover every activePlayers value (<= 1 or >= 2).
                        elif activePlayers >= self.settings["MAX_PLAYERS"]:
                            msg = ("{} ` Sorry no slots available, try again next game.`".format(user.mention))
                            return {"delMsg": False, "showMsg": True, "drawBoard": False, "msg": msg}
                else:
                    # NOTE(review): reached when no slot is free, yet the text says
                    # "No game pending" -- message and condition look mismatched; confirm.
                    msg = ("{} ` No game pending, type '{}4row new' to start one.`".format(user.mention, self.PREFIXES[0]))
                    return {"delMsg": False, "showMsg": True, "drawBoard": False, "msg": msg}
            else:
                # Channel has no entry in self.game["CHANNELS"]: returns the empty msg.
                return {"delMsg": True, "showMsg": True, "drawBoard": True, "msg": msg}
    # Leave the game.
    async def leave_game(self, ctx, user):
        """Remove *user* from this channel's game.

        Queued game ("inQue" == "yes"): the user's slot (name/id/token/turn) is
        deleted and the board is rescaled. Started game ("inQue" == "no"): the
        user is added to skipIds and removed from the turn order only. Returns
        {"delMsg", "showMsg", "drawBoard", "msg", "stopGame"}.
        """
        # Probe for the game; NOTE(review): `data` is never checked afterwards,
        # so when this lookup fails `activePlayers` is unbound and the loop
        # below raises NameError -- confirm callers always guard this.
        try:
            activePlayers = self.game["CHANNELS"][ctx.message.channel.id]["activePlayers"]
            inQue = self.game["CHANNELS"][ctx.message.channel.id]["inQue"]
            data = True
        except Exception as e:
            logger.info(e)
            data = False
        # NOTE(review): removeBot is never read again -- looks like dead code.
        if user == ctx.message.server.me:
            removeBot = True
        else:
            removeBot = False
        deleted = False
        for usr in range(0, activePlayers):
            if user.id == self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]["IDS"][usr]:# Get user position.
                # turnIds and activePlayers are crucial for an started game.
                if self.game["CHANNELS"][ctx.message.channel.id]["inQue"] == "yes":
                    self.players["PLAYERS"][user.id]["boardId"] = "noGame"
                    self.players["PLAYERS"][user.id]["MSG"]["playerMsg"] = "has left"
                    # Delete user from game.
                    for usr2 in range(activePlayers): # usr2 == usr1 ?
                        if self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]["IDS"][usr2] == user.id:
                            # Remove board id from user.
                            self.players["PLAYERS"][user.id]["boardId"] = "noGame"
                            self.players["PLAYERS"][user.id]["MSG"]["playerMsg"] = "has left"
                            # Delete user items from game.
                            del self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]["IDS"][usr2]
                            del self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]["NAMES"][usr2]
                            del self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]["TOKENS"][usr2]
                            del self.game["CHANNELS"][ctx.message.channel.id]["turnIds"][usr2]
                            activePlayers -= 1
                            self.game["CHANNELS"][ctx.message.channel.id]["activePlayers"] = activePlayers
                            # Adjust "empty" board size.
                            if activePlayers <= 2:
                                self.game["CHANNELS"][ctx.message.channel.id]["boardSize"] = 0
                            elif activePlayers == 3:
                                self.game["CHANNELS"][ctx.message.channel.id]["boardSize"] = 1
                            elif activePlayers == 4:
                                self.game["CHANNELS"][ctx.message.channel.id]["boardSize"] = 2
                            else:
                                self.game["CHANNELS"][ctx.message.channel.id]["boardSize"] = 0
                            deleted = True
                            break
                elif self.game["CHANNELS"][ctx.message.channel.id]["inQue"] == "no":
                    # Set next turn before we skip id.
                    self.next_turn(ctx, user)# May need a check for turn, for now it seems to work ok.
                    # Remove board id from user.
                    self.players["PLAYERS"][user.id]["boardId"] = "noGame"
                    self.players["PLAYERS"][user.id]["MSG"]["playerMsg"] = "has left"
                    # Ad user id to skipIds.
                    self.game["CHANNELS"][ctx.message.channel.id]["skipIds"].append(user.id)
                    # Remove player from turnIds.
                    for usr3 in range(len(self.game["CHANNELS"][ctx.message.channel.id]["turnIds"])):
                        if self.game["CHANNELS"][ctx.message.channel.id]["turnIds"][usr3] == user.id:
                            del self.game["CHANNELS"][ctx.message.channel.id]["turnIds"][usr3]
                            deleted =True
                            break
                    break
                break
        fileIO(PLAYERS, "save", self.players)
        fileIO(GAMES, "save", self.game)
        inGameIds = len(self.game["CHANNELS"][ctx.message.channel.id]["turnIds"])
        # Check amount of users in still in-game.
        stopGame = False
        if inGameIds < self.settings["MIN_PLAYERS"] and inQue == "no":
            stopGame = True
        else:
            stopGame = False
        # When one player left in active game.
        if deleted and stopGame and inQue == "no":
            #await self.stop_game(ctx)
            msg = ("\n{} `I've removed you from the game. Well done {}, you ruined the game...`".format(user.mention, user))
            return {"delMsg": False, "showMsg": True, "drawBoard": True, "msg": msg, "stopGame": True}
        # When game in queue.
        elif deleted and not stopGame:
            msg = ("\n{} ` has left the game.`".format(user.mention))
            return {"delMsg": True, "showMsg": True, "drawBoard": True, "msg": msg, "stopGame": False}
        # This shouldn't happen.
        else:
            logger.info("Error at leave_game.\n id:{}, MIN_PLAYERS{}, stopGame:{} ".format(user.id, self.settings["MIN_PLAYERS"], str(stopGame)))
            await self.dump_data()
            msg = ("\n{} ` Unknown error.`".format(user.mention))
            return {"delMsg": True, "showMsg": True, "drawBoard": False, "msg": msg, "stopGame": False}
# Start the game.
async def start_game(self, ctx, userId):
self.game["CHANNELS"][ctx.message.channel.id]["inQue"] = 'no'
await self.reset_voting(ctx)
self.stats["gamesStarted"] += 1
fileIO(STATS, "save", self.stats)
fileIO(GAMES, "save", self.game)
# Stop the game.
async def stop_game(self, ctx):
    """Tear down this channel's game: detach every joined player, delete
    the channel entry and persist everything.

    Both failure paths log and dump state instead of raising, so corrupt
    JSON cannot crash the cog.
    """
    # Remove boardId from joined users of the game.
    try:
        for usr in range(len(self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]["IDS"])):
            self.players["PLAYERS"][self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]["IDS"][usr]]["boardId"] = "noGame"
    except Exception as e:
        logger.info(e)
        logger.info("Error deleting {} from users. It seems that the game with user ids is already deleted elsewhere, check code and JSON)".format(ctx.message.channel.id))
        await self.dump_data()
    # Remove channel from games.
    try:
        del self.game["CHANNELS"][ctx.message.channel.id]
        self.stats["gamesStopped"] += 1
        fileIO(STATS, "save", self.stats)
    except Exception as e:
        logger.info(e)
        logger.info("Error deleting {} from games. It seems that this game is already deleted elsewhere, check code and JSON)".format(ctx.message.channel.id))
        await self.dump_data()
    fileIO(GAMES, "save", self.game)
    fileIO(PLAYERS, "save", self.players)
# Reset/update these after changes.
async def reset_voting(self, ctx):
    """Clear the channel's stop-vote tally and refresh its activity timestamp."""
    now = round(time.time())
    self.game["CHANNELS"][ctx.message.channel.id]["lastActivity"] = now
    self.game["CHANNELS"][ctx.message.channel.id]["VOTES_STP"]["votes"] = 0
    self.game["CHANNELS"][ctx.message.channel.id]["VOTES_STP"]["voteIds"] = []
    fileIO(GAMES, "save", self.game)
# Returns avaiable tokens in message format.
async def msg_available_tokens(self):
    """Build a chat message listing the selectable tokens, four per line.

    Token index 0 is the "none" placeholder and is dropped before display;
    the number shown to the user is the 1-based index into the remaining list.
    """
    tokens = deepcopy(self.TOKENS)
    tokens.pop(0)  # index 0 is the "none" placeholder, not selectable
    done = 0
    lenLang = len(tokens)
    msg = ""
    while (done < lenLang):
        w=done+4  # exclusive end index of this display row (4 tokens per row)
        while (w > done and done < lenLang):
            msg = msg + "{} = {}, ".format(str(done+1), tokens[done][1])
            done += 1
        msg = msg + "\n"
        # NOTE(review): this extra increment advances past a token after every
        # row of four, so every fifth token is never listed — confirm intent.
        done += 1
    if len(msg) > 10:
        msg = "\nAvailable Tokens for Four in a row (type !setmytoken to Choose):\n\n{}\n".format(msg)
    return msg
# Returns an number array of unused tokens.
async def token_switch(self, ctx, user="", newToken=0):
    """Set a player's preferred token and, when possible, assign it in-game.

    Parameters:
        ctx: command context; the channel selects which game is affected.
        user: discord member changing tokens; "" is treated as a caller error.
        newToken: index into self.TOKENS; 0 means "load the stored preference"
            (the join-game path), >= 1 is an explicit !setmytoken choice.

    Returns a status message string for chat.
    """
    msg = "\n`Error token switch`"
    if user == "":
        logger.info("Error token_switch, check script for 'user'.\n ctx: {}, newToken: {}'".format(ctx, newToken))
        return msg
    usedTokens = []
    availableTokens = []
    # Check if user has joined the channel game.
    # BUGFIX: was `is` (identity comparison) — compare the ids by value.
    if self.players["PLAYERS"][user.id]["boardId"] == ctx.message.channel.id:
        # Assign with newToken >= 1 (!mytoken) OR load with newToken = 0 (join game).
        if newToken == 0:
            newToken = self.players["PLAYERS"][user.id]["tokenPreferred"]
        # BUGFIX: pre-set inQue so the `elif` below cannot hit an unbound
        # name when the channel lookup in the try-block fails.
        inQue = ""
        try:
            inQue = self.game["CHANNELS"][ctx.message.channel.id]["inQue"]
            CH_PLAYERS = self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"]
            usedTokens = CH_PLAYERS["TOKENS"]
            data = True
        except Exception as e:
            logger.info(e)
            data = False
        # Preferred has no special conditions.
        self.players["PLAYERS"][user.id]["tokenPreferred"] = newToken
        if data and inQue == "yes":
            # Filter usable values from TOKENS (all indexes are candidates).
            for (key, t) in enumerate(self.TOKENS):
                availableTokens.append(key)
            unusedTokens = self.get_unused(availableTokens, usedTokens)
            # Get user index position in game.
            userPos = -1
            if len(CH_PLAYERS["IDS"]) >= 0:
                for usr in range(len(CH_PLAYERS["IDS"])):
                    if CH_PLAYERS["IDS"][usr] == user.id:
                        userPos = usr
                        break
            if newToken not in usedTokens:
                # Preferred token is free — assign it in the game.
                CH_PLAYERS["TOKENS"][userPos] = newToken
                self.players["PLAYERS"][user.id]["tokenAssinged"] = newToken
                msg = ""
            else:
                # Preferred token is taken — assign a random unused one.
                newToken = random.choice(unusedTokens)
                CH_PLAYERS["TOKENS"][userPos] = newToken
                self.players["PLAYERS"][user.id]["tokenAssinged"] = newToken
                msg = ( "\n\n{}`Your preferred token is set, but already used in the current game or default...\nSo I've randomly assiged you: {}`{}\n`Type {}help 4row for more info\n `"
                    .format(user.mention, self.TOKENS[newToken][0], self.TOKENS[newToken][1], self.PREFIXES[0]))
        elif inQue == "no":
            # Game already running: only store the preference for next game.
            self.players["PLAYERS"][user.id]["tokenPreferred"] = newToken
            msg = ("\n\n{}`Your preferred token is: {}` {}\n`Since you are in game, it wil be available next game`"
                .format(user.mention, self.TOKENS[newToken][0], self.TOKENS[newToken][1]))
        # Save it all.
        fileIO(PLAYERS, "save", self.players)
        fileIO(GAMES, "save", self.game)
        return msg
    else:  # Not in game: just remember the preference.
        self.players["PLAYERS"][user.id]["tokenPreferred"] = newToken
        msg = ( "\n`Your preferred token is: {}\n ` {}"
            .format(self.TOKENS[newToken][0], self.TOKENS[newToken][1]))
        fileIO(PLAYERS, "save", self.players)
        return msg
# Check id is turn.
async def my_turn(self, ctx, userId):
    """Return True when `userId` is the player whose turn it currently is.

    turnIds[0] always holds the id that is on turn (next_turn rotates it).
    The previous if/elif/else had an unreachable else branch (== and !=
    cover all cases); a direct comparison is equivalent.
    """
    return userId == self.game["CHANNELS"][ctx.message.channel.id]["turnIds"][0]
# Check if there are no empty spaces anywhere on the board.
def board_full(self, ctx):
    """Return True when no cell on this channel's board is still empty."""
    channelGame = self.game["CHANNELS"][ctx.message.channel.id]
    grid = channelGame["board"]
    sizeIdx = channelGame["boardSize"]
    rows = self.settings["BOARDHEIGHT"][sizeIdx]
    cols = self.settings["BOARDWIDTH"][sizeIdx]
    # Every cell must hold something other than the EMPTY marker.
    return all(
        grid[row][col] != self.EMPTY
        for row in range(rows)
        for col in range(cols)
    )
# Return the row number of the lowest empty row in the given column.
def lowest_empty_space(self, ctx, column):
    """Return the row index of the bottom-most empty cell in `column`,
    or -1 when the column is already full."""
    channelGame = self.game["CHANNELS"][ctx.message.channel.id]
    grid = channelGame["board"]
    height = self.settings["BOARDHEIGHT"][channelGame["boardSize"]]
    # Scan upward from the bottom row; the first empty cell wins.
    return next(
        (row for row in range(height - 1, -1, -1) if grid[row][column] == self.EMPTY),
        -1,
    )
# Make a move and set next turn.
async def make_move(self, ctx, user, column, freePos):
    """Place `user`'s token at row `freePos` of `column`, update the
    player's move statistics and reset the channel's vote/activity state.

    `freePos` is expected to come from lowest_empty_space().
    """
    userToken = self.TOKENS[0][0] # Tracing fallback ("none") if the user is not found below.
    CH_GAME = self.game["CHANNELS"][ctx.message.channel.id]
    CH_PLAYERS = CH_GAME["PLAYERS"]
    BOARD_SIZE = CH_GAME["boardSize"]
    BOARDHEIGHT = self.settings["BOARDHEIGHT"][BOARD_SIZE]
    stringgame = CH_GAME["board"]  # NOTE(review): unused local — confirm it can be dropped.
    activePlayers = CH_GAME["activePlayers"]
    # Set time it took to make a move, and update the number of moves.
    self.players["PLAYERS"][user.id]["STATS"]["totalMoves"] += 1
    totalMoves = self.players["PLAYERS"][user.id]["STATS"]["totalMoves"]
    now = round(time.time())
    # NOTE(review): divides time-since-last-channel-activity by the player's
    # lifetime move count and overwrites the stored value — looks off for an
    # "average turn time"; confirm intent.
    turnTime = (now - CH_GAME["lastActivity"])/totalMoves
    self.players["PLAYERS"][user.id]["STATS"]["averageTimeTurn"] = turnTime
    fileIO(PLAYERS, "save", self.players)
    # Get position of user, then look up the token assigned to that slot.
    userPos = -1
    if activePlayers >= 1:
        for usr in range(activePlayers):
            if CH_PLAYERS["IDS"][usr] == user.id:
                userPos = usr
                break
        userToken = self.TOKENS[CH_PLAYERS["TOKENS"][userPos]][0]
    # Add token to board at given column.
    for y in range(BOARDHEIGHT-1, -1, -1):
        if y == freePos:# When indexing reaches free position.
            self.game["CHANNELS"][ctx.message.channel.id]["board"][y][column] = userToken
    await self.reset_voting(ctx)
    # Save it.
    self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"] = CH_PLAYERS
    fileIO(GAMES, "save", self.game)
# Returns an unused index of an araay.
def get_unused(self, arrayAvailable, arrayUsed):
    """Return the values of arrayAvailable that are absent from arrayUsed,
    de-duplicated, keeping first-seen order."""
    deduped = dict.fromkeys(arrayAvailable)  # insertion-ordered dedup
    return [value for value in deduped if value not in arrayUsed]
# Set next turn (array order).
def next_turn(self, ctx, user):
    """Advance the turn order: rotate turnIds left by one so the next
    player sits at index 0, skipping any player who already left.

    Only rotates when `user` actually is the player on turn; the updated
    game state is persisted in every case.
    """
    CH_GAME = self.game["CHANNELS"][ctx.message.channel.id]
    CH_PLAYERS = CH_GAME["PLAYERS"]
    skipIds = CH_GAME["skipIds"]
    turnIds = CH_GAME["turnIds"]
    activePlayers = CH_GAME["activePlayers"]
    userPos = -1
    if activePlayers >= 1:
        for usr in range(activePlayers):
            if CH_PLAYERS["IDS"][usr] == user.id:
                userPos = usr
                break
        if turnIds[0] == CH_PLAYERS["IDS"][userPos]:
            nextTurn = self.shift(turnIds, -1)  # Rotate to the next id.
            # BUGFIX: the skip used to re-shift the ORIGINAL list, which
            # produced the same rotation and therefore never skipped anyone.
            # Keep shifting the rotated list past every player who left
            # (bounded so an all-left game cannot loop forever).
            for _ in range(len(nextTurn)):
                if nextTurn[0] not in skipIds:
                    break
                nextTurn = self.shift(nextTurn, -1)  # Skip id.
            self.game["CHANNELS"][ctx.message.channel.id]["turnIds"] = nextTurn
    # Save all.
    self.game["CHANNELS"][ctx.message.channel.id]["PLAYERS"] = CH_PLAYERS
    fileIO(GAMES, "save", self.game)
# Check if a certain token makes a winner.
def is_winner(self, ctx, tile):
    """Return True when four `tile` tokens line up vertically, horizontally
    or on either diagonal of this channel's board.

    The board is indexed board[row][col], row 0 at the top.
    """
    board = self.game["CHANNELS"][ctx.message.channel.id]["board"]
    BOARD_SIZE = self.game["CHANNELS"][ctx.message.channel.id]["boardSize"]
    BOARDHEIGHT = self.settings["BOARDHEIGHT"][BOARD_SIZE]
    BOARDWIDTH = self.settings["BOARDWIDTH"][BOARD_SIZE]
    # Check vertical runs (the first index is the row, so varying x walks a column).
    for x in range(BOARDHEIGHT - 3):
        for y in range(BOARDWIDTH):
            if board[x][y] == tile and board[x+1][y] == tile and board[x+2][y] == tile and board[x+3][y] == tile:
                return True
    # Check horizontal runs (four in the same row).
    for x in range(BOARDHEIGHT):
        for y in range(BOARDWIDTH - 3):
            if board[x][y] == tile and board[x][y+1] == tile and board[x][y+2] == tile and board[x][y+3] == tile:
                return True
    # Check / diagonal (down and to the left).
    for x in range(BOARDHEIGHT - 3):
        for y in range(3, BOARDWIDTH):
            if board[x][y] == tile and board[x+1][y-1] == tile and board[x+2][y-2] == tile and board[x+3][y-3] == tile:
                return True
    # Check \ diagonal (down and to the right).
    for x in range(BOARDHEIGHT - 3):
        for y in range(BOARDWIDTH - 3):
            if board[x][y] == tile and board[x+1][y+1] == tile and board[x+2][y+2] == tile and board[x+3][y+3] == tile:
                return True
    return False
# Retuns a list of top scores.
async def get_rankings(self, ctx, userId=None):
    """Collect every registered player's points, sorted high to low.

    Returns {"topScore": [(id, points, name), ...], "userIdRank": rank}
    where rank is the 1-based position of the calling author (0 if absent).
    """
    caller = ctx.message.author
    leaderboard = [
        (pid, record["STATS"]["points"], record["playerName"])
        for pid, record in self.players["PLAYERS"].items()
    ]
    leaderboard.sort(key=lambda row: row[1], reverse=True)
    callerRank = 0
    for position, row in enumerate(leaderboard, start=1):
        if row[0] == caller.id:
            callerRank = position
            break
    return {"topScore": leaderboard, "userIdRank": callerRank}
# Update statistics of ingame players.
async def update_score(self, ctx):
    """Apply end-of-game rewards and statistics to every player of this
    channel's game.

    Reads the winner from the game state ("draw" or a user id) and updates
    each player's draw/win/loss counters, points, average game time and
    queue message; the winner additionally receives bank credits.
    """
    try:
        CH_GAME = self.game["CHANNELS"][ctx.message.channel.id]
        CH_PLAYERS = CH_GAME["PLAYERS"]
        winnerId = CH_GAME["winner"]
    except Exception as e:
        logger.info(e)
        logger.info("Error getting IDS @ update_score, check code and json dump.")
        await self.dump_data()
        return
    user = ctx.message.author
    now = round(time.time())
    gameDuration = now-CH_GAME["gameStarted"]
    # Set score to all players in game.
    for usr in range(0, len(CH_PLAYERS["IDS"])):
        userId = CH_PLAYERS["IDS"][usr]
        if winnerId == "draw":# Draw: everyone gets the draw reward.
            stats = self.players["PLAYERS"][userId]["STATS"]
            self.players["PLAYERS"][userId]["STATS"]["draw"] += 1
            self.players["PLAYERS"][userId]["STATS"]["points"] += self.settings["REWARDS"]["DRAW"]
            userTotalGames = stats["won"]+stats["loss"]+stats["draw"]+stats["wasted"]
            avarageTimeGame = (stats["avarageTimeGame"] + gameDuration)/userTotalGames
            # NOTE(review): `+=` accumulates on top of the freshly computed
            # average — looks like plain assignment was intended; confirm.
            self.players["PLAYERS"][userId]["STATS"]["avarageTimeGame"] += avarageTimeGame
            msg = self.get_queue_msg(stats)
            self.players["PLAYERS"][userId]["MSG"]["joiningMsg"] = msg
            continue# Skip next checks.
        elif winnerId == userId:# Winner: points plus bank credits.
            userWinner = discord.utils.get(ctx.message.server.members, id = userId)
            bank = self.bot.get_cog('Economy').bank
            # NOTE(review): flat 2500 credits is hard-coded here rather than
            # taken from settings["REWARDS"] — confirm that is intentional.
            pay = bank.get_balance(userWinner) + 2500
            bank.set_credits(userWinner, pay)
            stats = self.players["PLAYERS"][userId]["STATS"]
            self.players["PLAYERS"][userId]["STATS"]["won"] += 1
            self.players["PLAYERS"][userId]["STATS"]["points"] += self.settings["REWARDS"]["WINNING"]
            userTotalGames = stats["won"]+stats["loss"]+stats["draw"]+stats["wasted"]
            avarageTimeGame = (stats["avarageTimeGame"] + gameDuration)/userTotalGames
            self.players["PLAYERS"][userId]["STATS"]["avarageTimeGame"] += avarageTimeGame
            msg = self.get_queue_msg(stats)
            self.players["PLAYERS"][userId]["MSG"]["joiningMsg"] = msg
            continue
        elif winnerId != userId:# Must be one of the losers.
            stats = self.players["PLAYERS"][userId]["STATS"]
            self.players["PLAYERS"][userId]["STATS"]["loss"] += 1
            self.players["PLAYERS"][userId]["STATS"]["points"] += self.settings["REWARDS"]["LOSING"]
            userTotalGames = stats["won"]+stats["loss"]+stats["draw"]+stats["wasted"]
            avarageTimeGame = (stats["avarageTimeGame"] + gameDuration)/userTotalGames
            self.players["PLAYERS"][userId]["STATS"]["avarageTimeGame"] += avarageTimeGame
            msg = self.get_queue_msg(stats)
            self.players["PLAYERS"][userId]["MSG"]["joiningMsg"] = msg
            continue
    fileIO(PLAYERS, "save", self.players)
    return
# Get the queue message / Rank.
def get_queue_msg(self, stats):
    """Return the rank/status label shown next to a queued player.

    Thresholds come from settings["TRIG_QUEUE_MSG"]; each entry is
    [label, min_games, min_ratio, suffix]. Draws count toward wins and
    wasted games toward losses when computing the win ratio.
    (Removed an unused local `ponts` — a typo of "points" that was never read.)
    """
    qMsgTrig = deepcopy(self.settings["TRIG_QUEUE_MSG"])
    msg = "hoax"  # Sentinel; should always be overwritten below.
    won = stats["won"]+stats["draw"]
    lost = stats["loss"]+stats["wasted"]
    total = won+lost
    if total == 0:
        total = 1  # Ensure a safe calc (avoid division by zero).
    ratio = float(won)/(total)
    # Newbie.
    if total <= qMsgTrig[1][1]:
        msg = qMsgTrig[1][0]
    # No Newbie anymore.
    elif total > qMsgTrig[1][1]:
        # Remove entries that can no longer apply.
        del qMsgTrig[0]  # Minimum placeholder.
        del qMsgTrig[0]  # Newbie.
        lenList = len(qMsgTrig)  # lenList is absolute max. (flood for loop.)
        msg = qMsgTrig[0][0]  # Player is at least the lowest remaining rank.
        for m in range(0, lenList):
            # A player needs the minimum ratio AND total games to step up.
            if ratio >= qMsgTrig[m][2] and total >= qMsgTrig[m][1]:
                msg = qMsgTrig[m][0]  # Set lvl value as msg.
                if ratio >= qMsgTrig[m+1][2] and total >= qMsgTrig[m+1][1]:
                    # Also qualifies for the next rank; keep climbing.
                    msg = qMsgTrig[m+1][0]
                else:
                    # Player will not step up; the loop ends here.
                    msg = qMsgTrig[m][0]
                    break  # No need to check any further.
            if m >= lenList-1:
                break  # Prevent IndexError on the m+1 lookahead.
    return msg
# Draw the board to chat.
async def draw_board(self, ctx, comment, DM=False):
    """Render the board, the player-slot list and `comment` into one chat
    message and send it to the channel (or to the author when DM=True).

    Falls back to an empty default board + explanation when the channel
    has no game data.
    """
    user = ctx.message.author
    try: # Get exitsing game data.
        CH_GAME = self.game["CHANNELS"][ctx.message.channel.id]
        board = CH_GAME["board"]
        BOARD_SIZE = CH_GAME["boardSize"]
        turn = CH_GAME["turnIds"]
        inQue = CH_GAME["inQue"]
        CH_PLAYERS = CH_GAME["PLAYERS"]
        skipIds = CH_GAME["skipIds"]
        activePlayers = CH_GAME["activePlayers"]
        data = True
    except: # An empty board.
        board = self.empty_board(0)
        BOARD_SIZE = 0
        turn = ["none"]
        inQue = "yes"
        CH_PLAYERS = {"IDS": [], "NAMES": [], "TOKENS": []}
        skipIds = ["none"]
        comment = "\n` There you have it, an empty 'Four in a row' board.\nTo create a new game type: '{}4row new'`".format(self.PREFIXES[0])
        activePlayers = 0
        data = False
    userComment = "nomsg"
    # Pre-fill every display slot as empty.
    slots = {"IDS": [], "NAMES": [], "TOKENS": [], "MSG": []}
    for slot in range(self.settings["MAX_PLAYERS"]):
        slots["IDS"].append("noId")
        slots["NAMES"].append("- EmptySlot #" + str(slot+1)+ " -")
        slots["TOKENS"].append(self.ICONS[1][1])
        slots["MSG"].append(userComment)
    # Fill slot display up with players.
    # NOTE(review): len() never returns None, so this condition is always
    # True; harmless because the loop handles an empty list anyway.
    if len(CH_PLAYERS["IDS"]) is not None:
        for usr in range(len(CH_PLAYERS["IDS"])):
            slots["IDS"][usr] = CH_PLAYERS["IDS"][usr]
            slots["NAMES"][usr] = CH_PLAYERS["NAMES"][usr]
            slots["TOKENS"][usr] = CH_PLAYERS["TOKENS"][usr]
            playerMsg = self.players["PLAYERS"][CH_PLAYERS["IDS"][usr]]["MSG"]["playerMsg"]
            joiningMsg = self.players["PLAYERS"][CH_PLAYERS["IDS"][usr]]["MSG"]["joiningMsg"]
            # Pick the queue message before the game starts, the player
            # message once it is running; "nomsg" means show nothing.
            if CH_PLAYERS["IDS"][usr] in self.players["PLAYERS"] and playerMsg != "nomsg" or joiningMsg != "nomsg":
                if self.settings["ENA_QUEUE_MSG"] and inQue == 'yes':
                    slots["MSG"][usr] = self.players["PLAYERS"][CH_PLAYERS["IDS"][usr]]["MSG"]["joiningMsg"]
                elif self.settings["ENA_QUEUE_MSG"] and inQue == 'no':
                    slots["MSG"][usr] = self.players["PLAYERS"][CH_PLAYERS["IDS"][usr]]["MSG"]["playerMsg"]
                else:
                    slots["MSG"][usr] = userComment
            else:
                slots["MSG"][usr] = userComment
    tokensWidth = []
    tokensHeight = []
    tokenDef = None
    msgBoard = '\n'
    # Build up a board, note: Display index of tokens != game index of tokens (x=y, y=x).
    for w in range(self.settings["BOARDWIDTH"][BOARD_SIZE]):
        msgBoard = msgBoard+emoji.emojize(self.BOARD_HEADER[w])
    msgBoard = msgBoard+'\n'
    for x in range(self.settings["BOARDHEIGHT"][BOARD_SIZE]):#6 = default
        for y in range(self.settings["BOARDWIDTH"][BOARD_SIZE]):#7 = default
            for z in range(len(self.TOKENS)):
                if board[x][y] == self.TOKENS[z][0]:
                    tokenDef = emoji.emojize(self.TOKENS[z][1], use_aliases=True)# User Token.
                elif board[x][y] == self.EMPTY:
                    tokenDef = emoji.emojize(self.ICONS[0][1], use_aliases=True)# Black.
            tokensWidth.append(tokenDef)
            msgBoard = (msgBoard+tokenDef)
        tokensHeight.append(tokensWidth)
        msgBoard = (msgBoard+'\n')
    # Set-up user name/slot display.
    playerIs = ''
    if inQue == 'yes': # Draw slots.
        slotsLen = self.settings["MAX_PLAYERS"]
    elif inQue == 'no':# Game is started.
        slotsLen = activePlayers
    turnUserMsg = " "
    # NOTE(review): mentionPlayer is only bound inside the loop below; a
    # zero-slot game would leave it undefined at the send — confirm slotsLen >= 1.
    for usr in range(0, slotsLen):
        # Get player message.
        if slots["MSG"][usr] != "nomsg":
            if slots["IDS"][usr] == ctx.message.server.me.id:# Player is bot.
                userComment = " ("+("Initialising Cheats...")+")"
            else:
                userComment = " ("+str(slots["MSG"][usr])+")"
        else:
            userComment = ""
        # Get token/icon for slot.
        if slots["IDS"][usr] == "noId":
            tToken = emoji.emojize(' '+self.ICONS[1][1]) # Pointing arrow.
        else:
            tToken = emoji.emojize(self.TOKENS[CH_PLAYERS["TOKENS"][usr]][1])# Player token.
        # Game has started.
        if inQue == 'no' and turn[0] == slots["IDS"][usr] and slots["IDS"][usr] not in skipIds:
            mentionPlayer = turn[0]
            turnUserMsg = (slots["NAMES"][usr]+"'s turn:")
            # Highlight players.
            ul = len(slots["IDS"][usr])
            sp = ' «`'# Discord username max length = 32 +8
            sp = sp[ul:]
            playerIs = playerIs + (' '+ tToken + ' ` ' + slots["NAMES"][usr] + sp + '\n')
        # Game in Queue.
        else:
            if inQue == 'no' and slots["IDS"][usr] not in skipIds:
                mentionPlayer = turn[0]# Should not be necessary.
                playerIs = playerIs + (' '+ tToken + ' ' + slots["NAMES"][usr] + '\n')
            elif inQue == 'no' and slots["IDS"][usr] in skipIds:
                mentionPlayer = turn[0]
                playerIs = playerIs + (' '+ tToken + ' ' + '~~' + slots["NAMES"][usr]+'~~' + '\n')
            elif inQue == 'yes':
                mentionPlayer = user.id
                if not data:
                    turnUserMsg = "An unboxed game:"
                elif data:
                    turnUserMsg = "Game in queue:"
                playerIs = playerIs + (' '+ tToken + ' ' + slots["NAMES"][usr] + userComment + '\n')
    # Output board.
    if DM:
        await self.bot.send_message(ctx.message.author, "{}\n{}\n**{}**\n{}{}\n\n".format('<@'+mentionPlayer+'>', msgBoard, turnUserMsg, playerIs, comment))
    elif not DM:
        await self.bot.send_message(ctx.message.channel, "{}\n{}\n**{}**\n{}{}\n\n".format('<@'+mentionPlayer+'>', msgBoard, turnUserMsg, playerIs, comment))
# Shift an array.
def shift(self, seq, n):
    """Return a new list with `seq` rotated by n positions
    (positive n moves items toward higher indexes)."""
    length = len(seq)
    return [seq[(idx - n) % length] for idx in range(length)]
# Dump data to json (from errors).
async def dump_data(self):
    """Write a timestamped JSON snapshot of all cog state (game, settings,
    players, stats) for post-mortem debugging after an error."""
    now = round(time.time())
    # NOTE(review): "\_" is a literal backslash + underscore in the filename,
    # not a path separator — confirm the intended file name/location.
    f = "{}\_datadump{}.json".format(DIR_DATA, str(now))
    s = "##############################################"  # visual divider between sections
    jsons = [s, self.game, s, self.settings, s, self.players, s, self.stats, s]
    data = []
    for d in jsons:
        data.append(d)
    logger.info("Dumping data in: {}".format(f))
    fileIO(f, "save", data)
# Delete my message from chat.
async def delete_message(self, ctx, number=1, delComm=False):
    """Delete up to `number` of the bot's own recent messages in this channel.

    When delComm is True and the bot has manage-messages permission, the
    invoking command message is removed as well. Walks the channel history
    in pages of 100 until enough bot messages were removed or none are left.
    """
    server = ctx.message.server
    can_delete = ctx.message.channel.permissions_for(server.me).manage_messages
    user = ctx.message.server.me
    message = ctx.message
    cmdmsg = message  # remember the command message for optional cleanup
    if number > 0 and number < 10000:
        while True:
            new = False
            async for x in self.bot.logs_from(ctx.message.channel, limit=100, before=message):
                if number == 0:
                    if delComm and can_delete:
                        try:
                            await self.bot.delete_message(cmdmsg)
                        except Exception as e:
                            # BUGFIX: was `logger,info(e)` — a tuple expression
                            # that never logged anything.
                            logger.info(e)
                            logger.info("I need more permissions @ {} to delete messages other than my own.".format(ctx.message.channel))
                    return
                if x.author.id == user.id:
                    await self.bot.delete_message(x)
                    number -= 1
                    new = True
                message = x
            if not new or number == 0:
                await self.bot.delete_message(cmdmsg)
                break
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Bot Player Specific Functions
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# The bot needs cheats.
def bot_move(self, ctx):
    """Pick a column for the bot player via the fitness search below.

    NOTE(review): the bare `return` after the local setup exits immediately,
    so everything below it is currently dead code (the bot AI is disabled);
    when re-enabled it returns a random best column index, or None when no
    valid move exists. Debug prints are still in place.
    """
    CH_GAME = self.game["CHANNELS"][ctx.message.channel.id]
    board = CH_GAME["board"]
    BOARD_SIZE = CH_GAME["boardSize"]
    BOARDHEIGHT = self.settings["BOARDHEIGHT"][BOARD_SIZE]
    BOARDWIDTH = self.settings["BOARDWIDTH"][BOARD_SIZE]
    DIFFICULTY = CH_GAME["botDifficulty"]
    return
    # --- unreachable while the early return above is in place ---
    botTokenAssigned = self.players["PLAYERS"][ctx.message.server.me.id]["tokenAssinged"]
    botToken = self.TOKENS[botTokenAssigned][0]
    potentialMoves = self.potential_moves(ctx, botToken, DIFFICULTY)
    print("potentialMoves")
    print(potentialMoves)
    # Get the best fitness from the potential moves.
    bestMoveFitness = -1
    for i in range(BOARDWIDTH):
        print(i)
        if potentialMoves[i] > bestMoveFitness and self.valididate_move(ctx, i):
            bestMoveFitness = potentialMoves[i]
    print("bestmovefitness:")
    print(bestMoveFitness)
    # Find all potential moves that have this best fitness.
    bestMoves = []
    for i in range(len(potentialMoves)):
        if potentialMoves[i] >= bestMoveFitness and self.valididate_move(ctx, i):
            bestMoves.append(i)
    print("bestmoves/random")
    print(bestMoves)
    if bestMoves != []:
        return random.choice(bestMoves)
# Cheats (bot and admins only), figure out the best move to make.
def potential_moves(self, ctx, tile, lookAhead):
    """Score each column for the bot: a fitness value per column, recursing
    `lookAhead` plies deep.

    NOTE(review): this operates on the LIVE game state — `ctx2 = ctx` below
    is an alias, not a copy, so the recursive "what if" checks see the real
    board; the in-code comment already flags that a deep copy (or a
    board-only variant of these helpers) is needed. Debug prints remain.
    """
    CH_GAME = self.game["CHANNELS"][ctx.message.channel.id]
    board = CH_GAME["board"]
    BOARD_SIZE = CH_GAME["boardSize"]
    BOARDHEIGHT = self.settings["BOARDHEIGHT"][BOARD_SIZE]
    BOARDWIDTH = self.settings["BOARDWIDTH"][BOARD_SIZE]
    enemyTokens = []
    potentialMoves = [0] * BOARDWIDTH
    # Recursion base case: no depth left or nothing left to play.
    if lookAhead == 0 or self.board_full(ctx):
        return [0] * BOARDWIDTH
    for t in range(len(CH_GAME["PLAYERS"]["IDS"])):
        enemyTokens.append(self.TOKENS[CH_GAME["PLAYERS"]["TOKENS"][t]][0])
        enemys = t
    print("ensmys/enemy tokens")
    print(enemys)
    print(enemyTokens)
    # Two player test.
    # needs some kind of deep copy of ctx to check posibility's, or duplicate game functions to only check on a board.
    for firstMove in range(BOARDWIDTH):
        if not self.valididate_move(ctx, firstMove):
            print("{} - not valid".format(firstMove))
            continue
        if self.is_winner(ctx, enemyTokens[0]):
            potentialMoves[firstMove] = 1# Winning move gets a perfect fitness.
            print("{} - lowest_empty_space".format(firstMove))
            print(self.lowest_empty_space(ctx, firstMove))
            break# Don't bother calculating other moves.
        else:# No winning move, check another player.
            if self.board_full(ctx):
                potentialMoves[firstMove] = 0
            else:
                print("Check enemy")
                for counterMove in range(BOARDWIDTH): # Check counterMoves.
                    ctx2 = ctx#  NOTE(review): alias, not a copy — see docstring.
                    if not self.valididate_move(ctx2, counterMove):
                        print("{} - not valid Enemy".format(counterMove))
                        continue
                    if self.is_winner(ctx2, enemyTokens[1]):
                        potentialMoves[counterMove] = 1# Winning enemy move gets a worst fitness.
                        print("{} - lowest_empty_space Enemy".format(counterMove))
                        print(self.lowest_empty_space(ctx2, counterMove))
                        break# Don't bother calculating other moves.
                    else:
                        # Make a recursive call to potential_moves()
                        results = self.potential_moves(ctx2, enemyTokens[1], lookAhead - 1)
                        print(results)
                        potentialMoves[firstMove] += (sum(results) / BOARDWIDTH) / BOARDWIDTH
    return potentialMoves
# Validate move (bot), check if there is an empty space within the given column.
def valididate_move(self, ctx, column):
    """Validate a column choice: True when `column` is on the board and its
    top cell is still empty (i.e. the column is not full).
    """
    CH_GAME = self.game["CHANNELS"][ctx.message.channel.id]
    board = CH_GAME["board"]
    BOARD_SIZE = CH_GAME["boardSize"]
    BOARDWIDTH = self.settings["BOARDWIDTH"][BOARD_SIZE]
    # BUGFIX: the bound was compared against BOARDHEIGHT and the cell test
    # indexed board[column][0] (row `column`, first column) — on non-square
    # boards that is out of range and always the wrong cell. Columns must be
    # checked against BOARDWIDTH, and the top cell of a column is board[0][column].
    if column < 0 or column >= BOARDWIDTH or board[0][column] != self.EMPTY:
        return False
    return True
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Development Commands
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Removed » https://github.com/Canule/Red-DiscordBot/blob/develop/cogs/devt/devTool4row.py
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Set-up
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def check_folders():
    """Create the cog's data directory on first load if it is missing."""
    if not os.path.exists(DIR_DATA):
        print("Creating {} folder...".format(DIR_DATA))
        os.makedirs(DIR_DATA)
def check_files():
    """Create the cog's default JSON files (settings, game, players, stats)
    when they do not exist yet; existing files are left untouched."""
    # Board-size notes for the three BOARDWIDTH/BOARDHEIGHT presets:
    # 1 player, 21dots
    # 2 players, 42dots = 7*6, Original game
    # 3 players, 63dots = 8*7, 8*7=56(-7)« 9*8=72(+9)
    # 4 players, 84dots = 10*9, 10*9=90(+6)« 9*8=72(-12)
    settings = {
        "BOARDWIDTH": [7, 8, 10],
        "BOARDHEIGHT": [6, 7, 9],
        "BOARD_HEADER": [":one:", ":two:", ":three:", ":four:", ":five:", ":six:", ":seven:", ":eight:", ":nine:", ":keycap_ten:", ":a:", ":b:", ":c:"],
        "MIN_PLAYERS": 2,
        "MAX_PLAYERS": 2,
        "ENA_QUEUE_MSG": False,
        # Rank ladder entries: [label, min_games, min_ratio, suffix].
        "TRIG_QUEUE_MSG": [["", 0, 0.0, "x"],
            ["a Newbie", 3, 0.0, "Newbie"],
            ["a N00b", 5, 0.20, "*"],
            ["An average player", 5, 0.4, "**"],
            ["The Pro.", 10, 0.60, "****"],
            ["The Unbeatable", 10, 1.0, "*****"],
            ["", sys.maxsize, 2.0]],
        "MAX_LEN_USER_MSG": 30,
        "REWARDS": {"WINNING": 40, "LOSING": 20, "DRAW": 50, "RUIENING": -15},
        "TIME_PENALTY": {"SLOW_MOVES_TIME": [60, 80, 120], "POINTS": [-3,-2,-1]},
        "EXPIRE_TIME": 900,
        "VOTE_UNLOCK_TIME": 120,
        "MIN_VOTES_TO_UNLOCK": 2,
        "BOT_SETTINGS": {"ENABLED": False, "DEFAULT_DIFFICULTY": 1, "TOKEN": 3, "DIFFICULTY": {"EASY": 1 , "NOVICE": 2, "HARD": 4}},
        "ICONS": [["black", ":black_circle:"], ["arrow", "→"], ["recycle", ":recycle:"], ["cross", ":x:"]],
        # Token 0 ("none") is a placeholder, not selectable by players.
        "TOKENS": [["none", ":x:"], ["red circle", ":red_circle:"], ["blue circle", ":large_blue_circle:"], ["baseball", ":baseball:"], ["tennisball", ":tennis:"], ["8ball", ":8ball:"],
            ["basketball", ":basketball:"], ["cd", ":cd:"], ["dvd", ":dvd:"], ["full moon", ":full_moon:"], ["new moon", ":new_moon:"], ["rice cracker", ":rice_cracker:"],
            ["no entry", ":no_entry:"], ["cherries", ":cherries:"], ["cookie", ":cookie:"], ["clover", ":four_leaf_clover:"], ["cyclone", ":cyclone:"], ["sunflower", ":sunflower:"],
            ["mushroom", ":mushroom:"], ["heart", ":heart:"], ["snowflake", ":snowflake:"], ["Africa globe", ":earth_africa:"], ["Murica globe", ":earth_americas:"],
            ["asia globe", ":earth_asia:"]]}
    f = SETTINGS
    if not fileIO(f, "check"):
        print("Creating default fourinarow's settings.json...")
        fileIO(f, "save", settings)
    games = {"CHANNELS": {}}
    f = GAMES
    if not fileIO(f, "check"):
        print("Creating empty game.json...")
        fileIO(f, "save", games)
    players = {"PLAYERS": {}}
    f = PLAYERS
    if not fileIO(f, "check"):
        print("Creating empty players.json...")
        fileIO(f, "save", players)
    stats = {
        "gamesStarted": 0,
        "gamesStopped": 0,
        "gamesRuined": 0,
        "gamesTimedOut": 0,
        "gamesUnlocked": 0}
    f = STATS
    if not fileIO(f, "check"):
        print("Creating empty stats.json...")
        fileIO(f, "save", stats)
class ModuleNotFound(Exception):
    """Raised when an optional third-party module (e.g. `emoji`) is missing."""

    def __init__(self, m):
        # BUGFIX: also call the Exception initializer so args is populated
        # and repr()/pickling behave normally; keep .message for callers.
        super().__init__(m)
        self.message = m

    def __str__(self):
        return self.message
def setup(bot):
    """Cog entry point: prepare the data files and logger, verify the
    optional `emoji` dependency, then register the FourInARow cog."""
    global emoji
    global logger
    check_folders()
    check_files()
    logger = logging.getLogger("fourinarow")
    if logger.level == 0: # Prevents the logger from being loaded again in case of module reload
        logger.setLevel(logging.INFO)
        handler = logging.FileHandler(filename=LOGGER, encoding='utf-8', mode='a')
        handler.setFormatter(logging.Formatter('%(asctime)s %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
        logger.addHandler(handler)
    try:
        import emoji
        """The max amount of ':thumbsdown:' in one chat message = x166 with len2000 as max. allowed for msg. When using bytewise 👎 / Unicode instead, it becomes x1999.
        Most of these are supported by Discord http://www.emoji-cheat-sheet.com == https://github.com/carpedm20/emoji '\Python35\Lib\site-packages\emoji\\unicode_codes.py'
        *No need to modify the UFT-8 dataIO.py or store and manually maintain the modest array of all Unicode emoji in this script itself."""
    except:
        raise ModuleNotFound("emoji is not installed. Do 'pip3 install emoji --upgrade' to use this cog.")
    bot.add_cog(FourInARow(bot))
    logger.info("----Game Reloaded----")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018-2021 Accenture Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mercury.system.utility import Utility
class MultiLevelDict:
def __init__(self, data=None):
    """Wrap a dict for multi-level access via composite paths (e.g. "a.b[0].c").

    The caller's dict is kept by reference (not copied); an absent argument
    starts an empty dataset. Raises ValueError when data is not a dict.
    """
    self.util = Utility()
    self.normalized = False  # set by normalize_map() after the one-time rebuild
    self.dataset = dict() if data is None else data
    if not isinstance(self.dataset, dict):
        raise ValueError('Invalid input - Expect: dict, Actual: '+str(type(data)))
def get_dict(self):
    """Return the underlying dataset dict (by reference, not a copy)."""
    return self.dataset
@staticmethod
def is_digits(n: str):
for i in n:
if i < '0' or i > '9':
return False
return True
@staticmethod
def is_list_element(item: str):
return '[' in item and item.endswith(']') and (not item.startswith('['))
def set_element(self, composite_path: str, value: any, source_data: dict = None):
    """Store `value` at a composite path (e.g. "a.b[2].c") inside a dict.

    Intermediate dicts and lists are created on demand (lists padded with
    None up to the requested index). Writes into `source_data` when given,
    otherwise into self.dataset. Raises ValueError on a missing path or a
    non-dict target; path syntax is checked first.
    """
    if composite_path is None:
        raise ValueError('Missing composite_path')
    # Fail fast on malformed bracket syntax before touching any data.
    self.validate_composite_path_syntax(composite_path)
    data = self.dataset if source_data is None else source_data
    if not isinstance(data, dict):
        raise ValueError('Invalid input - Expect: dict, Actual: '+str(type(data)))
    # Path segments are separated by '.' or '/'.
    segments = self.util.multi_split(composite_path, './')
    if len(segments) == 0:
        return
    current = data
    size = len(segments)
    n = 0
    composite = ''  # dotted path of the segments walked so far
    for p in segments:
        n += 1
        if self.is_list_element(p):
            # Segment like "name[1][2]": split into key and index list.
            sep = p.index('[')
            indexes = self._get_indexes(p[sep:])
            element = p[0:sep]
            parent = self.get_element(composite+element, source_data)
            if n == size:
                # Last segment: store the value in the existing or a new list.
                if isinstance(parent, list):
                    self._set_list_element(indexes, parent, value)
                else:
                    new_list = list()
                    self._set_list_element(indexes, new_list, value)
                    current[element] = new_list
                break
            else:
                # Intermediate segment: descend into (or create) the dict
                # held at this list position.
                if isinstance(parent, list):
                    next_dict = self.get_element(composite+p, source_data)
                    if isinstance(next_dict, dict):
                        current = next_dict
                    else:
                        m = dict()
                        self._set_list_element(indexes, parent, m)
                        current = m
                else:
                    next_map = dict()
                    new_list = list()
                    self._set_list_element(indexes, new_list, next_map)
                    current[element] = new_list
                    current = next_map
        else:
            if n == size:
                current[p] = value
                break
            else:
                # Descend into an existing sub-dict or create one.
                if p in current and isinstance(current[p], dict):
                    current = current[p]
                else:
                    next_map = dict()
                    current[p] = next_map
                    current = next_map
        composite = composite + p + '.'
def _set_list_element(self, indexes: list, source_data: list, value: any):
current = self._expand_list(indexes, source_data)
size = len(indexes)
for i in range(0, size):
idx = indexes[i]
if i == size - 1:
current[idx] = value
else:
o = current[idx]
if isinstance(o, list):
current = o
@staticmethod
def _expand_list(indexes: list, source_data: list):
current = source_data
size = len(indexes)
for i in range(0, size):
idx = indexes[i]
if idx >= len(current):
diff = idx - len(current)
while diff >= 0:
current.append(None)
diff -= 1
if i == size - 1:
break
o = current[idx]
if isinstance(o, list):
current = o
else:
new_list = list()
current[idx] = new_list
current = new_list
return source_data
@staticmethod
def _is_composite(path: str):
return True if '.' in path or '/' in path or '[' in path or ']' in path else False
def _get_indexes(self, index_segment: str):
result = list()
indexes = self.util.multi_split(index_segment, '[]')
for i in indexes:
if self.is_digits(i):
result.append(int(i))
else:
result.append(-1)
return result
@staticmethod
def _get_list_element(indexes: list, source_data: list):
if (not isinstance(indexes, list)) or (not isinstance(source_data, list)) \
or len(indexes) == 0 or len(source_data) == 0:
return None
current = source_data
n = 0
size = len(indexes)
for i in indexes:
n += 1
if not isinstance(i, int):
return None
if i < 0 or i >= len(current):
break
o = current[i]
if n == size:
return o
if isinstance(o, list):
current = o
else:
break
return None
    def get_element(self, composite_path: str, source_data: dict = None):
        """Resolve a composite path such as 'a.b[2].c' against a nested dict.

        Args:
            composite_path: dotted/slashed path; segments may carry [i]
                list indexes.
            source_data: dict to search; defaults to self.dataset.

        Returns:
            The resolved value, or None when the path cannot be found.

        Raises:
            ValueError: if the data to search is not a dict.
        """
        if composite_path is None:
            return None
        data = self.dataset if source_data is None else source_data
        if not isinstance(data, dict):
            raise ValueError('Invalid input - Expect: dict, Actual: '+str(type(data)))
        if len(data) == 0:
            return None
        # special case for top level element that is using composite itself
        if composite_path in data:
            return data[composite_path]
        if not self._is_composite(composite_path):
            return data[composite_path] if composite_path in data else None
        parts = self.util.multi_split(composite_path, './')
        # shallow copy; traversal rebinds `current` but never mutates source
        current = dict(data)
        size = len(parts)
        n = 0
        for p in parts:
            n += 1
            if self.is_list_element(p):
                start = p.index('[')
                end = p.index(']', start)
                # NOTE(review): str.index raises ValueError rather than
                # returning -1, so this guard looks unreachable -- confirm.
                if end == -1:
                    break
                key = p[0: start]
                index = p[start+1: end].strip()
                if len(index) == 0 or not self.is_digits(index):
                    break
                if key in current:
                    next_list = current[key]
                    if isinstance(next_list, list):
                        indexes = self._get_indexes(p[start:])
                        next_result = self._get_list_element(indexes, next_list)
                        if n == size:
                            # final segment: return whatever the list lookup gave
                            return next_result
                        if isinstance(next_result, dict):
                            current = next_result
                            continue
            else:
                if p in current:
                    next_dict = current[p]
                    if n == size:
                        return next_dict
                    elif isinstance(next_dict, dict):
                        current = next_dict
                        continue
            # item not found
            break
        return None
def normalize_map(self):
if not self.normalized:
# do only once
self.normalized = True
flat_map = self.get_flat_map(self.dataset)
result = dict()
for k in flat_map:
self.set_element(k, flat_map[k], result)
self.dataset = result
def get_flat_map(self, data: dict = None):
if not isinstance(data, dict):
raise ValueError('Invalid input - Expect: dict, Actual: '+str(type(data)))
result = dict()
self._get_flat_map(None, data, result)
return result
def _get_flat_map(self, prefix: any, src: dict, target: dict):
for k in src:
v = src[k]
key = k if prefix is None else prefix + "." + k
if isinstance(v, dict):
self._get_flat_map(key, v, target)
elif isinstance(v, list):
self._get_flat_list(key, v, target)
else:
target[key] = v
def _get_flat_list(self, prefix: str, src: list, target: dict):
n = 0
for v in src:
key = prefix + "[" + str(n) + "]"
n += 1
if isinstance(v, dict):
self._get_flat_map(key, v, target)
elif isinstance(v, list):
self._get_flat_list(key, v, target)
else:
target[key] = v
    def validate_composite_path_syntax(self, path: str):
        """Validate bracket syntax of a composite path; raise ValueError when malformed.

        Splits on '.' and '/', then scans each segment's bracket region one
        character at a time, enforcing that brackets form non-nested
        '[digits]' pairs with nothing between pairs.
        """
        segments = self.util.multi_split(path, './')
        if len(segments) == 0:
            raise ValueError('Missing composite path')
        for s in segments:
            if '[' in s or ']' in s:
                if '[' not in s:
                    # a ']' is present without any '['
                    raise ValueError('Invalid composite path - missing start bracket')
                if not s.endswith(']'):
                    raise ValueError('Invalid composite path - missing end bracket')
                sep1 = s.index('[')
                sep2 = s.index(']')
                # NOTE(review): when ']' precedes '[', the message claims a
                # missing start bracket although one exists -- possibly
                # mislabelled, but callers may match on the text; left as is.
                if sep2 < sep1:
                    raise ValueError('Invalid composite path - missing start bracket')
                # `start` is True while we are inside an open '[' ... ']' pair
                start = False
                for c in s[sep1:]:
                    if c == '[':
                        if start:
                            # nested '[' inside an open pair
                            raise ValueError('Invalid composite path - missing end bracket')
                        else:
                            start = True
                    elif c == ']':
                        if not start:
                            raise ValueError('Invalid composite path - duplicated end bracket')
                        else:
                            start = False
                    else:
                        if start:
                            if c < '0' or c > '9':
                                raise ValueError('Invalid composite path - indexes must be digits')
                        else:
                            raise ValueError('Invalid composite path - invalid indexes')
|
class AromaticSystem:
    """A delocalised (aromatic) system: the atoms, bonds and pi electrons
    participating in it.

    Construction claims the member atoms and their mutual bonds by setting
    each one's ``aromatic_system`` back-reference to this instance, then
    pulls eligible p-orbital electrons into the shared ``electrons`` pool.
    """

    def __init__(self, aromatic_system_id, atoms):
        # id is assumed hashable (used directly by __hash__) -- typically an int.
        self.id = aromatic_system_id
        self.atoms = set(atoms)
        for atom in self.atoms:
            atom.aromatic_system = self
        self.electrons = set()
        self.bonds = set()
        self.set_bonds()
        for bond in self.bonds:
            bond.aromatic_system = self
        self.set_electrons()

    def __hash__(self):
        # Hash by id; consistent with __eq__ below.
        return self.id

    def __eq__(self, other):
        return self.id == other.id

    def __repr__(self):
        return f"Aromatic system {self.id}, members: {', '.join([atom.__repr__() for atom in self.atoms])}"

    def set_bonds(self):
        """Collect every bond whose two endpoints are both member atoms."""
        for atom_1 in self.atoms:
            for atom_2 in self.atoms:
                if atom_1 != atom_2:
                    bond = atom_1.get_bond(atom_2)
                    if bond:
                        self.bonds.add(bond)
                        bond.aromatic_system = self

    def get_contributed_electrons(self, atom):
        """Return the electrons in this system that were contributed by *atom*."""
        contributed_electrons = []
        for electron in self.electrons:
            if electron.atom == atom:
                contributed_electrons.append(electron)
        return contributed_electrons

    def set_electrons(self):
        """Move member atoms' p-orbital electrons into the shared pool.

        An orbital's electrons are only taken when every one of them
        belongs to an atom of this system.
        """
        for atom in self.atoms:
            # assumes each member atom has at least one 'p' orbital -- TODO confirm
            p_orbital = atom.get_orbitals('p')[0]
            electrons_participate_in_system = True
            for electron in p_orbital.electrons:
                if electron.atom not in self.atoms:
                    electrons_participate_in_system = False
            if electrons_participate_in_system:
                # copy first: removing while iterating would skip entries
                electrons = p_orbital.electrons[:]
                for electron in electrons:
                    p_orbital.remove_electron(electron)
                    self.electrons.add(electron)

    def add_atom(self, atom):
        """Add *atom* to the system and point it back at this instance."""
        self.atoms.add(atom)
        atom.aromatic_system = self

    def relocalise_electrons(self):
        """Return each atom's contributed electrons to its own p orbital."""
        for atom in self.atoms:
            p_orbital = atom.get_orbitals('p')[0]
            electrons = self.get_contributed_electrons(atom)
            for electron in electrons:
                p_orbital.add_electron(electron)
                self.remove_electron(electron)

    def add_electron(self, electron):
        self.electrons.add(electron)

    def remove_atom(self, atom):
        # detaches the back-reference as well
        self.atoms.remove(atom)
        atom.aromatic_system = None

    def remove_electron(self, electron):
        self.electrons.remove(electron)

    def add_bond(self, bond):
        self.bonds.add(bond)
        bond.aromatic_system = self

    def remove_bond(self, bond):
        self.bonds.remove(bond)
        bond.aromatic_system = None
|
"""
Plotting lib for regression prediction
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import pdb
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
def regression_surface_plot(Y, X1, X2, X3, ols_object, Y_label="Response Var", X1_label="X1", X2_label="X2", file_name="mesh_plot"):
    """Surface plot of a fitted 3-regressor linear model over (X1, X2).

    Args:
        Y: response variable array.
        X1, X2, X3: explanatory variable arrays (1-D, same length as Y).
        ols_object: fitted statsmodels OLS result; params are assumed to be
            ordered [X1, X2, X3, intercept] -- TODO confirm against the
            caller's exog construction.
        Y_label, X1_label, X2_label: axis labels.
        file_name: basename of the saved PNG.

    Side effects:
        Saves the figure under a hard-coded plots directory and prints the path.
    """
    X1 = X1.reshape(X1.shape[0], 1)
    X2 = X2.reshape(X2.shape[0], 1)
    X3 = X3.reshape(X3.shape[0], 1)
    xx1, xx2 = np.meshgrid(np.linspace(X1.min(), X1.max(), 100), np.linspace(X2.min(), X2.max(), 100))
    # NOTE(review): params[2] (the X3 coefficient) is left out, so the surface
    # is drawn as if X3 contributed nothing -- confirm this is intentional.
    Z = ols_object.params[0] * xx1 + ols_object.params[1] * xx2 + ols_object.params[3]
    #image bits
    fig = plt.figure(figsize=(12,8))
    ax = Axes3D(fig)
    #, azim=-115, elev=15)
    surf = ax.plot_surface(xx1, xx2, Z, cmap=plt.cm.RdBu_r, alpha=0.6, linewidth=0)
    #residuals (prediction uses the full design matrix incl. X3 and intercept)
    resid = Y - ols_object.predict(np.hstack(( X1, X2, X3, np.ones(X1.shape)))).reshape(Y.shape[0], 1)
    #above 0
    ax.scatter(X1[resid>=0], X2[resid>=0], Y[resid>=0], color='black', alpha=1.0, facecolor='white')
    #below 0
    ax.scatter(X1[resid<0], X2[resid<0], Y[resid<0], color='black', alpha=1.0)
    #labels
    ax.set_xlabel(X1_label)
    ax.set_ylabel(X2_label)
    ax.set_zlabel(Y_label)
    #saving figure
    save_name = '/cbio/grlab/home/dkuo/plots/cqtl_mesh/' + file_name + '.png'
    plt.savefig(save_name, dpi=200)
    print("Saved a mesh plot to %s" % (save_name))
def regression_scatter(Y, X1, X2, file_name="scatter"):
    """Scatter X1 against X2, colouring points by Y, and save the figure.

    Args:
        Y: response values used for the colour map.
        X1, X2: explanatory variable arrays.
        file_name: suffix used when building the output file name.

    Side effects:
        Saves '<file_name>_scatter.png' under a hard-coded plots directory.
    """
    X1 = X1.reshape(X1.shape[0], 1)
    X2 = X2.reshape(X2.shape[0], 1)
    figure, axis = plt.subplots()
    points = axis.scatter(X1, X2, c=Y, cmap=plt.cm.YlGnBu, alpha=0.8, lw=0.25, edgecolor='gray')
    colorbar = plt.colorbar(points)
    colorbar.set_label('Normalized Expression')
    axis.set_ylabel('Normalized Methylation')
    axis.set_xlabel('Mutation Status')
    save_name = '/cbio/grlab/home/dkuo/plots/cqtl_mesh/' + file_name + '_scatter.png'
    plt.savefig(save_name, dpi=200)
    print("Saved scatter to %s" % (save_name))
    plt.close()
def regression_scattermat(df, subset_hue = None, file_name="scatter_mat_temp"):
    """Pairwise scatter matrix of a pandas DataFrame, saved as a PNG.

    Args:
        df: DataFrame containing all values to plot.
        subset_hue: optional column name used to colour points; when it names
            a real column, a "Paired" palette sized to the number of distinct
            values in that column is used.
        file_name: basename for the saved figure.

    Side effects:
        Saves '<file_name>_scatterMatrix.png' under a hard-coded plots dir.
    """
    # (removed `twelve_class`, an unused hand-picked colour list)
    sns.set_style('whitegrid')
    if subset_hue in df.columns:
        pair_number = np.unique(df[str(subset_hue)]).shape[0]
        sns.set_palette("Paired", pair_number)
        # NOTE: `size=` was renamed `height=` in seaborn >= 0.9; kept as-is
        # for the older seaborn this module appears to target.
        sns.pairplot(df, hue=subset_hue, size=2.5)
    else:
        sns.pairplot(df, size=2.5)
    save_name = '/cbio/grlab/home/dkuo/plots/cqtl_mesh/' + file_name + '_scatterMatrix.png'
    plt.savefig(save_name, dpi=200)
    print("Saved scatter matrix to %s" % (save_name))
    plt.close()
|
# Separando dígitos de um número -- splits a 0-9999 number into its digit places.
num = str(input('Digite um número de 0 a 9999: '))
# zfill pads with leading zeros so every place (units..thousands) is present.
n = num.zfill(4)
fri = f'Número Digitado: {num}'
print('')
print(f'{fri:^30}')
print('=#=' * 10)
print(f'Unidade: {n[3]}')
print(f'Dezena: {n[2]}')
print(f'Centena: {n[1]}')
print(f'Milhar: {n[0]}')
print('=#=' * 10)
# (removed a dead commented-out alternative implementation using // and %)
|
class Literal(object):
    """A raw value that is spliced into an XPath query without escaping."""

    def __init__(self, value):
        """Store the raw string *value* verbatim.

        Args:
            value (str): The raw value.
        """
        self.value = value
|
"""
Labels are the Base class you derive your Labels from. A few simple Labels are
provided for you.
"""
from typing import Tuple
from PIL import Image
def _coord_add(tup1, tup2):
    """Return the element-wise sum of the first two components of each tuple."""
    return tuple(tup1[i] + tup2[i] for i in range(2))
class Label:
    """Base class for all labels

    Subclasses declare `items` as a list of lines, each line being a list of
    renderable objects; constructor arguments are consumed one per item.

    >>> class MyLabel(Label):
    ...     items = [
    ...         Text(), Text()
    ...     ]
    >>> l = MyLabel("text1", "text2")
    >>> printer.print(l)
    """
    items = []  # type: list

    def __init__(self, *args):
        """Render every declared item with its matching positional argument.

        Raises:
            ValueError: when the subclass declared no items.
            TypeError: when fewer arguments than items were supplied.
        """
        if not self.items:
            raise ValueError(
                "A Labels 'items' attribute must contain a list of "
                "renderable objects")
        arg_it = iter(args)
        try:
            self._rendered_items = [
                [item.render(next(arg_it)) for item in line]
                for line in self.items]
        except StopIteration:
            # the argument list was exhausted before all items had a value
            raise TypeError("{cls} requires {argc} arguments, but {num} were given".format(
                cls=self.__class__.__name__, argc=sum(len(x) for x in self.items), num=len(args)
            ))

    @property
    def size(self) -> Tuple[int, int]:
        """Natural (width, height): widest line by summed line heights."""
        width = max(sum(i.size[0] for i in line)
                    for line in self._rendered_items)
        height = sum(max(i.size[1] for i in line)
                     for line in self._rendered_items)
        return width, height

    def render(self, width=None, height=None) -> Image:
        """render the Label.

        Args:
            width: Width request (currently unused).
            height: Height request; when given, the result is scaled to this
                height preserving aspect ratio.
        """
        size = self.size
        img = Image.new("1", size, "white")
        pos = [0, 0]
        for line in self._rendered_items:
            for item in line:
                box = (*pos, *_coord_add(item.size, pos))
                img.paste(item, box=box)
                pos[0] += item.size[0]
            pos[0] = 0
            pos[1] += max(i.size[1] for i in line)
        # BUG FIX: the original rescaled unconditionally, raising TypeError
        # when render() was called without a height (the default). Leftover
        # debug prints were removed as well.
        if height is not None:
            xdim, ydim = img.size
            xdim = round((height / ydim) * xdim)
            img = img.resize((xdim, height))
        return img
# print("".join(f"{x:08b}".replace("0", " ") for x in bytes(i)))
|
#! /usr/bin/env python3
# This code prints out the IP, port and port-state from a list
my_list = ["192.168.0.5", 5060, "UP"]
labels = ("The first item in the list (IP): ",
          "The second item in the list (port): ",
          "The third item in the list (state): ")
# each label is paired with its list entry; str() is a no-op for the strings
for label, item in zip(labels, my_list):
    print(label + str(item))
new_list = [5060, "80", 55, "10.0.0.1", "10.20.30.1", "ssh"]
print("When I ssh into IP address", new_list[3], "or", new_list[4], "I am unable to ping ports" )
#!/usr/bin/env python
#
# Copyright 2013 Andy Gimma
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# System libraries.
import datetime
import jinja2
import json
import logging
import os
from google.appengine.ext.db import to_dict
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.ext.db import Query
# Local libraries.
import base
import key
import site_db
class GetEventAjaxHandler(base.AuthenticatedHandler):
    """AJAX endpoint that returns the current event's short name as JSON."""

    def AuthenticatedGet(self, org, event):
        """Write {"event_name": <event.short_name>} to the HTTP response."""
        payload = {"event_name": event.short_name}
        self.response.out.write(json.dumps(payload))
|
# -*- coding: utf-8 -*-
# 1. Standard library imports:
# 2. Known third party imports:
# 3. Odoo imports (openerp):
from openerp import api, fields, models
import openerp.addons.decimal_precision as dp
# 4. Imports from Odoo modules:
# 5. Local imports in the relative form:
# 6. Unknown third party imports:
class ResPartner(models.Model):
    # 1. Private attributes
    _inherit = 'res.partner'
    # 2. Fields declaration
    # 3. Default methods
    # 4. Compute and search fields, in the same order that fields declaration
    # 5. Constraints and onchanges
    # 6. CRUD methods
    # 7. Action methods
    # 8. Business methods
    @api.model
    def do_overdue_invoices(self):
        """Run the overdue action on each overdue invoice of these partners.

        Returns:
            int: number of invoices acted upon.
        """
        # NOTE(review): with @api.model `self` usually carries no records, so
        # this loop may never execute -- @api.multi (or iterating a search)
        # looks intended; confirm how the cron/caller invokes this.
        overdue_invoices_count = 0
        for record in self:
            # NOTE(review): 'now()' is a literal string here, not a SQL/ORM
            # expression; date_due < 'now()' likely does a text comparison --
            # verify against fields.Date.today()/context_today semantics.
            overdue_invoices = self.env['account.invoice'].search([
                ('partner_id', '=', record.id),
                ('date_due', '<', 'now()')
            ])
            for invoice in overdue_invoices:
                # skip invoices that are not flagged overdue
                if not invoice.overdue:
                    continue
                invoice.action_overdue_invoice()
                overdue_invoices_count += 1
        return overdue_invoices_count
|
import math
import numpy as np
def build_rotation_matrix(ax, ay, az, inverse=False):
    """Build a Euler rotation matrix (rotation order X, Y, Z, right-handed).

    Arguments:
        ax {float} -- rotation angle around X (radians)
        ay {float} -- rotation angle around Y (radians)
        az {float} -- rotation angle around Z (radians)

    Keyword Arguments:
        inverse {bool} -- negate all angles before building (default: {False})

    Returns:
        [numpy.array] -- 3x3 rotation matrix Rz @ Ry @ Rx
    """
    if inverse:
        ax, ay, az = -ax, -ay, -az
    cx, sx = np.cos(ax), np.sin(ax)
    cy, sy = np.cos(ay), np.sin(ay)
    cz, sz = np.cos(az), np.sin(az)
    rot_x = np.array([[1, 0, 0],
                      [0, cx, -sx],
                      [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy],
                      [0, 1, 0],
                      [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0],
                      [sz, cz, 0],
                      [0, 0, 1]])
    return rot_z @ rot_y @ rot_x
def rotation_matrix_to_angles(R):
    """Recover Euler angles (ax, ay, az) from a rotation matrix built as Rz·Ry·Rx.

    Arguments:
        R {array} -- 3x3 Euler rotation matrix

    Returns:
        [numpy.array] -- angles [ax, ay, az] in radians
    """
    if abs(R[2, 0]) == 1:
        # Gimbal lock: cos(ay) == 0, so ax and az are coupled; fix az = 0.
        az = 0
        if R[2, 0] == -1:
            ay = math.pi / 2.0
            ax = az + math.atan2(R[0, 1], R[0, 2])
        else:
            ay = -math.pi / 2.0
            ax = -az + math.atan2(-R[0, 1], -R[0, 2])
        return np.array([ax, ay, az])
    ay = -math.asin(R[2, 0])
    cos_ay = math.cos(ay)
    ax = math.atan2(R[2, 1] / cos_ay, R[2, 2] / cos_ay)
    az = math.atan2(R[1, 0] / cos_ay, R[0, 0] / cos_ay)
    return np.array([ax, ay, az])
def find_relative_vector_rotation(a, b):
    """Rotation matrix taking the direction of `a` onto `b` (Rodrigues' formula).

    Arguments:
        a {array} -- source vector [x, y, z]
        b {array} -- target vector [x, y, z]

    Returns:
        [numpy.array] -- 3x3 rotation matrix; identity when the normalized
        vectors already (nearly) coincide.
    """
    a = a / np.linalg.norm(a)
    b = b / np.linalg.norm(b)
    if np.linalg.norm(a - b) < 0.001:
        return np.eye(3)
    v = np.cross(a, b)
    c = np.dot(a, b)
    s = np.linalg.norm(v)
    # Skew-symmetric cross-product matrix of v, built directly as an ndarray.
    # (Replaces the original's deprecated np.matrix construction from a
    # formatted string -- np.matrix is deprecated and slated for removal.)
    k = np.array([[0.0, -v[2], v[1]],
                  [v[2], 0.0, -v[0]],
                  [-v[1], v[0], 0.0]])
    # NOTE: exactly antiparallel vectors give s == 0 and divide by zero,
    # as in the original implementation.
    return np.identity(3) + k + np.matmul(k, k) * ((1 - c) / (s ** 2))
def find_relative_axes_rotation(source_axes, target_axes, validate=True):
    """Find the rotation mapping `source_axes` onto `target_axes`.

    Arguments:
        source_axes {numpy.array} -- list of (x, y, z) axis vectors
        target_axes {numpy.array} -- list of (x, y, z) axis vectors

    Keyword Arguments:
        validate {bool} -- rebuild from Euler angles and check the result
            reproduces the target axes (default: {True})

    Returns:
        [numpy.array] -- 3x3 Euler rotation matrix

    Raises:
        RuntimeError: when validation finds the recovered angles inaccurate.
    """
    src = np.array(source_axes)
    tgt = np.array(target_axes)
    # Rotation between the coordinate frames spanned by the axis sets.
    R = np.dot(tgt.T, np.linalg.inv(src.T))
    angles = rotation_matrix_to_angles(R)
    if validate:
        rebuilt = build_rotation_matrix(*angles)
        mapped = np.dot(rebuilt, src.T).T
        diff = np.linalg.norm(tgt - mapped)
        if diff > 1e-3:
            raise RuntimeError("Found rotation angles are incorrect!"
                               " norm(expected_axes - computed_axes) = {}".format(diff))
    return R
def scalar_projection(source, target):
    """Length of *source*'s component along the direction of *target*.

    Arguments:
        source {array} -- source vector
        target {array} -- target vector

    Returns:
        [float] -- signed projection length
    """
    target_length = np.linalg.norm(target)
    return np.dot(source, target) / target_length
def find_vectors_mapping(source, target):
    """For each source vector, the index of the target vector it projects onto most.

    Arguments:
        source {array} -- source array of vectors
        target {array} -- target array of vectors

    Returns:
        [array] -- per-source index of the target with the largest |projection|
    """
    mapping = []
    for src_vec in source:
        magnitudes = [abs(scalar_projection(src_vec, tgt_vec)) for tgt_vec in target]
        mapping.append(np.argmax(magnitudes))
    return mapping
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: ita
# language: python
# name: ita
# ---
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy import stats
import scipy
# +
from sklearn.linear_model import *
from sklearn.ensemble import *
from sklearn.svm import *
from sklearn.neural_network import MLPRegressor
from sklearn.tree import ExtraTreeRegressor
import xgboost as xgb
from catboost import Pool, CatBoostRegressor
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_absolute_error
from data_utils import *
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, cross_validate
# +
# Load the engineered training features produced upstream.
df = pd.read_csv("../data/f_train.csv")
# Candidate feature columns (the last two are also the prediction targets).
bruh = ["n", "b1", "b3", "g1", "l1", "l3", "e_avg", "g_l", "rf1", "volume", "floor_area", "rf3", "sd_trans", "cent_price_cor", "cent_trans_cor"]
#X = df[bruh].values
# Drop the target columns from the design matrix.
X = df[bruh].drop(["cent_price_cor", "cent_trans_cor"], axis=1).values
# Notebook-style toggle: the second assignment deliberately overrides the first.
inputs = ["cent_price_cor", "cent_trans_cor"] # Use this for multi-output models
inputs = ["cent_trans_cor"] # Use this for single output models
y = df[inputs].values
def plot_pred_ensemble(model, X):
    """Visualise a fitted voting ensemble on feature matrix X.

    Left: per-estimator prediction curves sorted by the ensemble prediction.
    Right: histogram of the ensemble prediction. All predictions are squared
    before plotting -- presumably because the target was modelled in a
    square-root space; verify against the target preparation.
    """
    frame = pd.DataFrame()
    for estimator_name, estimator in model.named_estimators_.items():
        frame[estimator_name] = estimator.predict(X)
    frame["ensemble"] = model.predict(X)
    frame.sort_values(by=["ensemble"], inplace=True)
    frame = frame.apply(np.square)
    fig, ax = plt.subplots(figsize=(16, 6))
    plt.subplot(1, 2, 1)
    for column in frame.columns:
        sns.lineplot(data=frame, x=np.arange(len(X)), y=column, label=column)
    plt.legend()
    plt.subplot(1, 2, 2)
    sns.histplot(data=frame, x="ensemble", bins=10)
    plt.show()
# +
# Single-model candidates kept for reference; the active model is the
# VotingRegressor assembled from `estimators` below.
#model = MLPRegressor(4, activation="relu", solver="adam", max_iter=5000, alpha=.1, max_fun=20000)
#model = SGDRegressor(alpha=.00000)
# model = NuSVR(nu=0.4, C=300, gamma="auto")
#model = KernelRidge(alpha=1, degree=2, kernel="poly")#, gamma=.05)
#model = RandomForestRegressor(n_estimators=30, max_depth=2, random_state=1337, n_jobs=4)
#model = xgb.XGBRegressor(n_estimators=300, learning_rate=0.1, max_depth=2, reg_alpha=0.1, reg_lambda=0.1, gamma=0.3, subsample=0.9,min_child_weight=0.5, n_jobs=4)
#model = CatBoostRegressor(verbose=False)
#model = LGBMRegressor(max_bin=255, lambda_l1=0.1, lambda_l2=0.1, learning_rate=.1, num_leaves=20, bagging_freq=1, bagging_fraction=.9, max_depth=15, verbose=0)
#model = BayesianRidge()
#model = Ridge(0.1)
# Base estimators for the voting ensemble (several disabled experiments kept).
estimators = [ #("kernel_ridge", KernelRidge(alpha=1, degree=2, kernel="poly", gamma=.05)),
    #           (
    #               "xgb",
    #               xgb.XGBRegressor(
    #                   n_estimators=1000,
    #                   learning_rate=0.1,
    #                   max_depth=3,
    #                   reg_alpha=0.1,
    #                   reg_lambda=0.1,
    #                   gamma=0.3,
    #                   subsample=0.9,
    #                   min_child_weight=0.5,
    #               ),
    #           ),
    #("ann", MLPRegressor(8, solver="lbfgs", max_iter=5000, alpha=0.5, learning_rate_init=0.002, max_fun=20000)),
    (
        "lgbm",
        LGBMRegressor(
            max_bin=255,
            lambda_l1=0.01,
            lambda_l2=0.1,
            learning_rate=0.1,
            num_leaves=20,
            bagging_freq=1,
            bagging_fraction=0.9,
            max_depth=12,
        ),
    ),
    ("cat", CatBoostRegressor(verbose=False)),
    ("b_ridge", BayesianRidge()),
    ("ridge", Ridge(0.1)),
    ("lasso", Lasso())
    # ("svr", NuSVR(nu=0.4, C=300, gamma="auto"))
]
model = VotingRegressor(estimators=estimators)#, weights=[0.4, 0.4, 0.2])
#model = Lasso()
# model = StackingRegressor(estimators=estimators, n_jobs=4)
# model = GradientBoostingRegressor(n_estimators=600, max_depth=2, random_state=1337)
# +
# 10-fold cross-validated MAE estimate for the chosen model.
k = 10
kf = KFold(n_splits=k, shuffle=True)
scores = []
cheats = []
for train_index, test_index in kf.split(X):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print("mae: ", mean_absolute_error(y_test, y_pred))
    scores.append(mean_absolute_error(y_test, y_pred))
    plot_pred_ensemble(model, X_test) # enable this if running ensembles
print("media:", np.mean(scores))
# -
# Final fit on all data; the training MAE below is optimistic by construction.
model.fit(X, y)
y_pred = model.predict(X)
print("mae: ", mean_absolute_error(y, y_pred))
# NOTE(review): X_test here is whatever fold was left over from the CV loop
# above, while `model` was just refit on all of X -- probably unintended.
df_out = pd.DataFrame(model.predict(X_test), columns=inputs)
df_out.describe()
# +
# Compare the distribution of predictions against the true target columns.
sns.histplot(data=df, x="cent_price_cor", stat="probability", bins=30, color="black")
sns.histplot(data=df, x="cent_trans_cor", stat="probability", bins=30, color="w")
for i in inputs:
    sns.histplot(data=df_out, x=i, stat="probability", bins=30, color="r")
# -
|
from apriorib.apriorib import Apriori |
#!/usr/bin/env python
# THIS EXAMPLE ISN'T COMPLETE OR WORKING
# it deadlocks (don't remember)
# there's no "self" for main in python
import inspect
import sys
import os.path
sys.path[0:0] = [ os.path.join( os.path.dirname( inspect.getabsfile( inspect.currentframe() ) ), '..', '..', '..', 'lib' ) ]
sys.path[0:0] = [ os.path.join( os.path.dirname( inspect.getabsfile( inspect.currentframe() ) ), '..' ) ]
from logging import warning
import dramatis
from kid import Kid
# NOTE: Python 2 syntax (print statements). Also `self` below is undefined at
# module level -- the file header acknowledges this example is broken.
# Build a whisper chain: each Kid forwards to the one given as second arg.
tom = Kid( "Tom", self )
becky = Kid( "Becky", tom )
dick = Kid( "Dick", becky )
jane = Kid( "Jane", dick )
harry = Kid( "Harry", jane )
sally = Kid( "Sally", harry )
phrases = [ "his mom locked her keys in the car, " + \
    "so he should get a ride home with Hector",
    "Mac King is a comedy magic genius" ]
# Whisper each phrase into the far end of the chain.
for phrase in phrases:
    print "Teacher:", phrase
    try:
        sally.whisper( phrase )
    except dramatis.Deadlock:
        warning( "woah: got a deadlock: that shouln't happen" )
# Read back what reached the head of the chain.
for phrase in phrases:
    try:
        print "Teacher heard:", tom.ask()
    except dramatis.Deadlock:
        warning( "woah: got another deadlock" )
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import zip
import os, sys
# argv[1]: data file whose numbers will be reformatted; argv[2]: template
# file providing the layout the numbers are substituted back into.
fin = open(sys.argv[1]).readlines()
template = open(sys.argv[2]).readlines()
def determine_format_string(numstr): # Returns something like "% 8.3f"
    """Derive a printf-style float format matching *numstr*'s width/precision.

    Args:
        numstr: a decimal number as text, e.g. "-12.345" or "1.5e+05";
            must contain exactly one '.'.

    Returns:
        A format string such as "% 8.3f" (or "...e" for exponent notation)
        that renders a float with the same field width and precision.

    Raises:
        ValueError: if *numstr* does not contain exactly one decimal point.
    """
    spl = numstr.split('.')
    if len(spl) != 2:
        # BUG FIX: the original *returned* the exception object instead of
        # raising it, so callers got an Exception instance as the "format".
        raise ValueError("There should be exactly one decimal place in the word %s" % numstr)
    decims = len(spl[1])
    strlen = len(spl[0]) + decims + 1
    formstr = "% "
    if numstr[0] == "-":
        # the sign occupies one column; shift the field left by one space
        strlen -= 1
        formstr = " " + formstr
    if 'e' in numstr:
        # exponent suffix ("e+NN") accounts for 4 characters of the width
        strlen -= 4
        decims -= 4
        formstr += "%i.%ie" % (strlen, decims)
    else:
        formstr += "%i.%if" % (strlen, decims)
    return formstr
# Walk the template and data files line by line in lockstep; wherever a
# word differs, substitute the data value reformatted to the template
# number's width and precision.
for line_temp, line_data in zip(template, fin):
    stemp = line_temp.split()
    sdata = line_data.split()
    line_out = line_temp
    for wt, wd in zip(stemp, sdata):
        if wt != wd:
            #print(wt, wd)
            # the leading space anchors the match to a word boundary;
            # replace only the first occurrence on the line
            line_out = line_out.replace(" " + wt, determine_format_string(wt) % float(wd), 1)
    print(line_out, end='')
|
import sys
# Expose the asyncio-based iterator only on Python versions that support it,
# aliasing it so callers can simply `import acouchbase.iterator`.
if sys.version_info >= (3, 4):
    import acouchbase.py34only.iterator
    sys.modules['acouchbase.iterator'] = acouchbase.py34only.iterator
    if sys.version_info < (3,7):
        import asyncio
        # Pre-3.7 lacks native async-generator __aiter__ support, so wrap it.
        # NOTE(review): asyncio.coroutine was removed in Python 3.11; this
        # branch only runs on <3.7 where it still exists.
        acouchbase.py34only.iterator.AioBase.__aiter__=asyncio.coroutine(acouchbase.py34only.iterator.AioBase.__aiter__)
"""Renders a Threes board into an ANSI string to be printed to the console."""
from colorama import Fore, Back, Style
from threes import threes_util
# Name and colors to use for printing each possible tile.
# Index 0 is the empty cell, indexes 1 and 2 are the "-1-"/"-2-" base tiles,
# and the rest are the 3 * 2**i values rendered centred in the cell width.
TILE_NAMES = [" ", " -1- ", " -2- "] + [
    "{:^5} ".format(3 * 2 ** i) for i in range(13)
]
# Background/foreground colour per tile value; padded with extra entries so
# any tile index used by TILE_NAMES is safe to look up.
BACK_COLORS = [Back.BLACK, Back.BLUE, Back.RED] + [Back.WHITE] * 20
FRONT_COLORS = [Fore.WHITE, Fore.WHITE, Fore.WHITE] + [Fore.BLACK] * 20
def render_ansi(board, future_tile):
    """Generate a string for printing a Threes game state into a console.

    Args:
        board: 2-D iterable of tile indexes into the colour/name tables.
        future_tile: encoded upcoming tile, expanded through
            threes_util.future_tile_possibilities().

    Returns:
        A string containing ANSI colour escapes, ready to print.
    """
    LEFT_MARGIN = " " * 10
    to_write = "\n\n"
    possible_tiles = threes_util.future_tile_possibilities(future_tile)
    # Shared indent shrinks as more candidates are shown, keeping it centred.
    tile_name_line = padding_line = " " * (3 - len(possible_tiles))
    # Render the "Next:" line.
    for tile in possible_tiles:
        back = BACK_COLORS[tile]
        front = FRONT_COLORS[tile]
        padding_line += " " + back + front + " " * 6 + Style.RESET_ALL
        tile_name_line += " " + back + front + TILE_NAMES[tile] + Style.RESET_ALL
    to_write += LEFT_MARGIN + "      " + padding_line + "\n"
    to_write += LEFT_MARGIN + "Next: " + tile_name_line + "\n"
    to_write += LEFT_MARGIN + "      " + padding_line + "\n\n\n"
    # Render the lines of the board, one at a time (each board line takes
    # multiple console lines).
    for i, line in enumerate(board):
        padding_line = LEFT_MARGIN
        tiles = LEFT_MARGIN
        for j, tile in enumerate(line):
            back = BACK_COLORS[tile]
            front = FRONT_COLORS[tile]
            padding_line += back + front + " " * 6 + Style.RESET_ALL + " "
            tiles += back + front + TILE_NAMES[tile] + Style.RESET_ALL + " "
        to_write += padding_line + "\n" + tiles + "\n" + padding_line + "\n\n"
    return to_write
|
import argparse
from src.segment import InstanceSegmenter
# construct the argument parse and parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--image", required=True,
    help="path to input image")
parser.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
parser.add_argument("-t", "--threshold", type=float, default=0.3,
    help="minimum threshold for pixel-wise mask segmentation")
# BUG FIX: type=bool is an argparse trap -- bool("0") and bool("false") are
# both True, so ANY supplied value enabled the GPU. Parsing as int keeps the
# documented 0/1 usage working and preserves the default of 0 (off).
parser.add_argument("-u", "--use-gpu", type=int, default=0,
    help="boolean indicating if CUDA GPU should be used")
parser.add_argument("-e", "--iter", type=int, default=10,
    help="# of GrabCut iterations "
         "(larger value => slower runtime)")
args = vars(parser.parse_args())

# Unpack the parsed CLI options into module-level names.
use_gpu = args["use_gpu"]
image = args["image"]
confidence = args["confidence"]
threshold = args["threshold"]
grabcut_iter = args["iter"]

if __name__ == "__main__":
    segmenter = InstanceSegmenter(use_gpu)
    segmenter.segment_image(image, confidence, threshold, grabcut_iter)
|
"""
constants.py module for ocean.tests
"""
# Name of the directory (relative to the test package) holding test data.
PG_DATA_DIR = 'data'
|
import datetime
import pandas as pd
class solar_data:
    """Wrapper around a pandas DataFrame of solar production time series.

    The frame (``self.data_df``) always carries a 'datetime' column; the
    ingest_* methods populate it from PVWatts or Enlighten Manager exports,
    and the export_* methods reshape it for downstream rate/energy analysis.
    """

    def ingest_pvwatts(self, data):
        '''
        data must be a dataframe object with Month/Day/Hour columns.
        This function adds a datetime column.
        '''
        self.data_df = data
        # NOTE: the year is hard-coded to 2016 (PVWatts exports are a
        # typical meteorological year without a real year column).
        self.data_df["datetime"] = self.data_df.apply(
            lambda row: datetime.datetime(
                year=2016, month=int(row["Month"]), day=int(row["Day"]),
                hour=int(row["Hour"])), axis=1)

    def ingest_pvwatts_csv(self, filename):
        """Load a PVWatts CSV (comma thousands separators) and ingest it."""
        df = pd.read_csv(filename, thousands=',')
        self.ingest_pvwatts(df)

    def ingest_daily_production_enlightenmanager_csv(self, filename):
        """Load an Enlighten Manager daily-production CSV (date, Wh)."""
        df = pd.read_csv(filename, thousands=',', parse_dates=[0])
        df.columns = ['datetime', 'Wh']
        self.data_df = df

    def export_kwatts_for_elect_rates(self):
        """Return a (datetime, Value) frame with AC output converted to kW."""
        df = pd.DataFrame()
        df["datetime"] = self.data_df["datetime"]
        df["Value"] = self.data_df["AC System Output (W)"] / 1000.0
        return df

    def export_daily_energy_from_pvwatts(self):
        """Return daily Wh totals resampled from hourly PVWatts output."""
        df = pd.DataFrame()
        df["datetime"] = self.data_df["datetime"]
        df["Wh"] = self.data_df["AC System Output (W)"]
        df.index = df.datetime
        # BUG FIX: resample(..., how='sum') was removed in pandas 0.25; use
        # the method form. numeric_only=True mirrors the old behaviour of
        # silently dropping the non-numeric 'datetime' column.
        df = df.resample("D").sum(numeric_only=True)
        return df

    ## TODO: use date_range() and DatetimeIndex
    def export_daily_energy_from_enlightenmanager(self):
        """Return daily Wh totals resampled from Enlighten Manager data."""
        df = pd.DataFrame()
        df["datetime"] = self.data_df["datetime"]
        df["Wh"] = self.data_df["Wh"]
        df.index = df.datetime
        df = df.resample("D").sum(numeric_only=True)
        return df

    def data(self):
        """Return the underlying DataFrame."""
        return self.data_df
|
# Sum a comma-separated list of integer amounts read from stdin;
# amounts below 20 are increased by a 7% surcharge before summing.
items = input().split(',')
total = 0.0
for raw in items:
    amount = int(raw)
    if amount >= 20:
        total += amount
    else:
        total += amount * 1.07
print(total)
|
# Generated by Django 2.0.3 on 2018-03-16 20:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: loosens Profile fields to allow blank values
    and adds defaults for bsa/stage.

    NOTE(review): max_length on FloatField has no effect in Django; harmless
    but auto-generated from the model definitions, so left untouched.
    """

    dependencies = [
        ('patientbasicinfo', '0005_auto_20180316_2003'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='bloodGroup',
            field=models.CharField(blank=True, max_length=3),
        ),
        migrations.AlterField(
            model_name='profile',
            name='bsa',
            field=models.FloatField(blank=True, default=0, max_length=10),
        ),
        migrations.AlterField(
            model_name='profile',
            name='diseaseCode',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='profile',
            name='er_pr',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='profile',
            name='height',
            field=models.FloatField(blank=True, max_length=5),
        ),
        migrations.AlterField(
            model_name='profile',
            name='histopathology',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='profile',
            name='ihc',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='profile',
            name='ps',
            field=models.CharField(blank=True, max_length=10),
        ),
        migrations.AlterField(
            model_name='profile',
            name='stage',
            field=models.IntegerField(blank=True, default=1),
        ),
        migrations.AlterField(
            model_name='profile',
            name='tnm',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='profile',
            name='weight',
            field=models.FloatField(blank=True, max_length=5),
        ),
    ]
|
import torch
from torch.nn.modules.utils import _pair
from torch.nn.functional import conv2d, conv_transpose2d
from numpy import prod, sqrt
class EqualizedConv2d(torch.nn.Module):
    """Conv2d variant with an equalized learning rate.

    Implements section 4.1 of Karras et al. (2018), "Progressive Growing of
    GANs": weights are drawn from N(0, 1) and multiplied at runtime by the
    He-initializer constant sqrt(2 / fan_in). Because optimizers such as Adam
    normalize gradient magnitudes, applying the scale dynamically keeps the
    effective step size comparable across layers of different fan-in.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        """
        :param in_channels: number of input channels before the convolution
        :param out_channels: number of output channels after the convolution
        :param kernel_size: kernel size (single value if square, tuple otherwise)
        :param stride: stride for the convolution
        :param padding: padding for the convolution
        """
        super(EqualizedConv2d, self).__init__()
        kernel_shape = _pair(kernel_size)
        self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels, *kernel_shape))
        self.stride = stride
        self.padding = padding
        self.bias = torch.nn.Parameter(torch.zeros(out_channels))
        # He scale from the layer's fan-in, applied per forward pass.
        fan_in = prod(kernel_shape) * in_channels
        self.scale = sqrt(2) / sqrt(fan_in)

    def forward(self, x):
        """Convolve *x* using the runtime-rescaled weights."""
        scaled_weight = self.weight * self.scale
        return conv2d(input=x,
                      weight=scaled_weight,
                      bias=self.bias,
                      stride=self.stride,
                      padding=self.padding)
class EqualizedDeconv2d(torch.nn.Module):
    """Transposed-convolution counterpart of EqualizedConv2d.

    Used for upsampling; applies the same runtime He-initializer scaling,
    with fan_in taken as the input channel count.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        """
        :param in_channels: number of input channels before the convolution
        :param out_channels: number of output channels after the convolution
        :param kernel_size: kernel size (single value if square, tuple otherwise)
        :param stride: stride for the convolution
        :param padding: padding for the convolution
        """
        super(EqualizedDeconv2d, self).__init__()
        kernel_shape = _pair(kernel_size)
        self.weight = torch.nn.Parameter(torch.randn(in_channels, out_channels, *kernel_shape))
        self.stride = stride
        self.padding = padding
        self.bias = torch.nn.Parameter(torch.zeros(out_channels))
        # Fan-in for the transposed convolution is just the input channels.
        fan_in = in_channels
        self.scale = sqrt(2) / sqrt(fan_in)

    def forward(self, x):
        """Apply the transposed convolution with runtime-rescaled weights."""
        scaled_weight = self.weight * self.scale
        return conv_transpose2d(input=x,
                                weight=scaled_weight,
                                bias=self.bias,
                                stride=self.stride,
                                padding=self.padding)
|
import math
import numpy as np
import torch
from utils import to_tensor, accumulate, batch_normalize
def calculate_future_rewards(rewards, gamma):
    """Discounted returns-to-go for each time step of each trajectory.

    :param rewards: list of per-step rewards; length is max_episode_length and
        each element holds one reward per trajectory, so the overall shape is
        [max_episode_length, batch_size].
    :param gamma: discount factor applied to later rewards.
    :return: discounted cumulative future rewards, same shape as ``rewards``
        (the backward accumulation itself is delegated to ``utils.accumulate``).
    """
    return accumulate(rewards, gamma)
def calculate_td_errors(rewards, values, gamma):
    """One-step temporal-difference errors: r_t + gamma * V(s_{t+1}) - V(s_t).

    :param rewards: per-step rewards, shape [max_episode_length, batch_size];
        any array-like (list, tuple, ndarray) is accepted.
    :param values: per-step value estimates, same shape as ``rewards``.
    :param gamma: discount factor.
    :return: ndarray of TD errors, same shape as ``rewards``.
    """
    # np.asarray is a no-op for ndarrays and also handles tuples/nested
    # sequences, unlike the previous `type(rewards) is list` check.
    rewards = np.asarray(rewards)
    values = np.asarray(values)
    _, batch_size = rewards.shape
    # The value after the final step is defined to be zero (episode end).
    next_values = np.append(values[1:], np.zeros([1, batch_size]), axis=0)
    return rewards + gamma * next_values - values
def calculate_gae(rewards, values, gamma, lamb, normalize=True):
    """Generalized Advantage Estimation (GAE-lambda) plus returns-to-go.

    :param rewards: per-step rewards, shape [max_episode_length, batch_size]
    :param values: per-step value estimates, same shape as ``rewards``
    :param gamma: reward discount factor
    :param lamb: GAE lambda controlling the bias/variance trade-off
    :param normalize: batch-normalize the advantages when True
    :return: tuple (advantages, future_returns)
    """
    deltas = calculate_td_errors(rewards, values, gamma)
    # GAE folds TD errors backwards with the combined discount gamma*lambda.
    advantages = accumulate(deltas, gamma * lamb)
    future_returns = calculate_future_rewards(rewards, gamma)
    if normalize:
        advantages = batch_normalize(advantages)
    return advantages, future_returns
class Trajectories(object):
    """One batch of rollout data collected from a vectorized environment.

    Every array is laid out as [max_episode_length, batch_size, ...].
    Tensor copies are created once at construction and cached.
    """

    def __init__(self, states: np.ndarray, actions: np.ndarray, old_log_probs: np.ndarray, rewards: np.ndarray):
        self.states = states
        self.actions = actions
        self.old_log_probs = old_log_probs
        self.rewards = rewards
        # Cache tensor views so repeated training passes avoid re-conversion.
        self.states_t, self.actions_t, self.old_log_probs_t, self.rewards_t = [
            to_tensor(array) for array in (states, actions, old_log_probs, rewards)
        ]

    def get_as_numpy(self):
        """Return (states, actions, old_log_probs, rewards) as numpy arrays."""
        return self.states, self.actions, self.old_log_probs, self.rewards

    def get_as_tensor(self):
        """Return the cached (states, actions, old_log_probs, rewards) tensors."""
        return self.states_t, self.actions_t, self.old_log_probs_t, self.rewards_t

    def get_gae(self, values, gamma, lamb, normalize=True, as_tensor=True):
        """Compute GAE advantages and returns-to-go for these trajectories."""
        advantages, returns = calculate_gae(self.rewards, values, gamma, lamb, normalize=normalize)
        if as_tensor:
            advantages = to_tensor(advantages.copy())
            returns = to_tensor(returns.copy())
        return advantages, returns

    def total_rewards(self):
        """Undiscounted return of each trajectory (rewards summed over time)."""
        return np.sum(self.rewards, axis=0)

    def __len__(self):
        # Number of collected time steps.
        return len(self.states)

    @property
    def batch_size(self):
        """Number of parallel trajectories in this batch."""
        return self.states.shape[1]
def collect_trajectories(env, model, max_episodes_len=None):
    """Roll out *model* in *env* and return the collected Trajectories.

    :param env: environment exposing ``brain_names``, ``reset`` and ``step``
        (Unity ML-Agents style, judging by usage — confirm against caller).
    :param model: policy callable returning (actions, log_probs, entropy,
        values); temporarily switched to eval mode during collection.
    :param max_episodes_len: optional cap on the number of steps; ``None``
        (the default) means run until any trajectory finishes.
    :return: ``Trajectories`` with arrays shaped [episode_length, batch_size, ...].
    """
    # get the default brain
    brain_name = env.brain_names[0]
    # Bug fix: range(math.inf) raises TypeError, so the unbounded default
    # (max_episodes_len=None) must be driven by a while-loop instead.
    step_limit = max_episodes_len if max_episodes_len else math.inf
    state_list = []
    action_list = []
    old_log_probs = []
    reward_list = []
    # reset the environment
    env_info = env.reset()[brain_name]
    is_train = model.training
    model.eval()
    t = 0
    while t < step_limit:
        # probs will only be used as the pi_old; no gradient propagation is
        # needed, so the results are moved to the cpu
        states = env_info.vector_observations
        states_t = to_tensor(states)
        with torch.no_grad():
            actions_t, actions_log_prob_t, entropy, values = model(states_t)
            actions_log_prob = actions_log_prob_t.cpu().numpy()
            actions = actions_t.cpu().numpy()
        env_info = env.step(actions)[brain_name]
        # store the result
        state_list.append(states)
        action_list.append(actions)
        old_log_probs.append(actions_log_prob)
        reward_list.append(env_info.rewards)
        # stop if any of the trajectories is done:
        # we want all the lists to be rectangular
        if np.any(env_info.local_done):
            break
        t += 1
    if is_train:
        model.train()
    return Trajectories(np.asarray(state_list), np.asarray(action_list), np.asarray(old_log_probs),
                        np.asarray(reward_list))
|
# Copyright 2021 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bokeh.plotting import figure, curdoc
from bokeh.layouts import layout, row, column, gridplot
from bokeh.models.widgets import Tabs, Panel
from bokeh.models.annotations import Title
from bokeh.models import ColumnDataSource, DataTable, DateFormatter, TableColumn, HTMLTemplateFormatter
from bokeh.models import Button, Div, CheckboxGroup, Range1d
from bokeh.models import HoverTool
from bokeh.models import TextInput
from bokeh.models import Paragraph, Div, CustomJS
from bokeh.events import ButtonClick
from bokeh.themes import built_in_themes
from bokeh.driving import linear
import psutil
from collections import deque
import subprocess
from functools import partial
# Dark-theme color scheme shared by all dashboard widgets.
bg_color = '#15191C'
text_color = '#E0E0E0'
##################################################
##### Platform Stat Tab ##########################
##################################################
# Number of samples shown on the rolling plots, and how many have actually
# been collected so far (grows up to sample_size after startup/resize).
sample_size = 60
sample_size_actual = 60
# Sampling period in seconds for the periodic update callback.
interval = 1
# Shared x-axis (elapsed seconds) for every rolling line plot.
x = deque([0] * sample_size)
# One distinct line color per plotted series.
color_list = ["darkseagreen", "steelblue", "indianred", "chocolate", "mediumpurple", "rosybrown", "gold",
              "mediumaquamarine"]
def get_mem(memtype):
    """Return the value (in kB) of the *memtype* row of /proc/meminfo.

    :param memtype: exact field name, e.g. 'MemTotal', 'MemFree', 'CmaFree'.
    :return: integer kB value, or 0 when the field is absent.
    """
    # Parse /proc/meminfo directly. The previous implementation extracted
    # every digit from the repr of a CompletedProcess, which also swallowed
    # the returncode digit and broke whenever grep matched multiple rows.
    with open('/proc/meminfo') as meminfo:
        for line in meminfo:
            if line.startswith(memtype + ':'):
                return int(line.split()[1])
    return 0
def clear_min_max():
    """Reset the tracked voltage/temperature min/max and the CPU average.

    Wired to the dashboard's reset Button. Uses slice assignment so the
    list objects referenced by the Bokeh table data sources stay the same
    and only their contents change.
    """
    max_volt[:] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
    max_temp[:] = [0, 0, 0]
    # Sentinels chosen above any plausible reading so the first sample wins.
    min_volt[:] = [7000, 7000, 7000, 7000, 7000, 7000, 7000, 7000, 7000]
    min_temp[:] = [200, 200, 200]
    global average_cpu, average_cpu_sample_size
    average_cpu = 0
    average_cpu_sample_size = 0
# Series labels for the four Cortex-A53 cores and their rolling samples.
cpu_labels = [
    "A-53_Core_0",
    "A-53_Core_1",
    "A-53_Core_2",
    "A-53_Core_3",
]
cpu_data = {
    'A-53_Core_0': deque([0.0] * sample_size),
    'A-53_Core_1': deque([0.0] * sample_size),
    'A-53_Core_2': deque([0.0] * sample_size),
    'A-53_Core_3': deque([0.0] * sample_size),
}
# Voltage rails sampled from the hwmon sysfs nodes, plus a total reading.
volt_labels = [
    "VCC_PSPLL",
    "PL_VCCINT",
    "VOLT_DDRS",
    "VCC_PSINTFP",
    "VCC_PS_FPD",
    "PS_IO_BANK_500",
    "VCC_PS_GTR",
    "VTT_PS_GTR",
    "Total_Volt",
]
volt_data = {
    "VCC_PSPLL": deque([0] * sample_size),
    "PL_VCCINT": deque([0] * sample_size),
    "VOLT_DDRS": deque([0] * sample_size),
    "VCC_PSINTFP": deque([0] * sample_size),
    "VCC_PS_FPD": deque([0] * sample_size),
    "PS_IO_BANK_500": deque([0] * sample_size),
    "VCC_PS_GTR": deque([0] * sample_size),
    "VTT_PS_GTR": deque([0] * sample_size),
    "Total_Volt": deque([0] * sample_size),
}
# Temperature sensors (full-power domain, low-power domain, PL fabric).
temp_labels = [
    "FPD_TEMP",
    "LPD_TEMP",
    "PL_TEMP",
]
temp_data = {
    "FPD_TEMP": deque([0.0] * sample_size),
    "LPD_TEMP": deque([0.0] * sample_size),
    "PL_TEMP": deque([0.0] * sample_size),
}
# note that if a queue is not getting appended every sample, remove it from data structure, or
# popping the queue when updating sample size will not work!
mem_labels = [
    # "MemTotal",
    "MemFree",
    # "MemAvailable",
    # "SwapTotal",
    # "SwapFree",
    # "CmaTotal",
    # "CmaFree",
]
mem_data = {
    # "MemTotal": deque([0] * sample_size),
    "MemFree": deque([0] * sample_size),
    # "MemAvailable": deque([0] * sample_size),
    # "SwapTotal": deque([0] * sample_size),
    # "SwapFree": deque([0] * sample_size),
    # "CmaTotal": deque([0] * sample_size),
    # "CmaFree": deque([0] * sample_size),
}
# SOM current (mA) and power (W) rolling samples from the INA260 monitor.
current_data = deque([0] * sample_size)
power_data = deque([0] * sample_size)
# title
title1 = Div(
    text="""<h1 style="color :""" + text_color + """; text-align :center">Kria™ SOM: Hardware Platform Statistics</h1>""",
    width=550)
# average cpu display (running mean, updated by the periodic callback)
average_cpu = 0.0
average_cpu_sample_size = 0
average_cpu_display = Div(text=str(average_cpu), width=600)
# CPU frequency display
cpu_freq_text = """<h3 style="color :""" + text_color + """;">CPU Frequencies </h3>"""
cpu_freq = [0, 0, 0, 0]
cpu_freq_display = Div(text=cpu_freq_text, width=400)
# CPU line plot: one line per core; the .data_source handles are kept so
# the update callback can trigger redraws.
cpu_plot = figure(plot_width=800, plot_height=300, title='CPU Utilization %')
cpu_ds = [0, 0, 0, 0]
for i in range(len(cpu_labels)):
    cpu_ds[i] = (cpu_plot.line(x, cpu_data[cpu_labels[i]], line_width=2,
                               color=color_list[i], legend_label=cpu_labels[i])).data_source
cpu_plot.legend.click_policy = "hide"
# current line plot
current_plot = figure(plot_width=500, plot_height=300, title='Total SOM Current in mA')
current_ds = (current_plot.line(x, current_data, line_width=2,
                                color=color_list[0], legend_label="Current")).data_source
current_plot.legend.click_policy = "hide"
# power line plot
power_plot = figure(plot_width=500, plot_height=300, title='Total SOM Power in W')
power_ds = (power_plot.line(x, power_data, line_width=2,
                            color=color_list[0], legend_label="Power")).data_source
power_plot.legend.click_policy = "hide"
# temperature line plot (only the first sensor is drawn as a line)
temp_plot = figure(plot_width=500, plot_height=300, title='Temperature in Celsius')
temp_ds = [0, 0, 0, 0]
temp_ds[0] = (temp_plot.line(x, temp_data[temp_labels[0]], line_width=2,
                             color=color_list[0], legend_label=temp_labels[0])).data_source
temp_plot.legend.click_policy = "hide"
# table of min/max for temperature; sentinels as in clear_min_max()
max_temp = [0.0, 0.0, 0.0]
min_temp = [200.0, 200.0, 200.0]
min_max_temp = dict(temp_labels=temp_labels, max_temp=max_temp, min_temp=min_temp)
min_max_temp_source = ColumnDataSource(min_max_temp)
min_max_temp_column = [
    TableColumn(field="temp_labels", title="Temperature"),
    TableColumn(field="max_temp", title="Max"),
    TableColumn(field="min_temp", title="Min")
]
temp_data_table = DataTable(source=min_max_temp_source, columns=min_max_temp_column, index_position=None,
                            width=400, height=200, background=bg_color, css_classes=['custom_table'])
# table of min/max for voltages
max_volt = [0, 0, 0, 0, 0, 0, 0, 0, 0]
min_volt = [7000, 7000, 7000, 7000, 7000, 7000, 7000, 7000, 7000]
min_max_volt = dict(volt_labels=volt_labels, max_volt=max_volt, min_volt=min_volt)
min_max_volt_source = ColumnDataSource(min_max_volt)
min_max_volt_column = [
    TableColumn(field="volt_labels", title="Voltage"),
    TableColumn(field="max_volt", title="Max"),
    TableColumn(field="min_volt", title="Min")
]
volt_data_table = DataTable(source=min_max_volt_source, columns=min_max_volt_column, index_position=None,
                            width=400, height=200, background=bg_color, css_classes=['custom_table'])
# memory line plot
mem_plot = figure(plot_width=800, plot_height=300, title='Total Free Memory in kB')
mem_ds = (mem_plot.line(x, mem_data["MemFree"], line_width=2,
                        color=color_list[0], legend_label="MemFree")).data_source
mem_plot.legend.click_policy = "hide"
# memory bar plot: %used for RAM, swap and CMA, with totals in the hover tool
mem_bar_label = ['MemUsed', 'SwapUsed', 'CMAUsed']
mem_bar_total = [0, 0, 0]
mem_bar_used = [0, 0, 0]
mem_bar_available = [0, 0, 0]
mem_bar_percent = [0.0, 0.0, 0.0]
mem_bar_dict = dict(mem_bar_label=mem_bar_label, mem_bar_total=mem_bar_total,
                    mem_bar_used=mem_bar_used, mem_bar_percent=mem_bar_percent,
                    mem_bar_available=mem_bar_available)
mem_bar_source = ColumnDataSource(mem_bar_dict)
mem_plot_hbar = figure(y_range=mem_bar_label, x_range=[0, 100], plot_width=800, plot_height=300,
                       title='Memory Usage in %')
mem_plot_hbar.xaxis.axis_label = '%Used'
mem_percent_ds = (mem_plot_hbar.hbar(y='mem_bar_label', right='mem_bar_percent',
                                     tags=mem_bar_label, source=mem_bar_source,
                                     height=.5, fill_color='steelblue',
                                     hatch_pattern='vertical_line', hatch_weight=2, line_width=0)).data_source
hover = HoverTool(tooltips=[("Total in kB:", "@mem_bar_total"), ("Used in kB:", "@mem_bar_used")])
mem_plot_hbar.add_tools(hover)
# reset button
reset_button = Button(label="Reset Min/Max and Averages", width=200, button_type='primary')
reset_button.on_click(clear_min_max)
# sample interval
def update_interval(attr, old, new):
    """TextInput callback: change the sampling period (seconds).

    Clamps the requested period to a 0.5 s minimum, echoes the effective
    value back into the widget, and re-registers the periodic update
    callback with the new period.
    """
    global interval, input_interval, callback
    try:
        requested = float(new)
    except ValueError:
        # Ignore non-numeric input instead of crashing the server callback.
        requested = interval
    interval = max(requested, 0.5)
    input_interval.value = str(interval)
    curdoc().remove_periodic_callback(callback)
    callback = curdoc().add_periodic_callback(update, interval * 1000)


input_interval = TextInput(value=str(interval), title="input interval in seconds (minimal 0.5s):",
                           css_classes=['custom_textinput'], width=100)
input_interval.on_change('value', update_interval)
# sample size
def update_sample_size(attr, old, new):
    """TextInput callback: change how many samples the rolling plots keep.

    When the size shrinks, the oldest entries are popped from EVERY live
    deque so that all series stay aligned with the shared x-axis deque.
    """
    global sample_size, sample_size_actual
    try:
        new_sample_size = int(new)
    except ValueError:
        return  # ignore non-numeric input instead of crashing the callback
    if new_sample_size < 1:
        return
    if new_sample_size < sample_size_actual:
        excess = sample_size_actual - new_sample_size
        while excess > 0:
            x.popleft()
            for j in range(len(cpu_labels)):
                cpu_data[cpu_labels[j]].popleft()
            for j in range(len(volt_labels)):
                volt_data[volt_labels[j]].popleft()
            for j in range(len(temp_labels)):
                temp_data[temp_labels[j]].popleft()
            for j in range(len(mem_labels)):
                mem_data[mem_labels[j]].popleft()
            # Bug fix: current/power are appended every sample in update()
            # but were never popped here, leaving them permanently longer
            # than the shared x deque after a shrink.
            current_data.popleft()
            power_data.popleft()
            excess = excess - 1
        sample_size_actual = new_sample_size
    sample_size = new_sample_size


input_sample_size = TextInput(value=str(sample_size), title="Sample Size:",
                              css_classes=['custom_textinput'], width=100)
input_sample_size.on_change('value', update_sample_size)
# Elapsed seconds shown on the x-axis; advanced by update() each tick.
time = 0
# default_data_range = cpu_plot.y_range
# Fixed y-axis ranges so the live plots do not rescale on every sample.
cpu_plot.y_range = Range1d(0, 100)
mem_plot.y_range = Range1d(0, get_mem("MemTotal"))
power_plot.y_range = Range1d(0, 6)
current_plot.y_range = Range1d(0, 1000)
temp_plot.y_range = Range1d(0, 100)
# # dynamic scaling:
# def update_scaling(attr, old, new):
#     if new == [0]:
#         cpu_plot.y_range = default_data_range
#         cpu_plot.title.text = "name 1"
#     else:
#         cpu_plot.y_range = Range1d(0, 50)
#         cpu_plot.title.text = "name 2"
#
# checkbox_labels = ["Enable Dynamic Y-axis Scaling"]
# checkbox_group = CheckboxGroup(labels=checkbox_labels, active=[], css_classes=['custom_textinput'],)
# checkbox_group.on_change('active', update_scaling)
@linear()
def update(step):
    """Periodic callback: sample all platform stats and refresh every plot.

    Each deque behaves as a fixed-size window: once sample_size_actual has
    grown to sample_size, the oldest entry is popped before each append.
    """
    global time
    global sample_size_actual
    time = time + interval
    if sample_size_actual >= sample_size:
        x.popleft()
    x.append(time)
    # Per-core CPU utilization via psutil.
    read = psutil.cpu_percent(percpu=True)
    average_cpu_x = 0
    for j in range(len(cpu_labels)):
        if sample_size_actual >= sample_size:
            cpu_data[cpu_labels[j]].popleft()
        cpu_data_read = read[j]
        cpu_data[cpu_labels[j]].append(cpu_data_read)
        cpu_ds[j].trigger('data', x, cpu_data[cpu_labels[j]])
        average_cpu_x = average_cpu_x + cpu_data_read
    # average CPU usage: incremental running mean over all samples so far
    global average_cpu_sample_size, average_cpu
    average_cpu = average_cpu * average_cpu_sample_size
    average_cpu_sample_size = average_cpu_sample_size + 1
    average_cpu = (average_cpu + (average_cpu_x / 4)) / average_cpu_sample_size
    text = """<h2 style="color :""" + text_color + """;">""" + \
           " Average CPU utilization over last " + str(average_cpu_sample_size) + \
           " samples is " + str(round(average_cpu, 2)) + """%</h2>"""
    average_cpu_display.text = text
    # CPU frequency
    # NOTE(review): cpuinfo_cur_freq reports kHz on Linux but the display
    # labels the value "MHz" — confirm intended unit.
    cpu_freq = []
    for j in range(4):
        cpu_freq.append(open('/sys/devices/system/cpu/cpu' + str(j) + '/cpufreq/cpuinfo_cur_freq', 'r').read())
    cpu_freq_display.text = cpu_freq_text + """<p style="color :""" + text_color + """;"> CPU0:""" + cpu_freq[0] + \
                            "MHz<br> CPU1:" + cpu_freq[1] + \
                            "MHz<br> CPU2:" + cpu_freq[2] + \
                            "MHz<br> CPU3:" + cpu_freq[3] + "MHz"
    # Voltage rails: in1..inN from hwmon0, plus the INA260 total from hwmon1.
    volts = []
    for j in range(len(volt_labels) - 1):
        volts.append(open('/sys/class/hwmon/hwmon0/in' + str(j + 1) + '_input', 'r').read())
    volts = [j.replace('\n', '') for j in volts]
    volts.append(int((open('/sys/class/hwmon/hwmon1/in1_input', 'r').read()).replace('\n', '')))
    for j in range(len(volt_labels)):
        if sample_size_actual >= sample_size:
            volt_data[volt_labels[j]].popleft()
        volt_read = int(volts[j])
        volt_data[volt_labels[j]].append(volt_read)
        # Only redraw the min/max table when a bound actually moved.
        if (volt_read < min_volt[j]) or (volt_read > max_volt[j]):
            min_volt[j] = min(min_volt[j], int(volts[j]))
            max_volt[j] = max(max_volt[j], int(volts[j]))
            volt_data_table.source.trigger('data', volt_data_table.source, volt_data_table.source)
    # Temperatures: sysfs reports millidegrees Celsius, hence the /1000.
    temperatures = []
    for j in range(len(temp_labels)):
        temperatures.append(open('/sys/class/hwmon/hwmon0/temp' + str(j + 1) + '_input', 'r').read())
    temperatures = [j.replace('\n', '') for j in temperatures]
    for j in range(len(temp_labels)):
        if sample_size_actual >= sample_size:
            temp_data[temp_labels[j]].popleft()
        temperature_read = (float(temperatures[j])) / 1000
        temp_data[temp_labels[j]].append(temperature_read)
        if (temperature_read < min_temp[j]) or (temperature_read > max_temp[j]):
            min_temp[j] = min(min_temp[j], temperature_read)
            max_temp[j] = max(max_temp[j], temperature_read)
            temp_data_table.source.trigger('data', temp_data_table.source, temp_data_table.source)
    temp_ds[0].trigger('data', x, temp_data[temp_labels[0]])
    # Total SOM current (mA) and power (W, converted from microwatts).
    ina260_current = (open('/sys/class/hwmon/hwmon1/curr1_input', 'r').read()).replace('\n', '')
    if sample_size_actual >= sample_size:
        current_data.popleft()
    current_data.append(int(ina260_current))
    current_ds.trigger('data', x, current_data)
    ina260_power = int((open('/sys/class/hwmon/hwmon1/power1_input', 'r').read()).replace('\n', '')) / 1000000
    if sample_size_actual >= sample_size:
        power_data.popleft()
    power_data.append(ina260_power)
    power_ds.trigger('data', x, power_data)
    # Mem line chart
    mem_num = get_mem("MemFree")
    if sample_size_actual >= sample_size:
        mem_data["MemFree"].popleft()
    mem_data["MemFree"].append(mem_num)
    mem_ds.trigger('data', x, mem_data["MemFree"])
    # Memory usage Horizontal bar chart; max(total, 1) avoids division by
    # zero when swap or CMA is absent.
    mem_bar_total[0] = get_mem('MemTotal')
    mem_bar_available[0] = get_mem('MemAvailable')
    mem_bar_used[0] = mem_bar_total[0] - mem_bar_available[0]
    mem_bar_percent[0] = 100 * mem_bar_used[0] / max(mem_bar_total[0], 1)
    mem_bar_total[1] = get_mem('SwapTotal')
    mem_bar_available[1] = get_mem('SwapFree')
    mem_bar_used[1] = mem_bar_total[1] - mem_bar_available[1]
    mem_bar_percent[1] = 100 * mem_bar_used[1] / max(mem_bar_total[1], 1)
    mem_bar_total[2] = get_mem('CmaTotal')
    mem_bar_available[2] = get_mem('CmaFree')
    mem_bar_used[2] = mem_bar_total[2] - mem_bar_available[2]
    mem_bar_percent[2] = 100 * mem_bar_used[2] / max(mem_bar_total[2], 1)
    mem_percent_ds.trigger('data', mem_bar_label, mem_bar_percent)
    # Warm-up: grow the window until it reaches the configured size.
    if sample_size_actual < sample_size:
        sample_size_actual = sample_size_actual + 1
# margin: Margin-Top, Margin-Right, Margin-Bottom and Margin-Left
user_interface = column(reset_button, input_sample_size, input_interval,  # checkbox_group,
                        background=bg_color,
                        margin=(50, 50, 50, 100))
cpu_freq_block = column(cpu_freq_display,
                        background=bg_color,
                        margin=(0, 0, 0, 100))
# Full layout of the platform-statistics tab.
layout1 = layout(column(row(title1, align='center'),
                        average_cpu_display,
                        row(cpu_plot, user_interface, cpu_freq_block, background=bg_color),
                        row(mem_plot, mem_plot_hbar, background=bg_color),
                        row(power_plot, current_plot, temp_plot, background=bg_color),
                        row(volt_data_table, temp_data_table, background=bg_color),
                        background=bg_color))
# Add a periodic callback to be run every 1000 milliseconds
callback = curdoc().add_periodic_callback(update, interval * 1000)
##################################################
##### Application Cockpit Tab ####################
##################################################
title2 = Div(
    text="""<h1 style="color :""" + text_color + """; text-align :center">Kria™ SOM: Application Cockpit</h1>""",
    width=500)


def xmutil_unloadapp():
    """Unload the active accelerator via `xmutil unloadapp` and redraw.

    Any demo process still running is terminated first; afterwards the
    load-button column and the active-app banner in layout2 are rebuilt.
    """
    if current_command:
        terminate_app()
    subprocess.run(["sudo", "xmutil", "unloadapp"])
    draw_apps()
    # draw_app_run_buttons()
    layout2.children[4] = column(load_buttons, margin=(0, 0, 0, 50))
    layout2.children[1] = active_app_print
    # layout2.children[2] = row(run_buttons)


unload_button = Button(label="Unloadapp", width=600, button_type='primary')
unload_button.on_click(xmutil_unloadapp)
# Apps!!!!!###########################################################################################################
# Apps!!!!!###########################################################################################################
def xmutil_loadapp(app_name):
    """Load accelerator *app_name* via `xmutil loadapp` and redraw layout2.

    NOTE(review): a running demo command is only reported, not terminated,
    before loading — confirm this asymmetry with xmutil_unloadapp is wanted.
    """
    if current_command:
        print("\nError: unexpected command:", current_command, "\n")
    # app_name comes from the parsed xmutil/dfx-mgr output, not user input.
    command = str('sudo xmutil loadapp ' + app_name)
    subprocess.run(command, shell=True, capture_output=True)
    draw_apps()
    # draw_app_run_buttons()
    layout2.children[4] = column(load_buttons, margin=(0, 0, 0, 50))
    layout2.children[1] = active_app_print
    # layout2.children[2] = row(run_buttons)


# list out applications - currently listpackage doesnt return stdout correctly, temporarily use a fixed string for dev
# listapp_output = subprocess.run(['sudo dfx-mgr-client -listPackage | grep kv260'], shell=True, stdout=subprocess.PIPE)
# print("list app output", listapp_output.stdout)
# One Button per installed accelerator, rebuilt by draw_apps().
load_buttons = []
active_app_print = Div(
    text="""<h2 style="color :""" + text_color + """; text-align :center">Active Accelerator: None</h2>""",
    width=600)
active_app = "None"
def draw_apps():
    """Query `dfx-mgr-client -listPackage` and rebuild the app load buttons.

    Side effects: rewrites the module-level load_buttons, active_app and
    active_app_print. The loaded accelerator gets a green (success) button;
    while one is loaded the others become inert white buttons with a JS
    alert; when nothing is loaded each button triggers xmutil_loadapp.
    """
    global load_buttons
    global active_app_print
    global active_app
    active_app = "None"
    listapp_output = subprocess.run(['sudo dfx-mgr-client -listPackage'], shell=True,
                                    stdout=subprocess.PIPE).stdout.decode("utf-8")
    print("\n", listapp_output, "\n")
    listapp = listapp_output.split("\n")
    apps = []
    load_buttons = []
    # Parse the table: skip the header row, first column is the app name.
    # (local `x` shadows the module-level x-axis deque inside this function)
    for i in range(len(listapp) - 1):
        x = listapp[i].split()
        print("\n x is ", x, " i is ", i, "\n")
        if x and x[0] != "Accelerator":
            apps.append(x[0])
            # NOTE(review): assumes every data row has >= 5 columns and that
            # column 4 != "-1" marks the loaded app — confirm against the
            # dfx-mgr-client output format.
            if x[4] != "-1":
                active_app = x[0]
                active_app_print = Div(
                    text="""<h2 style="color :""" + text_color + """; text-align :center">Active Accelerator: """ + active_app + """</h2>""",
                    width=600)
    for i in range(len(apps)):
        load_buttons.append(Button(label=apps[i], width=300, button_type='primary'))
        if active_app != "None":
            if apps[i] == active_app:
                load_buttons[i].button_type = 'success'
                load_buttons[i].js_on_click(
                    CustomJS(code='alert("This Accelerator is already loaded, Unloadapp first!");'))
            else:
                load_buttons[i].button_type = 'default'
                load_buttons[i].js_on_click(CustomJS(code='alert("Unloadapp First!");'))
        else:
            load_buttons[i].on_click(partial(xmutil_loadapp, app_name=apps[i]))


app_print = Div(
    text="""<h2 style="color :""" + text_color + """; text-align :left">Available Accelerated Applications on
    target to load</h2><h4 style="color :""" + text_color + """; text-align :left">&nbsp&nbsp Blue - click
    to load, Green - Loaded Accelerator, White - available to load after unloading</h4>""", width=1600)
draw_apps()
# Handle of the currently running demo subprocess, or None.
current_command = None


def terminate_app():
    """Terminate the running demo process and clear the handle.

    NOTE(review): assumes current_command is not None — all current callers
    guard with `if current_command:` before calling.
    """
    global current_command
    current_command.terminate()
    current_command = None


def run_app(run_command):
    """Launch a demo command, terminating any previously running one.

    WARNING: run_command is executed with shell=True; it appears to come
    from local command files (see the commented draw_app_run_buttons), not
    user input — keep it that way.
    """
    global current_command
    if current_command:
        terminate_app()
    print("run_command:", run_command, "\n\n")
    current_command = subprocess.Popen(run_command, shell=True)
    print("\n\ncurrent command: ", current_command, "\n\n")
# run_buttons = []
#
#
# def draw_app_run_buttons():
# global run_buttons
# global active_app
# run_buttons = []
# if active_app == "None":
# return
# less_cmd = 'less som_dashboard/commands/' + active_app + '_cmds.txt'
# print(less_cmd)
# less_return = subprocess.run(less_cmd, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
# run_commands_txt = less_return.stdout.decode("utf-8")
# if "No such file" in run_commands_txt:
# return
# run_commands = run_commands_txt.split('\n')
# for commands in run_commands:
# x = commands.split(',')
# button = Button(label=x[0], width=300, button_type='primary')
# button.on_click(partial(run_app, run_command=x[1]))
# run_buttons.append(button)
#
#
# draw_app_run_buttons()
# packages!!###########################################################################################################
package_print = Div(
    text="""<h2 style="color :""" + text_color + """; text-align :center">Available Accelerated Application
    Packages, click button below to download and DNF install the chosen package</h2>""", width=1600)


def dnf_install(app_name):
    """Install package *app_name* via dnf, then refresh both button columns.

    Blocking call: the Bokeh server is unresponsive while dnf runs.
    app_name comes from the parsed `xmutil getpkgs` output, not user input.
    """
    command = str('sudo dnf install ' + app_name + " -y")
    print("execute command: ", command)
    subprocess.call(command, shell=True)
    print("finished command: ", command)
    draw_pkgs()
    layout2.children[6] = column(pkgs_buttons, margin=(0, 0, 0, 50))
    draw_apps()
    layout2.children[4] = column(load_buttons, margin=(0, 0, 0, 50))
# One Button per installable accelerator package, rebuilt by draw_pkgs().
pkgs_buttons = []


def draw_pkgs():
    """Query `xmutil getpkgs` and rebuild the package-install buttons.

    Each button triggers dnf_install for the package named in the first
    column of the filtered output.
    """
    global pkgs_buttons
    # subprocess.run(['sudo dnf update'], shell=True)
    # subprocess.run(['sudo dnf clean all'], shell=True)
    getpkgs_output = subprocess.run(['sudo xmutil getpkgs | grep packagegroup-kv260'], shell=True,
                                    stdout=subprocess.PIPE).stdout.decode("utf-8")
    print("getpkgs_output", getpkgs_output)
    pkgs_buttons = []
    # Skip blank rows: the old index-based loop raised IndexError on any
    # line without fields and mis-indexed pkgs_buttons after a skip.
    for line in getpkgs_output.split("\n")[:-1]:
        fields = line.split()
        if not fields:
            continue
        button = Button(label=fields[0], width=300, button_type='primary')
        button.on_click(partial(dnf_install, app_name=fields[0]))
        pkgs_buttons.append(button)


draw_pkgs()
app_print2 = Div(
text="""<h3 style="color :""" + text_color + """; text-align :center">To execute application, use command
line or start Jupyter lab and use Jupyter notebooks. </h3>""", width=1600)
layout2 = layout([
row(title2, align='center'), # 0
[active_app_print], # 1
# row(run_buttons), # 2
column(unload_button, margin=(0, 0, 0, 50)), # 2
[app_print], # 3
column(load_buttons, margin=(0, 0, 0, 50)), # 4
[package_print], # 5
column(pkgs_buttons, margin=(0, 0, 0, 50)), # 6
row(app_print2, margin=(100, 0, 400, 0))
])
layout2.background = bg_color
##################################################
##### Group Tabs ##########################
##################################################
curdoc().theme = 'dark_minimal'
tab1 = Panel(child=layout1, title="Platform Statistic Dashboard")
tab2 = Panel(child=layout2, title="Application Cockpit")
tabs = Tabs(tabs=[tab1, tab2])
curdoc().add_root(tabs)
|
# Generated by Django 2.1.2 on 2018-10-14 19:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.1.2, 2018-10-14).

    Adds ExternalReference/ExternalSource (links to outside data sources),
    TravelAlias, TravelClassification and TravelRegion models, and wires
    classification/region foreign keys onto the existing entity models.
    Generated code — do not hand-edit operations.
    """

    dependencies = [
        ('travel', '0004_auto_20180306_1507'),
    ]

    operations = [
        migrations.CreateModel(
            name='ExternalReference',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ref', models.CharField(max_length=255)),
                ('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='travel.TravelEntity')),
            ],
        ),
        migrations.CreateModel(
            name='ExternalSource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
                ('url', models.URLField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='TravelAlias',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(max_length=50)),
                ('alias', models.CharField(max_length=255)),
                ('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='travel.TravelEntity')),
            ],
        ),
        migrations.CreateModel(
            name='TravelClassification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='travel.TravelEntityType')),
            ],
        ),
        migrations.CreateModel(
            name='TravelRegion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('un_code', models.CharField(db_index=True, max_length=5)),
                # Self-referential FK: regions form a hierarchy.
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='travel.TravelRegion')),
            ],
        ),
        migrations.AddField(
            model_name='externalreference',
            name='source',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='travel.ExternalSource'),
        ),
        migrations.AddField(
            model_name='travelentity',
            name='classification',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='travel.TravelClassification'),
        ),
        migrations.AddField(
            model_name='travelentityinfo',
            name='region',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='travel.TravelRegion'),
        ),
    ]
|
# Generated by Django 2.2.17 on 2021-06-14 12:34
from typing import Optional
from django.db import migrations
from django.utils import timezone
AGREE = "agree"
DECLINE = "decline"
def reconsent(Consent, old_consent, term_option):
    """Archive *old_consent* and record a fresh consent for *term_option*.

    :param Consent: historical Consent model (from apps.get_model).
    :param old_consent: consent row being superseded; archived in place.
    :param term_option: the TermOption the person is re-consenting to.
    """
    old_consent.archived_at = timezone.now()
    old_consent.save()
    new_consent_kwargs = {
        "term_id": term_option.term_id,
        "term_option": term_option,
        "person_id": old_consent.person_id,
    }
    Consent.objects.create(**new_consent_kwargs)
def copy_privacy_policy(apps, schema_editor):
    """Re-record each person's data_privacy_agreement flag as a Consent.

    No-op when the privacy-policy term option does not exist yet.
    """
    Consent = apps.get_model("consents", "Consent")
    TermOption = apps.get_model("consents", "TermOption")
    slug = "privacy-policy"
    try:
        agree_option = TermOption.objects.get(term__slug=slug, archived_at=None)
    except TermOption.DoesNotExist:
        return
    active_consents = Consent.objects.filter(term__slug=slug, archived_at=None)
    for old_consent in active_consents.select_related("person"):
        # Only people who actually agreed are re-consented.
        if old_consent.person.data_privacy_agreement:
            reconsent(Consent, old_consent, agree_option)
def copy_may_contact(apps, schema_editor):
    """Re-record each person's may_contact flag under the consents app.

    No-op when either the agree or decline option is missing.
    """
    Consent = apps.get_model("consents", "Consent")
    TermOption = apps.get_model("consents", "TermOption")
    slug = "may-contact"
    options = TermOption.objects.filter(term__slug=slug, archived_at=None)

    def pick(option_type):
        # [0] keeps the original IndexError-based bail-out when missing.
        return [opt for opt in options if opt.option_type == option_type][0]

    try:
        agree_option = pick(AGREE)
        decline_option = pick(DECLINE)
    except IndexError:
        return
    active_consents = Consent.objects.filter(term__slug=slug, archived_at=None)
    for old_consent in active_consents.select_related("person"):
        chosen = agree_option if old_consent.person.may_contact else decline_option
        reconsent(Consent, old_consent, chosen)
def copy_public_profile(apps, schema_editor):
    """Re-record each person's publish_profile flag under the consents app.

    No-op when either the agree or decline option is missing.
    """
    Consent = apps.get_model("consents", "Consent")
    TermOption = apps.get_model("consents", "TermOption")
    slug = "public-profile"
    options = TermOption.objects.filter(term__slug=slug, archived_at=None)

    def pick(option_type):
        # [0] keeps the original IndexError-based bail-out when missing.
        return [opt for opt in options if opt.option_type == option_type][0]

    try:
        agree_option = pick(AGREE)
        decline_option = pick(DECLINE)
    except IndexError:
        return
    active_consents = Consent.objects.filter(term__slug=slug, archived_at=None)
    for old_consent in active_consents.select_related("person"):
        chosen = agree_option if old_consent.person.publish_profile else decline_option
        reconsent(Consent, old_consent, chosen)
def copy_may_publish_name(apps, schema_editor):
    """Re-record lesson_publication_consent choices under the consents app.

    Maps the legacy Person field values ("yes-github", "yes-profile",
    "yes-orcid", "no") onto the matching TermOption rows; unknown values
    are skipped. No-op when any expected option is missing.
    """
    Consent = apps.get_model("consents", "Consent")
    TermOption = apps.get_model("consents", "TermOption")
    slug = "may-publish-name"
    options = TermOption.objects.filter(term__slug=slug, archived_at=None)

    def pick(predicate):
        # [0] keeps the original IndexError-based bail-out when missing.
        return [opt for opt in options if predicate(opt)][0]

    try:
        github_option = pick(lambda opt: "only use my GitHub Handle" in opt.content)
        profile_option = pick(lambda opt: "use the name associated with my profile" in opt.content)
        orcid_option = pick(lambda opt: "use the name associated with my ORCID profile" in opt.content)
        decline_option = pick(lambda opt: opt.option_type == DECLINE)
    except IndexError:
        return
    option_by_answer = {
        "yes-profile": profile_option,
        "yes-orcid": orcid_option,
        "yes-github": github_option,
        "no": decline_option,
    }
    active_consents = Consent.objects.filter(term__slug=slug, archived_at=None)
    for old_consent in active_consents.select_related("person"):
        chosen = option_by_answer.get(old_consent.person.lesson_publication_consent)
        if chosen:
            reconsent(Consent, old_consent, chosen)
class Migration(migrations.Migration):
    """Data migration copying legacy per-person consent flags onto Consent rows.

    Every RunPython's reverse operation is a no-op: the legacy fields are left
    untouched, so unapplying the migration simply keeps the copied consents.
    """

    dependencies = [
        ("consents", "0005_auto_20210411_2325"),
    ]

    operations = [
        # copy_privacy_policy and copy_may_contact are defined earlier in
        # this module, above this chunk.
        migrations.RunPython(copy_privacy_policy, migrations.RunPython.noop),
        migrations.RunPython(copy_may_contact, migrations.RunPython.noop),
        migrations.RunPython(copy_public_profile, migrations.RunPython.noop),
        migrations.RunPython(copy_may_publish_name, migrations.RunPython.noop),
    ]
|
#!/bin/env python
# -*- coding: utf-8 -*-
##
# test_job.py: Checks correctness of azure.quantum.job module.
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
## IMPORTS ##
import json
import os
import time
import unittest
import unittest.mock  # `import unittest` alone does not make unittest.mock available
import uuid

from azure.quantum import Job
from azure.quantum import Workspace
from azure.quantum.optimization import Problem
from azure.quantum.optimization.solvers import SimulatedAnnealing
from tests.quantum_test_base import QuantumTestBase
class TestJob(QuantumTestBase):
    """Tests for the azure.quantum.job module.

    Each test builds a small chain problem, submits it through a
    SimulatedAnnealing solver and exercises one part of the Job API.
    ``Job.create_job_id`` is patched so recorded sessions replay with a
    stable job id.
    """

    mock_create_job_id_name = "create_job_id"
    # Keep a reference to the real implementation while the attribute is
    # patched inside the tests below.
    create_job_id = Job.create_job_id

    def get_dummy_job_id(self):
        """Return a fresh job id when live/recording, a fixed uid on replay."""
        if self.in_recording or self.is_live:
            return Job.create_job_id()
        return self.dummy_uid

    def _make_problem(self, count=4):
        """Build the small test Problem shared by all tests: `count` chain terms."""
        problem = Problem(name="test")
        for i in range(count):
            problem.add_term(c=i, indices=[i, i + 1])
        return problem

    def test_job_refresh(self):
        ws = self.create_workspace()
        problem = self._make_problem()
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_dummy_job_id(),
        ):
            solver = SimulatedAnnealing(ws)
            job = solver.submit(problem)
            job.refresh()

    def test_job_has_completed(self):
        ws = self.create_workspace()
        problem = self._make_problem()
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_dummy_job_id(),
        ):
            solver = SimulatedAnnealing(ws)
            job = solver.submit(problem)
            self.assertEqual(False, job.has_completed())
            if self.in_recording:
                # Give the live service time to finish before recording.
                time.sleep(3)
            job.get_results()
            self.assertEqual(True, job.has_completed())

    def test_job_wait_unit_completed(self):
        # NOTE(review): "unit" is presumably a typo for "until"; the name is
        # kept so the test id stays stable in CI history.
        ws = self.create_workspace()
        problem = self._make_problem()
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_dummy_job_id(),
        ):
            solver = SimulatedAnnealing(ws)
            job = solver.submit(problem)
            if self.in_recording:
                time.sleep(3)
            job.wait_until_completed()
            self.assertEqual(True, job.has_completed())

    def test_job_get_results(self):
        ws = self.create_workspace()
        problem = self._make_problem()
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_dummy_job_id(),
        ):
            solver = SimulatedAnnealing(ws)
            job = solver.submit(problem)
            if self.in_recording:
                time.sleep(3)
            actual = job.get_results()
            # Expected values come from the recorded service response.
            expected = {
                'configuration': {'0': 1, '1': 1, '2': -1, '3': 1, '4': -1},
                'cost': -6.0,
                'parameters': {'beta_start': 0.2, 'beta_stop': 1.9307236000000003, 'restarts': 360, 'sweeps': 50},
            }
            self.assertEqual(expected["configuration"], actual["configuration"])
            self.assertEqual(expected["cost"], actual["cost"])
            self.assertEqual(expected["parameters"], actual["parameters"])
if __name__ == "__main__":
    # Allow running this test module directly: python test_job.py
    unittest.main()
|
import tensorflow as tf
# Alias for the TF1 distributions API.  Not referenced in this chunk --
# presumably used further down the file; TODO confirm before removing.
distr = tf.contrib.distributions
# import numpy as np
# from tqdm import tqdm
# import os
# import matplotlib.pyplot as plt
# from datetime import timedelta
#
# import time
# Embed input sequence [batch_size, seq_length, from_] -> [batch_size, seq_length, to_]
def embed_seq(input_seq, from_, to_, is_training, BN=True, initializer=tf.contrib.layers.xavier_initializer()):
    """Embed a [batch, seq_len, from_] tensor into [batch, seq_len, to_].

    The embedding is a width-1 conv1d (a position-wise dense layer) followed
    by optional batch normalization.

    NOTE(review): the default `initializer` is evaluated once at definition
    time and shared across calls; `is_training` is accepted but unused --
    batch norm below always runs with training=True.  Confirm both are
    intended.
    """
    with tf.variable_scope("embedding"):  # embed + BN input set
        # The variable is named "weights" under scope "embedding"; renaming
        # either would invalidate existing checkpoints.
        W_embed = tf.get_variable("weights", [1, from_, to_], initializer=initializer)
        embedded_input = tf.nn.conv1d(input_seq, W_embed, 1, "VALID", name="embedded_input")
        if BN:
            embedded_input = tf.layers.batch_normalization(embedded_input, axis=2, training=True, name='layer_norm', reuse=None)
        return embedded_input
# Apply multihead attention to a 3d tensor with shape [batch_size, seq_length, n_hidden].
# Attention size = n_hidden should be a multiple of num_head
# Returns a 3d tensor with shape of [batch_size, seq_length, n_hidden]
def multihead_attention(inputs, num_units=None, num_heads=16, dropout_rate=0.1, is_training=True):
    """Multi-head self-attention over `inputs` of shape [batch, seq_len, n_hidden].

    Returns a tensor with the same shape.  `num_units` must equal the input's
    hidden size (required by the residual add below) and be divisible by
    `num_heads`.

    NOTE(review): the final batch normalization always runs with
    training=True; `is_training` only gates the attention dropout -- confirm
    this is intended.
    """
    with tf.variable_scope("multihead_attention", reuse=None):
        # Linear projections
        Q = tf.layers.dense(inputs, num_units, activation=tf.nn.relu)  # [batch_size, seq_length, n_hidden]
        K = tf.layers.dense(inputs, num_units, activation=tf.nn.relu)  # [batch_size, seq_length, n_hidden]
        V = tf.layers.dense(inputs, num_units, activation=tf.nn.relu)  # [batch_size, seq_length, n_hidden]
        # Split heads on the feature axis, stack them on the batch axis
        Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0)  # [batch_size*num_heads, seq_length, n_hidden/num_heads]
        K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0)  # [batch_size*num_heads, seq_length, n_hidden/num_heads]
        V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0)  # [batch_size*num_heads, seq_length, n_hidden/num_heads]
        # Multiplication: raw attention scores
        outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))  # num_heads*[batch_size, seq_length, seq_length]
        # Scale by sqrt(d_k), as in scaled dot-product attention
        outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)
        # Activation: normalize scores over the key axis
        outputs = tf.nn.softmax(outputs)  # num_heads*[batch_size, seq_length, seq_length]
        # Dropout on attention weights (only place is_training is used)
        outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=is_training)
        # Weighted sum of values
        outputs = tf.matmul(outputs, V_)  # num_heads*[batch_size, seq_length, n_hidden/num_heads]
        # Restore shape: gather heads back onto the feature axis
        outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2)  # [batch_size, seq_length, n_hidden]
        # Residual connection
        outputs += inputs  # [batch_size, seq_length, n_hidden]
        # Normalize (batch norm over the feature axis, always in training mode)
        outputs = tf.layers.batch_normalization(outputs, axis=2, training=True, name='ln', reuse=None)  # [batch_size, seq_length, n_hidden]
    return outputs
# Apply point-wise feed forward net to a 3d tensor with shape [batch_size, seq_length, n_hidden]
# Returns: a 3d tensor with the same shape and dtype as inputs
def feedforward(inputs, num_units=[2048, 512], is_training=True):
    """Position-wise feed-forward net over [batch, seq_len, n_hidden] inputs.

    Two width-1 conv1d layers (inner width num_units[0], output width
    num_units[1]), a residual add, then batch norm.  num_units[1] must equal
    the input's hidden size for the residual connection.

    NOTE(review): `num_units` is a mutable default argument (read-only here,
    so harmless) and `is_training` is unused -- batch norm always runs with
    training=True.  Confirm both are intended.
    """
    with tf.variable_scope("ffn", reuse=None):
        # Inner layer
        params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1, "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        # Readout layer
        params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1, "activation": None, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        # Residual connection
        outputs += inputs
        # Normalize
        outputs = tf.layers.batch_normalization(outputs, axis=2, training=True, name='ln', reuse=None)  # [batch_size, seq_length, n_hidden]
        return outputs
# Encode input sequence [batch_size, seq_length, n_hidden] -> [batch_size, seq_length, n_hidden]
def encode_seq(input_seq, input_dim, num_stacks, num_heads, num_neurons, is_training, dropout_rate=0.):
    """Transformer-style encoder: `num_stacks` blocks of attention + FFN.

    `input_dim` must equal the last dimension of `input_seq` (the blocks use
    residual adds); `num_neurons` is the FFN inner width.  Shape is preserved:
    [batch, seq_len, input_dim] -> [batch, seq_len, input_dim].
    """
    with tf.variable_scope("stack"):
        for i in range(num_stacks):  # block i
            with tf.variable_scope("block_{}".format(i)):  # Multihead Attention + Feed Forward
                input_seq = multihead_attention(input_seq, num_units=input_dim, num_heads=num_heads, dropout_rate=dropout_rate, is_training=is_training)
                input_seq = feedforward(input_seq, num_units=[num_neurons, input_dim], is_training=is_training)
    return input_seq  # encoder_output is the ref for actions [Batch size, Sequence Length, Num_neurons]
|
import math
from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import ObjSpace, W_Root, NoneNotWrapped
class State:
    """Per-ObjSpace cache holding the wrapped math constants e and pi."""
    def __init__(self, space):
        self.w_e = space.wrap(math.e)
        self.w_pi = space.wrap(math.pi)
def get(space):
    """Return the per-space State instance (created once, then cached)."""
    return space.fromcache(State)
def math1(space, f, x):
    """Call one-argument math function `f` on `x` and wrap the result.

    C-level OverflowError/ValueError are translated into application-level
    OverflowError/ValueError, matching CPython's math module behaviour.
    """
    try:
        y = f(x)
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap("math range error"))
    except ValueError:
        raise OperationError(space.w_ValueError,
                             space.wrap("math domain error"))
    return space.wrap(y)
# RPython annotator directive: specialize per the `f` argument.
math1._annspecialcase_ = 'specialize:arg(1)'
def math1_w(space, f, x):
    """Like math1, but return the raw (unwrapped) result.

    Used by callers such as frexp/modf that need to post-process the value
    before wrapping it.
    """
    try:
        r = f(x)
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap("math range error"))
    except ValueError:
        raise OperationError(space.w_ValueError,
                             space.wrap("math domain error"))
    return r
# RPython annotator directive: specialize per the `f` argument.
math1_w._annspecialcase_ = 'specialize:arg(1)'
def math2(space, f, x, snd):
    """Call two-argument math function `f` and wrap the result.

    Same error translation as math1 (range/domain errors become
    application-level exceptions).
    """
    try:
        r = f(x, snd)
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap("math range error"))
    except ValueError:
        raise OperationError(space.w_ValueError,
                             space.wrap("math domain error"))
    return space.wrap(r)
# RPython annotator directive: specialize per the `f` argument.
math2._annspecialcase_ = 'specialize:arg(1)'
# --- Application-level wrappers ------------------------------------------
# Each wrapper delegates to math1/math2 for error translation; the
# `unwrap_spec` attribute tells the gateway how to unwrap the arguments.
# The docstrings become the app-level docstrings of the math functions.
def pow(space, x, y):
    """pow(x,y)
    Return x**y (x to the power of y).
    """
    return math2(space, math.pow, x, y)
pow.unwrap_spec = [ObjSpace, float, float]

def cosh(space, x):
    """cosh(x)
    Return the hyperbolic cosine of x.
    """
    return math1(space, math.cosh, x)
cosh.unwrap_spec = [ObjSpace, float]

def ldexp(space, x, i):
    """ldexp(x, i) -> x * (2**i)
    """
    return math2(space, math.ldexp, x, i)
ldexp.unwrap_spec = [ObjSpace, float, int]

def hypot(space, x, y):
    """hypot(x,y)
    Return the Euclidean distance, sqrt(x*x + y*y).
    """
    return math2(space, math.hypot, x, y)
hypot.unwrap_spec = [ObjSpace, float, float]

def tan(space, x):
    """tan(x)
    Return the tangent of x (measured in radians).
    """
    return math1(space, math.tan, x)
tan.unwrap_spec = [ObjSpace, float]

def asin(space, x):
    """asin(x)
    Return the arc sine (measured in radians) of x.
    """
    return math1(space, math.asin, x)
asin.unwrap_spec = [ObjSpace, float]

def fabs(space, x):
    """fabs(x)
    Return the absolute value of the float x.
    """
    return math1(space, math.fabs, x)
fabs.unwrap_spec = [ObjSpace, float]

def floor(space, x):
    """floor(x)
    Return the floor of x as a float.
    This is the largest integral value <= x.
    """
    return math1(space, math.floor, x)
floor.unwrap_spec = [ObjSpace, float]

def sqrt(space, x):
    """sqrt(x)
    Return the square root of x.
    """
    return math1(space, math.sqrt, x)
sqrt.unwrap_spec = [ObjSpace, float]
def frexp(space, x):
    """frexp(x)
    Return the mantissa and exponent of x, as pair (m, e).
    m is a float and e is an int, such that x = m * 2.**e.
    If x is 0, m and e are both 0. Else 0.5 <= abs(m) < 1.0.
    """
    # math1_w returns the raw (mantissa, exponent) tuple; wrap each half.
    mant, expo = math1_w(space, math.frexp, x)
    return space.newtuple([space.wrap(mant), space.wrap(expo)])
frexp.unwrap_spec = [ObjSpace, float]
# Conversion factor: multiply a value in degrees by this to get radians.
degToRad = math.pi / 180.0

def degrees(space, x):
    """degrees(x) -> converts angle x from radians to degrees
    """
    return space.wrap(x / degToRad)
degrees.unwrap_spec = [ObjSpace, float]
def _log_any(space, w_x, base):
    """Shared implementation for log() and log10().

    `base` == 0.0 is the sentinel for "natural logarithm"; otherwise the
    natural log is divided by log(base).  base == 10.0 uses log10 directly.
    """
    # base is supposed to be positive or 0.0, which means we use e
    try:
        if space.is_true(space.isinstance(w_x, space.w_long)):
            # special case to support log(extremely-large-long)
            num = space.bigint_w(w_x)
            result = num.log(base)
        else:
            x = space.float_w(w_x)
            if base == 10.0:
                result = math.log10(x)
            else:
                result = math.log(x)
                if base != 0.0:
                    den = math.log(base)
                    result /= den
    except OverflowError:
        raise OperationError(space.w_OverflowError,
                             space.wrap('math range error'))
    except ValueError:
        raise OperationError(space.w_ValueError,
                             space.wrap('math domain error'))
    return space.wrap(result)
def log(space, w_x, w_base=NoneNotWrapped):
    """log(x[, base]) -> the logarithm of x to the given base.
    If the base not specified, returns the natural logarithm (base e) of x.
    """
    if w_base is None:
        base = 0.0  # sentinel understood by _log_any: natural logarithm
    else:
        base = space.float_w(w_base)
        if base <= 0.0:
            # just for raising the proper errors
            return math1(space, math.log, base)
    return _log_any(space, w_x, base)
log.unwrap_spec = [ObjSpace, W_Root, W_Root]
def log10(space, w_x):
    """log10(x) -> the base 10 logarithm of x.
    """
    return _log_any(space, w_x, 10.0)
log10.unwrap_spec = [ObjSpace, W_Root]
# --- More application-level wrappers (same pattern as above) --------------
def fmod(space, x, y):
    """fmod(x,y)
    Return fmod(x, y), according to platform C. x % y may differ.
    """
    return math2(space, math.fmod, x, y)
fmod.unwrap_spec = [ObjSpace, float, float]

def atan(space, x):
    """atan(x)
    Return the arc tangent (measured in radians) of x.
    """
    return math1(space, math.atan, x)
atan.unwrap_spec = [ObjSpace, float]

def ceil(space, x):
    """ceil(x)
    Return the ceiling of x as a float.
    This is the smallest integral value >= x.
    """
    return math1(space, math.ceil, x)
ceil.unwrap_spec = [ObjSpace, float]

def sinh(space, x):
    """sinh(x)
    Return the hyperbolic sine of x.
    """
    return math1(space, math.sinh, x)
sinh.unwrap_spec = [ObjSpace, float]

def cos(space, x):
    """cos(x)
    Return the cosine of x (measured in radians).
    """
    return math1(space, math.cos, x)
cos.unwrap_spec = [ObjSpace, float]

def tanh(space, x):
    """tanh(x)
    Return the hyperbolic tangent of x.
    """
    return math1(space, math.tanh, x)
tanh.unwrap_spec = [ObjSpace, float]

def radians(space, x):
    """radians(x) -> converts angle x from degrees to radians
    """
    return space.wrap(x * degToRad)
radians.unwrap_spec = [ObjSpace, float]

def sin(space, x):
    """sin(x)
    Return the sine of x (measured in radians).
    """
    return math1(space, math.sin, x)
sin.unwrap_spec = [ObjSpace, float]

def atan2(space, y, x):
    """atan2(y, x)
    Return the arc tangent (measured in radians) of y/x.
    Unlike atan(y/x), the signs of both x and y are considered.
    """
    return math2(space, math.atan2, y, x)
atan2.unwrap_spec = [ObjSpace, float, float]

def modf(space, x):
    """modf(x)
    Return the fractional and integer parts of x. Both results carry the sign
    of x. The integer part is returned as a real.
    """
    # math1_w returns the raw (frac, intpart) tuple; wrap each half.
    frac, intpart = math1_w(space, math.modf, x)
    return space.newtuple([space.wrap(frac), space.wrap(intpart)])
modf.unwrap_spec = [ObjSpace, float]

def exp(space, x):
    """exp(x)
    Return e raised to the power of x.
    """
    return math1(space, math.exp, x)
exp.unwrap_spec = [ObjSpace, float]

def acos(space, x):
    """acos(x)
    Return the arc cosine (measured in radians) of x.
    """
    return math1(space, math.acos, x)
acos.unwrap_spec = [ObjSpace, float]
|
import ConfigParser
import unittest
import telegram
import telegram_commands.mc as mc
class TestMC(unittest.TestCase):
    """Smoke test for the /mc telegram command handler.

    Requires a keys.ini with Telegram credentials; the command is sent to a
    private testing chat, so this test performs real network I/O.
    """

    def test_mc(self):
        import os  # local import; keeps this file's top-level imports untouched

        keyConfig = ConfigParser.ConfigParser()
        # Look for keys.ini in the CWD and in the parent directory.  The
        # original used the Windows-only literal "..\keys.ini"; os.path.join
        # makes the fallback work on POSIX too.  read() silently skips
        # missing files, so listing both is safe.
        keyConfig.read(["keys.ini", os.path.join("..", "keys.ini")])
        bot = telegram.Bot(keyConfig.get('Telegram', 'TELE_BOT_ID'))
        chatId = keyConfig.get('BotAdministration', 'TESTING_PRIVATE_CHAT_ID')
        mc.run(bot, chatId, 'Admin', keyConfig, '')
|
#!/usr/bin/env python3
import os
from os import path
import subprocess
def run_command(args, dry_run=False, verbose=False):
    """Optionally echo *args*, then execute them unless this is a dry run.

    The command is printed when either dry_run or verbose is set; it is only
    actually executed (with check=True, so failures raise) when dry_run is
    false.
    """
    should_echo = dry_run or verbose
    if should_echo:
        print(" ".join(args))
    if dry_run:
        return
    subprocess.run(args, check=True)
def main():
    """Re-generate Goose translation output for all configured repositories.

    Paths to the source repos come from command-line flags; any repo whose
    flag is omitted is skipped.  With --dry-run the commands are only
    printed; with --compile the goose binary is (re)installed first.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description="Update goose output from goose tests and go-nfsd"
    )
    parser.add_argument(
        "--compile", help="also compile and install goose", action="store_true"
    )
    parser.add_argument(
        "-n",
        "--dry-run",
        help="print commands without running them",
        action="store_true",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="print commands in addition to running them",
        action="store_true",
    )
    parser.add_argument(
        "--goose",
        help="path to goose repo",
        required=True,
        metavar="GOOSE_PATH",
    )
    parser.add_argument(
        "--goose-examples",
        help="also translate tests in Goose",
        action="store_true",
    )
    parser.add_argument(
        "--nfsd",
        help="path to go-nfsd repo (skip translation if not provided)",
        metavar="GO_NFSD_PATH",
        default=None,
    )
    parser.add_argument(
        "--journal",
        help="path to go-journal repo (skip translation if not provided)",
        metavar="GO_JOURNAL_PATH",
        default=None,
    )
    parser.add_argument(
        "--examples",
        help="path to perennial-examples repo (skip translation if not provided)",
        metavar="PERENNIAL_EXAMPLES_PATH",
        default=None,
    )
    parser.add_argument(
        "--distributed-examples",
        help="path to lockservice repo (skip translation if not provided)",
        metavar="DISTRIBUTED_EXAMPLES_PATH",
        default=None,
    )
    parser.add_argument(
        "--marshal",
        help="path to marshal repo (skip translation if not provided)",
        metavar="MARSHAL_PATH",
        default=None,
    )
    parser.add_argument(
        "--std",
        help="path to goose-lang/std repo (skip translation if not provided)",
        metavar="STD_PATH",
        default=None,
    )
    parser.add_argument(
        "--gokv",
        help="path to gokv repo (skip translation if not provided)",
        metavar="GOKV_PATH",
        default=None,
    )
    parser.add_argument(
        "--mvcc",
        help="path to go-mvcc repo (skip translation if not provided)",
        metavar="MVCC_PATH",
        default=None,
    )
    args = parser.parse_args()

    goose_dir = args.goose
    go_nfsd_dir = args.nfsd
    journal_dir = args.journal
    # This script lives one directory below the Perennial root.
    perennial_dir = path.join(path.dirname(os.path.realpath(__file__)), "..")
    examples_dir = args.examples
    distributed_dir = args.distributed_examples
    gokv_dir = args.gokv
    mvcc_dir = args.mvcc
    marshal_dir = args.marshal
    std_dir = args.std

    # Validate every provided path up front, before doing any work.
    if not os.path.isdir(goose_dir):
        parser.error("goose directory does not exist")
    if go_nfsd_dir is not None and not os.path.isdir(go_nfsd_dir):
        parser.error("go-nfsd directory does not exist")
    if journal_dir is not None and not os.path.isdir(journal_dir):
        parser.error("go-journal directory does not exist")
    if examples_dir is not None and not os.path.isdir(examples_dir):
        parser.error("perennial-examples directory does not exist")
    if distributed_dir is not None and not os.path.isdir(distributed_dir):
        parser.error(
            "lockservice (distributed examples) directory does not exist"
        )
    if gokv_dir is not None and not os.path.isdir(gokv_dir):
        parser.error("gokv directory does not exist")
    if mvcc_dir is not None and not os.path.isdir(mvcc_dir):
        parser.error("mvcc directory does not exist")
    if marshal_dir is not None and not os.path.isdir(marshal_dir):
        parser.error("marshal directory does not exist")
    if std_dir is not None and not os.path.isdir(std_dir):
        parser.error("std directory does not exist")

    def do_run(cmd_args):
        # A def (not a lambda assignment, flake8 E731) so tracebacks carry a
        # real name; behavior identical.
        run_command(cmd_args, dry_run=args.dry_run, verbose=args.verbose)

    def compile_goose():
        # Install the goose binary into $GOPATH/bin.
        old_dir = os.getcwd()
        os.chdir(goose_dir)
        do_run(["go", "install", "./cmd/goose"])
        os.chdir(old_dir)

    def run_goose(src_path, *pkgs):
        # Translate `pkgs` (default: the package at src_path itself) into Coq
        # files under <perennial>/external/Goose.
        if not pkgs:
            pkgs = ["."]
        gopath = os.getenv("GOPATH", default=None)
        if gopath is None or gopath == "":
            gopath = path.join(path.expanduser("~"), "go")
        goose_bin = path.join(gopath, "bin", "goose")
        # `cmd`, not `args`: avoid shadowing the argparse namespace above.
        cmd = [goose_bin]
        output = path.join(perennial_dir, "external/Goose")
        cmd.extend(["-out", output])
        cmd.extend(["-dir", src_path])
        cmd.extend(pkgs)
        do_run(cmd)

    def run_goose_test_gen(src_path, output):
        # Generate the Coq semantics test file with goose's test_gen tool.
        gen_bin = path.join(goose_dir, "cmd/test_gen/main.go")
        cmd = ["go", "run", gen_bin, "-coq", "-out", output, src_path]
        do_run(cmd)

    if args.compile:
        compile_goose()

    if args.goose_examples:
        run_goose_test_gen(
            path.join(goose_dir, "internal/examples/semantics"),
            path.join(
                perennial_dir, "src/goose_lang/interpreter/generated_test.v"
            ),
        )
        run_goose(
            path.join(goose_dir, "internal/examples"),
            "./unittest",
            "./semantics",
            "./append_log",
            "./logging2",
            "./rfc1813",
            "./simpledb",
            "./wal",
        )

    if journal_dir is not None:
        run_goose(
            journal_dir,
            "./addr",
            "./alloc",
            "./buf",
            "./jrnl",
            "./common",
            "./lockmap",
            "./obj",
            "./util",
            "./wal",
            "./jrnl_replication",
            "./txn",
        )

    if go_nfsd_dir is not None:
        run_goose(
            go_nfsd_dir,
            "./kvs",
            "./super",
            "./fh",
            "./simple",
            "./nfstypes",
        )

    if examples_dir is not None:
        run_goose(
            examples_dir,
            "./replicated_block",
            "./alloc",
            "./inode",
            "./indirect_inode",
            "./async_inode",
            "./dir",
            "./dynamic_dir",
            "./single_inode",
            "./single_async_inode",
            "./toy",
            "./async_toy",
            "./async_durable_alloc_inode",
            "./async_mem_alloc_inode",
            "./async_mem_alloc_dir",
            "./async_durable_alloc",
        )

    if distributed_dir is not None:
        run_goose(distributed_dir, ".", "./grove_common")

    if gokv_dir is not None:
        pkgs = ["urpc", "memkv", "connman", "paxi/single", "bank",
                "lockservice", "pb", "pb/controller", "ctrexample/client",
                "ctrexample/server", "fencing/ctr", "fencing/config",
                "fencing/frontend", "fencing/client", "erpc"]
        for pkg in pkgs:
            run_goose(
                path.join(gokv_dir, pkg),
                # XXX: need to change the Coq import statement for lockservice/ from
                # "From Goose Require github_com.mit_pdos.lockservice.lockservice." to
                # "From Goose Require github_com.mit_pdos.lockservice."
            )

    if mvcc_dir is not None:
        run_goose(
            mvcc_dir,
            "./txn",
            "./index",
            "./gc",
            "./tuple",
            "./config",
            "./common",
            "./examples",
            # "./cfmutex",
        )

    if marshal_dir is not None:
        run_goose(marshal_dir, ".")

    if std_dir is not None:
        run_goose(std_dir, ".")
if __name__ == "__main__":
    # Script entry point.
    main()
|
import os
import shutil
import tinychain as tc
# Path to the locally built tinychain binary, and the default host port.
TC_PATH = "host/target/debug/tinychain"
PORT = 8702
def start_host(name, clusters=None, overwrite=True):
    """Start a local TinyChain host.

    Args:
        name: host name; used for its config and data directories.
        clusters: optional list of cluster definitions to install; all their
            URIs must agree on a single port.
        overwrite: when True, overwrite existing cluster configs and wipe the
            host's data directory before starting.

    Returns:
        A running ``tc.host.Local`` instance.

    Raises:
        ValueError: if a cluster URI specifies a port that differs from the
            first cluster's (or default) port.
    """
    # Avoid the mutable-default-argument pitfall: fresh list per call.
    if clusters is None:
        clusters = []

    # The first cluster's URI decides the port; fall back to PORT.
    port = PORT
    if clusters:
        port = tc.uri(clusters[0]).port()
        port = port if port else PORT

    config = []
    for cluster in clusters:
        cluster_config = f"config/{name}"
        cluster_uri = tc.uri(cluster)
        if cluster_uri.port() is not None and cluster_uri.port() != port:
            raise ValueError(f"invalid port {cluster_uri.port()}, expected {port}")

        # Mirror the cluster's host/port/path layout under config/<name>/.
        if cluster_uri.host():
            cluster_config += f"/{cluster_uri.host()}"
        if cluster_uri.port():
            cluster_config += f"/{cluster_uri.port()}"
        cluster_config += str(cluster_uri.path())

        tc.write_cluster(cluster, cluster_config, overwrite)
        config.append(cluster_config)

    data_dir = "/tmp/tc/tmp/" + name
    if overwrite and os.path.exists(data_dir):
        shutil.rmtree(data_dir)

    print(f"start host on port {port}")

    return tc.host.Local(
        TC_PATH,
        workspace=data_dir,  # same path the original computed twice
        data_dir=data_dir,
        clusters=config,
        port=port,
        log_level="debug",
        force_create=True)
|
import logging
import os
import tempfile
import time
from typing import List, Tuple
import psycopg2
import requests
from django.http.response import HttpResponseRedirect
from django.utils import timezone
from qfieldcloud.authentication.models import AuthToken
from qfieldcloud.core.geodb_utils import delete_db_and_role
from qfieldcloud.core.models import Geodb, Job, Project, User
from rest_framework import status
from rest_framework.test import APITransactionTestCase
from .utils import testdata_path
logging.disable(logging.CRITICAL)
class QfcTestCase(APITransactionTestCase):
    """End-to-end tests for QField packaging jobs.

    Each test uploads project files, triggers a PACKAGE job through the REST
    API and polls the job until the worker finishes (real worker + geodb
    containers are required).
    """

    def setUp(self):
        # Create a user
        self.user1 = User.objects.create_user(username="user1", password="abc123")
        self.user2 = User.objects.create_user(username="user2", password="abc123")
        self.token1 = AuthToken.objects.get_or_create(user=self.user1)[0]

        # Create a project
        self.project1 = Project.objects.create(
            name="project1", is_public=False, owner=self.user1
        )

        # Best effort: drop leftovers from a previous (possibly failed) run.
        try:
            delete_db_and_role("test", self.user1.username)
        except Exception:
            pass

        self.geodb = Geodb.objects.create(
            user=self.user1,
            dbname="test",
            hostname="geodb",
            port=5432,
        )

        self.conn = psycopg2.connect(
            dbname="test",
            user=os.environ.get("GEODB_USER"),
            password=os.environ.get("GEODB_PASSWORD"),
            host="geodb",
            port=5432,
        )

    def tearDown(self):
        self.conn.close()

        # Remove all projects avoiding bulk delete in order to use
        # the overrided delete() function in the model
        for p in Project.objects.all():
            p.delete()

        User.objects.all().delete()

        # Remove credentials
        self.client.credentials()

    def upload_files(
        self,
        token: str,
        project: Project,
        files: List[Tuple[str, str]],
    ):
        """Upload each (local, remote) file pair to the project's files API.

        Pairs with an empty local filename are skipped.
        """
        self.client.credentials(HTTP_AUTHORIZATION=f"Token {token}")
        for local_filename, remote_filename in files:
            if not local_filename:
                continue
            file_path = testdata_path(local_filename)
            # Context manager closes the handle even if the assertion fails
            # (the original leaked the file handle).
            with open(file_path, "rb") as fh:
                response = self.client.post(
                    f"/api/v1/files/{project.id}/{remote_filename}/",
                    {"file": fh},
                    format="multipart",
                )
            self.assertTrue(status.is_success(response.status_code))

    def upload_files_and_check_package(
        self,
        token: str,
        project: Project,
        files: List[Tuple[str, str]],
        expected_files: List[str],
        job_create_error: Tuple[int, str] = None,
        tempdir: str = None,
        invalid_layers: List[str] = None,
    ):
        """Upload `files`, start a PACKAGE job and verify the produced package.

        job_create_error: optional (status_code, error_code) expected from job
            creation; when given, the method returns right after asserting it.
        tempdir: when given, the expected files are downloaded into it.
        invalid_layers: layer ids expected to be reported as invalid.
        """
        # Mutable-default-argument fix: build a fresh list per call.
        if invalid_layers is None:
            invalid_layers = []

        self.client.credentials(HTTP_AUTHORIZATION=f"Token {token}")

        self.upload_files(token, project, files)

        before_started_ts = timezone.now()

        response = self.client.post(
            "/api/v1/jobs/",
            {
                "project_id": project.id,
                "type": Job.Type.PACKAGE,
            },
        )

        if job_create_error:
            self.assertEqual(response.status_code, job_create_error[0])
            self.assertEqual(response.json()["code"], job_create_error[1])
            return
        else:
            self.assertTrue(status.is_success(response.status_code))

        job_id = response.json().get("id")

        # Wait for the worker to finish
        for _ in range(20):
            time.sleep(3)
            response = self.client.get(f"/api/v1/jobs/{job_id}/")
            payload = response.json()

            if payload["status"] == Job.Status.FINISHED:
                response = self.client.get(f"/api/v1/packages/{project.id}/latest/")
                package_payload = response.json()

                # packaged_at must fall between job creation and now.
                self.assertLess(
                    package_payload["packaged_at"], timezone.now().isoformat()
                )
                self.assertGreater(
                    package_payload["packaged_at"],
                    before_started_ts.isoformat(),
                )

                sorted_downloaded_files = [
                    f["name"]
                    for f in sorted(package_payload["files"], key=lambda k: k["name"])
                ]
                sorted_expected_files = sorted(expected_files)

                self.assertListEqual(sorted_downloaded_files, sorted_expected_files)

                if tempdir:
                    for filename in expected_files:
                        response = self.client.get(
                            f"/api/v1/qfield-files/{self.project1.id}/project_qfield.qgs/"
                        )
                        local_file = os.path.join(tempdir, filename)

                        self.assertIsInstance(response, HttpResponseRedirect)

                        # We cannot use the self.client HTTP client, since it does not support
                        # requests outside the current Django App
                        # Using the rest_api_framework.RequestsClient is not much better, so better
                        # use the `requests` module
                        with requests.get(response.url, stream=True) as r:
                            with open(local_file, "wb") as f:
                                for chunk in r.iter_content():
                                    f.write(chunk)

                for layer_id in package_payload["layers"]:
                    layer_data = package_payload["layers"][layer_id]

                    if layer_id in invalid_layers:
                        self.assertFalse(layer_data["valid"], layer_id)
                    else:
                        self.assertTrue(layer_data["valid"], layer_id)

                return
            elif payload["status"] == Job.Status.FAILED:
                self.fail("Worker failed with error")

        self.fail("Worker didn't finish")

    def test_list_files_for_qfield(self):
        # Prepare a geodb table so the project has a postgres layer to package.
        cur = self.conn.cursor()
        cur.execute("CREATE TABLE point (id integer, geometry geometry(point, 2056))")
        self.conn.commit()
        cur.execute(
            "INSERT INTO point(id, geometry) VALUES(1, ST_GeomFromText('POINT(2725505 1121435)', 2056))"
        )
        self.conn.commit()

        self.upload_files_and_check_package(
            token=self.token1.key,
            project=self.project1,
            files=[
                ("delta/project2.qgs", "project.qgs"),
                ("delta/points.geojson", "points.geojson"),
            ],
            expected_files=["data.gpkg", "project_qfield.qgs"],
        )

    def test_list_files_missing_project_filename(self):
        # Without a .qgs file, job creation must be rejected.
        self.upload_files_and_check_package(
            token=self.token1.key,
            project=self.project1,
            files=[
                ("delta/points.geojson", "points.geojson"),
            ],
            job_create_error=(400, "no_qgis_project"),
            expected_files=[],
        )

    def test_project_never_packaged(self):
        self.upload_files(
            token=self.token1.key,
            project=self.project1,
            files=[
                ("delta/project2.qgs", "project.qgs"),
            ],
        )

        response = self.client.get(f"/api/v1/packages/{self.project1.id}/latest/")

        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.json()["code"], "invalid_job")

    def test_download_file_for_qfield(self):
        tempdir = tempfile.mkdtemp()

        self.upload_files_and_check_package(
            token=self.token1.key,
            project=self.project1,
            files=[
                ("delta/nonspatial.csv", "nonspatial.csv"),
                ("delta/testdata.gpkg", "testdata.gpkg"),
                ("delta/points.geojson", "points.geojson"),
                ("delta/polygons.geojson", "polygons.geojson"),
                ("delta/project.qgs", "project.qgs"),
            ],
            expected_files=[
                "data.gpkg",
                "project_qfield.qgs",
            ],
            tempdir=tempdir,
        )

        local_file = os.path.join(tempdir, "project_qfield.qgs")
        with open(local_file, "r") as f:
            self.assertEqual(
                f.readline().strip(),
                "<!DOCTYPE qgis PUBLIC 'http://mrcc.com/qgis.dtd' 'SYSTEM'>",
            )

    def test_list_files_for_qfield_broken_file(self):
        self.upload_files(
            token=self.token1.key,
            project=self.project1,
            files=[
                ("delta/broken.qgs", "broken.qgs"),
            ],
        )

        response = self.client.post(
            "/api/v1/jobs/",
            {
                "project_id": self.project1.id,
                "type": Job.Type.PACKAGE,
            },
        )
        self.assertTrue(status.is_success(response.status_code))
        job_id = response.json().get("id")

        # Wait for the worker to finish; a broken project must fail the job.
        for _ in range(10):
            time.sleep(3)
            response = self.client.get(
                f"/api/v1/jobs/{job_id}/",
            )

            if response.json()["status"] == "failed":
                return

        self.fail("Worker didn't finish")

    def test_downloaded_file_has_canvas_name(self):
        tempdir = tempfile.mkdtemp()

        self.upload_files_and_check_package(
            token=self.token1.key,
            project=self.project1,
            files=[
                ("delta/nonspatial.csv", "nonspatial.csv"),
                ("delta/testdata.gpkg", "testdata.gpkg"),
                ("delta/points.geojson", "points.geojson"),
                ("delta/polygons.geojson", "polygons.geojson"),
                ("delta/project.qgs", "project.qgs"),
            ],
            expected_files=[
                "data.gpkg",
                "project_qfield.qgs",
            ],
            tempdir=tempdir,
        )

        local_file = os.path.join(tempdir, "project_qfield.qgs")
        with open(local_file, "r") as f:
            for line in f:
                if 'name="theMapCanvas"' in line:
                    return
        # Bug fix: the original returned silently even when the canvas name
        # was missing, so this test could never fail.
        self.fail('name="theMapCanvas" not found in downloaded project file')

    def test_download_project_with_broken_layer_datasources(self):
        self.upload_files_and_check_package(
            token=self.token1.key,
            project=self.project1,
            files=[
                ("delta/points.geojson", "points.geojson"),
                (
                    "delta/project_broken_datasource.qgs",
                    "project_broken_datasource.qgs",
                ),
            ],
            expected_files=[
                "data.gpkg",
                "project_broken_datasource_qfield.qgs",
            ],
            invalid_layers=["surfacestructure_35131bca_337c_483b_b09e_1cf77b1dfb16"],
        )

    def test_filename_with_whitespace(self):
        self.upload_files_and_check_package(
            token=self.token1.key,
            project=self.project1,
            files=[
                ("delta/nonspatial.csv", "nonspatial.csv"),
                ("delta/testdata.gpkg", "testdata.gpkg"),
                ("delta/points.geojson", "points.geojson"),
                ("delta/polygons.geojson", "polygons.geojson"),
                ("delta/project.qgs", "project.qgs"),
            ],
            expected_files=[
                "data.gpkg",
                "project_qfield.qgs",
            ],
        )
|
# Generated by Django 3.0.4 on 2020-05-12 15:46
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop Task.fields_groups.

    Removing a field is reversible only via the auto-generated reverse of
    RemoveField; any data in the column is lost on apply.
    """

    dependencies = [
        ('maintenancemanagement', '0009_auto_20200505_2145'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='task',
            name='fields_groups',
        ),
    ]
|
import movietools, os, sys, visit
###############################################################################
# Class: MovieGenerator
#
# Purpose: This base class contains the machinery to create a movie and
# it provides restart capability so that if the script stops
# running, it can be restarted and it will pick up where it left
# off.
#
# Programmer: Brad Whitlock
# Date: Tue Jan 14 16:28:16 PST 2003
#
##############################################################################
class MovieGenerator:
    def __init__(self, ops):
        """Initialize default movie, script, database and engine properties.

        ops: operation-specific data stored on the instance for later stages
        (not interpreted here).  The command line arguments -clear and
        -geometry are honored at construction time.
        """
        # Movie properties
        self.movieBase = "movie"
        self.basename = "source"
        self.xres = 1024
        self.yres = 1024
        self.processedBaseName = "frame"
        # e.g. "frame-1024x1024-%04d.png" once the %% is resolved per frame.
        self.processedBaseFormat = "%s-%dx%d-%%04d.png" % (self.processedBaseName, self.xres, self.yres)
        self.fileIndex = 0
        self.movieFormats = []
        # Script operation properties
        self.stage = "GenerateFrames"
        self.ops = ops
        self.flags = {"invalid":0}
        # Database properties
        self.dataHost = "localhost"
        self.timestepFile = "movie.visit"
        self.nstep = 1
        # Engine properties
        self.engineNP = 1
        self.launchMethod = "psub"
        self.poolName = "pbatch"
        self.visitDir = "/usr/gapps/visit"
        self.timeLimit = 60
        self.engineArgs = []
        self.preventativeClose = 0
        # MDServer properties
        self.mdserverArgs = []
        # Set up some callbacks to visit module functions.
        self.CloseComputeEngine = visit.CloseComputeEngine
        self.DeleteAllPlots = visit.DeleteAllPlots
        self.DrawPlots = visit.DrawPlots
        self.OpenComputeEngine = visit.OpenComputeEngine
        self.OpenDatabase = visit.OpenDatabase
        self.OpenMDServer = visit.OpenMDServer
        self.SaveWindow = visit.SaveWindow
        self.SaveWindowAttributes = visit.SaveWindowAttributes
        self.SetSaveWindowAttributes = visit.SetSaveWindowAttributes
        # If the -clear command line argument was given then remove the
        # restart files.  (RemoveRestartFiles is defined elsewhere in this
        # class, outside this chunk.)
        if("-clear" in sys.argv):
            self.RemoveRestartFiles()
        # Look for the -geometry argument in the cli's arguments and pick
        # out the image resolution if it was given.
        for i in range(1, len(sys.argv)):
            if(sys.argv[i] == "-geometry" and ((i+1) < len(sys.argv))):
                geometry = sys.argv[i+1]
                xloc = geometry.find("x")
                if(xloc != -1):
                    self.xres = int(geometry[:xloc])
                    self.yres = int(geometry[xloc+1:])
                    self.processedBaseFormat = "%s-%dx%d-%%04d.png" % (self.processedBaseName, self.xres, self.yres)
                break
##########################################################################
# Method: requestStreamingMovie
#
# Purpose: This method tells the movie generator to create a streaming
#          movie of the specified size.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def requestStreamingMovie(self, xres, yres, tar):
    """Queue a streaming ("sm") movie request; acted on by PackageMovies.

    xres, yres : resolution of the requested movie.
    tar        : nonzero to also tar up the individual frames.
    """
    request = ("sm", xres, yres, tar)
    # Rebind a new list (rather than mutating in place), matching the
    # behavior of the other request* methods.
    self.movieFormats = self.movieFormats + [request]
##########################################################################
# Method: requestQuickTimeMovie
#
# Purpose: This method tells the movie generator to create a
#          QuickTime movie of the specified size.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def requestQuickTimeMovie(self, xres, yres, tar):
    """Queue a QuickTime ("qt") movie request; acted on by PackageMovies.

    xres, yres : resolution of the requested movie.
    tar        : nonzero to also tar up the individual frames.
    """
    request = ("qt", xres, yres, tar)
    # Rebind a new list (rather than mutating in place), matching the
    # behavior of the other request* methods.
    self.movieFormats = self.movieFormats + [request]
##########################################################################
# Method: requestMPEGMovie
#
# Purpose: This method tells the movie generator to create an MPEG
#          movie of the specified size.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def requestMPEGMovie(self, xres, yres, tar):
    """Queue an MPEG ("mpeg") movie request; acted on by PackageMovies.

    xres, yres : resolution of the requested movie.
    tar        : nonzero to also tar up the individual frames.
    """
    request = ("mpeg", xres, yres, tar)
    # Rebind a new list (rather than mutating in place), matching the
    # behavior of the other request* methods.
    self.movieFormats = self.movieFormats + [request]
##########################################################################
# Method: CreateMovie
#
# Purpose: Main driver for movie creation.  Runs the pipeline stages
#          (GenerateFrames -> ImageProcessing -> Packaging -> Done),
#          resuming from whatever stage the restart file recorded.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
# Modifications:
#   Brad Whitlock, Wed Jan 29 07:53:48 PDT 2003
#   I fixed a bug that prevented the stage from the restart file from
#   being used.
#
##########################################################################
def CreateMovie(self):
    # Read the list of database files to use.
    times = self.ReadTimeSteps()
    # Read the restart file and figure out where we should pick up.
    restart = self.ProcessRestartFile(times)
    self.stage = restart[0]
    t = restart[1]
    startFile = restart[2]
    # Stage 1: generate the remaining raw movie frames.
    if(self.stage == "GenerateFrames"):
        self.GenerateFrames(times, t, startFile)
        self.stage = "ImageProcessing"
    # Stage 2: post-process the raw frames into the final frame images.
    nframes = 0
    nframesUnknown = 1
    if(self.stage == "ImageProcessing"):
        self.WriteScriptRestart(self.stage, "done", self.fileIndex - 1)
        nframes = self.ops.process(self.processedBaseFormat, 0)
        self.stage = "Packaging"
        self.WriteScriptRestart(self.stage, "done", nframes)
        nframesUnknown = 0
    # Stage 3: package the processed frames into the requested movies.
    if(self.stage == "Packaging"):
        # If we resumed directly at this stage, the frame count is only
        # available from the restart file, so read it back.
        if(nframesUnknown):
            restart = self.ProcessRestartFile(times)
            nframes = restart[2]
        self.PackageMovies(nframes)
        self.stage = "Done"
        self.WriteScriptRestart(self.stage, "done", nframes)
    if(self.stage == "Done"):
        # Parenthesized print: identical output on Python 2, valid on 3.
        print("The movies are done.")
##########################################################################
# Method: SetupVisualization
#
# Purpose: Per-timestep visualization hook; override in subclasses.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
# Modifications:
#   Brad Whitlock, Wed Jan 29 07:35:29 PDT 2003
#   I added the index and times arguments. index is the index of the
#   current dbName within the times database name tuple.
#
##########################################################################
def SetupVisualization(self, dbName, t, index, times):
    """Hook called once per timestep to build the visualization.

    The base implementation intentionally does nothing; subclasses
    override it with their plot setup.  You must call SaveSourceFrame
    inside the override for any images to be written.

    dbName : database being visualized for this frame.
    t      : parametric time in [0, 1] through the animation.
    index  : index of dbName within the times list.
    times  : full list of database names for the movie.
    """
    pass
##########################################################################
# Method: ProcessRestartFile
#
# Purpose: Reads the restart file and interprets the stored values so
#          that we know the stage that we're at in the movie generation
#          pipeline and the file that we should be working on.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
# Modifications:
#   Brad Whitlock, Tue Jan 28 14:40:34 PST 2003
#   I fixed an error reading flags back from the restart file. I also
#   prevented the t value from being anything but one if the stage is
#   not "GenerateFrames".
#
##########################################################################
def ProcessRestartFile(self, times):
    """Return (stage, t, startFile) describing where to resume.

    Also sets self.fileIndex to the next image index and restores any
    key=value flags stored in the restart file into self.flags.
    """
    # Defaults used when there is no (usable) restart file.
    t = 0.
    stage = "GenerateFrames"
    startFile = 0
    startImageIndex = 0
    # Try reading the script's restart file.
    restart = self.ReadScriptRestart()
    # A valid restart file has at least: stage, last database processed,
    # and last image index written.
    if(len(restart) >= 3):
        stage = restart[0]
        lastFile = restart[1]
        lastImageIndex = restart[2]
        # Find the last processed database in the times list so that we
        # resume with the one after it.
        findex = 0
        for i in range(0, len(times)):
            if(times[i] == lastFile):
                findex = i
                break
        startFile = findex + 1
        if(stage == "GenerateFrames"):
            t = float(startFile) / float(len(times) - 1)
        else:
            # Frame generation already finished; clamp t at the end.
            t = 1.
        print("Initial t value=%g" % t)
        # Determine the initial image frame that we should save.
        startImageIndex = int(lastImageIndex) + 1
        # Try and read the image ops so we don't overwrite them.
        self.ReadImageOps()
        # Any lines past the first three are "key=value" flag pairs.
        if(len(restart) > 3):
            for line in restart[3:]:
                eloc = line.find("=")
                if(eloc != -1):
                    key = line[:eloc]
                    val = int(line[eloc+1:])
                    self.flags[key] = val
                    print("Read flag \"%s\" from restart. Its value is %d" % (key, val))
    elif(len(restart) > 0):
        print("The restart file has an error. We're not going to use it.")
    # fileIndex is used to create names for saved images.
    self.fileIndex = startImageIndex
    return (stage, t, startFile)
##########################################################################
# Method: SetFlag
#
# Purpose: This method sets the value of a flag that we use for
#          program execution. The flags are saved to the restart file.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def SetFlag(self, name, boolean):
    """Record the named execution flag; any nonzero value is stored as True."""
    normalized = (boolean != 0)
    self.flags[name] = normalized
##########################################################################
# Method: GetFlag
#
# Purpose: This method returns the value of the named flag.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def GetFlag(self, name):
    """Return the value of the named flag, or 0 if it was never set."""
    # dict.get replaces the original try/except KeyError with a single
    # lookup; unknown flags still read as 0.
    return self.flags.get(name, 0)
##########################################################################
# Method: GenerateFrames
#
# Purpose: This method generates the frames for the movie. Its main
#          job is to handle restart and iterating through the list of
#          timesteps while calling other helper methods to actually
#          set up the plots.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
# Modifications:
#   Brad Whitlock, Wed Jan 22 14:27:47 PST 2003
#   I added code to launch an mdserver.
#
#   Brad Whitlock, Tue Jan 28 14:39:25 PST 2003
#   I moved the code to delete the plots so it happens before we open
#   the new database to avoid a bug in the viewer that causes the plots
#   to be reexecuted needlessly on the engine. I also changed the
#   interface of the SetupVisualization method so it accepts new args.
#
##########################################################################
def GenerateFrames(self, times, t, startFile):
    """Render one frame per timestep, starting at index startFile.

    times     : list of database names, one per timestep.
    t         : parametric time (recomputed per iteration).
    startFile : index of the first timestep to process (restart support).
    """
    # When preventativeClose is set, the engine is cycled every
    # timeStepsPerEngine steps to reduce the chance of a long-running
    # engine crashing and stalling the script.
    timeStepsPerEngine = 15
    timeStepsProcessed = 1
    # Launch an mdserver.
    self.LaunchMDServer()
    # Launch a compute engine.
    self.LaunchComputeEngine()
    # Iterate through the timesteps.
    for i in range(startFile, len(times)):
        # t is the parametric time through the animation.
        t = float(i) / float(len(times) - 1)
        # Delete plots *before* opening the next database (works around
        # a viewer bug that re-executes plots on the engine needlessly).
        self.DeleteAllPlots()
        if(self.OpenDatabase(times[i]) != 1):
            # The open may have failed because the engine died; try to
            # start another engine.
            self.LaunchComputeEngine()
        # Set up the visualization for time t.
        self.SetupVisualization(times[i], t, i, times)
        # Record progress so a crash can resume at this timestep.
        self.WriteScriptRestart(self.stage, times[i], self.fileIndex - 1)
        self.WriteImageOps()
        # Optionally cycle the compute engine (see note above).
        timeStepsProcessed = timeStepsProcessed + 1
        if(self.preventativeClose and (timeStepsProcessed == timeStepsPerEngine)):
            print("**\n**\n**\n** We're shutting down the compute engine!**\n**\n**\n")
            self.CloseComputeEngine()
            print("**\n**\n**\n** We're restarting the compute engine!**\n**\n**\n")
            self.LaunchComputeEngine()
            timeStepsProcessed = 0
    # Close the compute engine.
    self.CloseComputeEngine()
##########################################################################
# Method: LaunchMDServer
#
# Purpose: This method tells VisIt how to launch an mdserver.
#
# Programmer: Brad Whitlock
# Date:       Wed Jan 22 14:27:10 PST 2003
#
##########################################################################
def LaunchMDServer(self):
    """Open an mdserver on the data host, pointed at our VisIt install."""
    launch_args = ["-dir", self.visitDir]
    launch_args.extend(self.mdserverArgs)
    self.OpenMDServer(self.dataHost, tuple(launch_args))
##########################################################################
# Method: LaunchComputeEngine
#
# Purpose: This method tells VisIt how to launch a compute engine.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def LaunchComputeEngine(self):
    """Launch a compute engine on the data host.

    Serial engines are opened directly; parallel engines get batch
    launch arguments tailored to the known LLNL hosts (frost/white use
    16 tasks per node, blue uses 4).  Returns OpenComputeEngine's result.
    """
    retval = 0
    if(self.engineNP > 1):
        host = self.dataHost
        # The frost/white and blue branches differed only in the tasks
        # per node, so they are unified here.
        if(host[:5] == "frost" or host[:5] == "white"):
            tasksPerNode = 16
        elif(host[:4] == "blue"):
            tasksPerNode = 4
        else:
            tasksPerNode = 0
        npstr = "%d" % self.engineNP
        if(tasksPerNode > 0):
            # Floor division (//) keeps the node count an integer on
            # Python 3 too; on Python 2 "/" already truncated for ints.
            nnstr = "%d" % (self.engineNP // tasksPerNode)
            limitstr = "%d" % self.timeLimit
            args = ["-np", npstr, "-nn", nnstr, "-l", self.launchMethod, "-p", self.poolName, "-b", "bdivp", "-forcestatic", "-t", limitstr]
        else:
            args = ["-np", npstr, "-forcestatic"]
        # Add user-supplied engine arguments.
        args = args + list(self.engineArgs)
        if(self.visitDir != "/usr/gapps/visit"):
            args = args + ["-dir", self.visitDir]
        retval = self.OpenComputeEngine(self.dataHost, tuple(args))
    elif(len(self.engineArgs) > 0):
        retval = self.OpenComputeEngine(self.dataHost, tuple(self.engineArgs))
    else:
        retval = self.OpenComputeEngine(self.dataHost)
    return retval
##########################################################################
# Method: ReadScriptRestart
#
# Purpose: Reads the script's restart file and returns the contents.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def ReadScriptRestart(self):
    """Return the contents of the script's restart file as data lines."""
    restartPath = "%s-restart" % self.movieBase
    return movietools.ReadDataFile(restartPath)
##########################################################################
# Method: WriteScriptRestart
#
# Purpose: This method writes the script's restart file which it uses
#          to pick up where it left off if there were errors.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def WriteScriptRestart(self, stage, lastDataFile, lastImageFile):
    """Write the restart file recording where the script left off.

    File layout:
        line 1 : pipeline stage name
        line 2 : last database file processed
        line 3 : last image index written
        line 4+: key=value flag pairs (the "invalid" placeholder is skipped)
    """
    try:
        f = open("%s-restart" % self.movieBase, "w")
        try:
            f.write("%s\n" % stage)
            f.write("%s\n" % lastDataFile)
            f.write("%d\n" % lastImageFile)
            # Write the flags so we can keep a little program state.
            for key in self.flags.keys():
                if(key != "invalid"):
                    f.write("%s=%d\n" % (key, self.flags[key]))
        finally:
            # Close even when a write fails, so the handle is not leaked.
            f.close()
    except IOError:
        print("Could not write the script restart file")
##########################################################################
# Method: WriteImageOps
#
# Purpose: This method writes the script's image ops to a file.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def WriteImageOps(self):
    """Persist the image ops so a restarted run can restore them."""
    opsPath = "%s-imageops" % self.movieBase
    self.ops.backup(opsPath)
##########################################################################
# Method: ReadImageOps
#
# Purpose: This method reads the script's image ops from a file.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def ReadImageOps(self):
    """Restore the image ops previously written by WriteImageOps."""
    opsPath = "%s-imageops" % self.movieBase
    self.ops.restore(opsPath)
##########################################################################
# Method: RemoveRestartFiles
#
# Purpose: This method removes the script's restart files.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def RemoveRestartFiles(self):
    """Delete the restart and image-ops files, ignoring missing ones."""
    print("Removing the restart file")
    restartName = "%s-restart" % self.movieBase
    imageOpsName = "%s-imageops" % self.movieBase
    # os.remove replaces the old "rm -f" shell call: no shell is
    # involved (so no quoting issues with movieBase) and a missing
    # file is ignored just as rm -f did.
    for name in (restartName, imageOpsName):
        try:
            os.remove(name)
        except OSError:
            pass
##########################################################################
# Method: ReadTimeSteps
#
# Purpose: This method reads the timesteps from the .visit file and
#          strides through them to produce the list of timesteps that
#          we'll use to make the movie.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def ReadTimeSteps(self):
    """Return every nstep-th entry from the timestep file, prefixing
    absolute paths with the data host name ("host:/path")."""
    lines = movietools.ReadDataFile(self.timestepFile)
    # lines[::self.nstep] strides through the timesteps just like the
    # old range(0, len(lines), self.nstep) loop did.
    return [
        "%s:%s" % (self.dataHost, line) if line[0] == '/' else line
        for line in lines[::self.nstep]
    ]
##########################################################################
# Method: SaveImage
#
# Purpose: This method saves the active window to a tiff image.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def SaveImage(self, filename):
    """Save the active window as a TIFF named filename.

    Returns the result of SaveWindow.
    """
    s = self.SaveWindowAttributes()
    s.fileName = filename
    s.family = 0  # use the exact name, no family numbering
    # Read the TIFF enum off the attributes object we already have
    # instead of constructing a second one just for the constant.
    s.format = s.TIFF
    self.SetSaveWindowAttributes(s)
    return self.SaveWindow()
##########################################################################
# Method: SaveSourceFrame
#
# Purpose: This method saves the active window to an image file as
#          part of the running image sequence.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def SaveSourceFrame(self):
    """Save the active window as the next frame in the running
    sequence and return the file name that was used."""
    frameName = "%s%04d.tif" % (self.basename, self.fileIndex)
    self.fileIndex += 1
    self.SaveImage(frameName)
    return frameName
##########################################################################
# Method: ProtectedDrawPlots
#
# Purpose: This method tells the viewer to draw the plots. If it
#          encounters errors while drawing plots, it tries to restart
#          the engine and redraw the plots.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def ProtectedDrawPlots(self):
    """Draw the plots, relaunching the compute engine and retrying
    (three attempts total) if drawing fails."""
    for attempt in range(3):
        if self.DrawPlots():
            return
        if attempt < 2:
            # A draw failure may mean the engine died; relaunch it and
            # try again, exactly as the original nested ifs did.
            self.LaunchComputeEngine()
    print("DrawPlots failed")
##########################################################################
# Method: PackageMovies
#
# Purpose: This method packages the movie frames into various playable
#          movie formats.
#
# Programmer: Brad Whitlock
# Date:       Tue Jan 14 16:28:16 PST 2003
#
##########################################################################
def PackageMovies(self, nframes):
    # Packages nframes processed frames into every movie requested via
    # the request*Movie methods (entries of self.movieFormats are
    # (format, xres, yres, tar) tuples).
    #
    # Loop over the requested movies to determine the list of sizes and
    # formats that we need to create from the source images.
    #
    # Each entry of formatsWeNeed is (printf-style frame-name format,
    # xres, yres); the frames we already generated count as one entry.
    generatedFrameInfo = (self.processedBaseFormat, self.xres, self.yres)
    formatsWeNeed = [generatedFrameInfo]
    for format in self.movieFormats:
        movieFormat = format[0]
        xres = format[1]
        yres = format[2]
        # Candidate frame-name formats for this resolution.
        ppm = "%s-%dx%d-%%04d.ppm" % (self.movieBase, xres, yres)
        png = "%s-%dx%d-%%04d.png" % (self.movieBase, xres, yres)
        tif = "%s-%dx%d-%%04d.tif" % (self.movieBase, xres, yres)
        if(movieFormat == "sm"):
            # Streaming movies can be built from png or tif frames; only
            # request a tif conversion if neither is already planned.
            if((png, xres, yres) in formatsWeNeed):
                continue
            elif((tif, xres, yres) in formatsWeNeed):
                continue
            else:
                formatsWeNeed = formatsWeNeed + [(tif, xres, yres)]
        elif(movieFormat == "qt"):
            # QuickTime movies are built from tif frames.
            if((tif, xres, yres) in formatsWeNeed):
                continue
            else:
                formatsWeNeed = formatsWeNeed + [(tif, xres, yres)]
        elif(movieFormat == "mpeg"):
            # MPEG movies are built from ppm frames.
            if((ppm, xres, yres) in formatsWeNeed):
                continue
            else:
                formatsWeNeed = formatsWeNeed + [(ppm, xres, yres)]
    #
    # Create the list of resize/conversions that we need to perform
    # (everything except the frames that were already generated).
    #
    conversions = []
    for format in formatsWeNeed:
        if(format == generatedFrameInfo):
            continue
        else:
            conversions = conversions + [format]
    #
    # Perform the resizes and conversions.
    #
    if(len(conversions) > 0):
        movietools.resizeMultipleFrames(self.processedBaseFormat, nframes, conversions)
    #
    # Generate movies
    #
    for format in self.movieFormats:
        movieFormat = format[0]
        xres = format[1]
        yres = format[2]
        tar = format[3]  # nonzero: also tar up the individual frames
        movieName = "%s-%dx%d.%s" % (self.movieBase, xres, yres, movieFormat)
        # Create the frame baseformats
        ppm = "%s-%dx%d-%%04d.ppm" % (self.movieBase, xres, yres)
        png = "%s-%dx%d-%%04d.png" % (self.movieBase, xres, yres)
        tif = "%s-%dx%d-%%04d.tif" % (self.movieBase, xres, yres)
        # Create the tar file names
        ppmTar = "%s-ppm-frames-%dx%d.tar" % (self.movieBase, xres, yres)
        pngTar = "%s-png-frames-%dx%d.tar" % (self.movieBase, xres, yres)
        tifTar = "%s-tif-frames-%dx%d.tar" % (self.movieBase, xres, yres)
        # Create the tar file contents (shell glob patterns)
        ppmTarFiles = "%s-%dx%d-????.ppm" % (self.movieBase, xres, yres)
        pngTarFiles = "%s-%dx%d-????.png" % (self.movieBase, xres, yres)
        tifTarFiles = "%s-%dx%d-????.tif" % (self.movieBase, xres, yres)
        if(movieFormat == "sm"):
            # Prefer png frames when they are available at this size,
            # otherwise use the tif frames produced above.
            if((png, xres, yres) in formatsWeNeed):
                movietools.createStreamingMovie(movieName, png, nframes)
                if(tar):
                    command = "tar -cf %s %s" % (pngTar, pngTarFiles)
                    os.system(command)
            else:
                movietools.createStreamingMovie(movieName, tif, nframes)
                if(tar):
                    command = "tar -cf %s %s" % (tifTar, tifTarFiles)
                    os.system(command)
        elif(movieFormat == "qt"):
            movietools.createQuickTimeMovie(movieName, tif, nframes, xres, yres)
            if(tar):
                command = "tar -cf %s %s" % (tifTar, tifTarFiles)
                os.system(command)
        elif(movieFormat == "mpeg"):
            movietools.createMPEG(movieName, ppm, nframes, xres, yres)
            if(tar):
                command = "tar -cf %s %s" % (ppmTar, ppmTarFiles)
                os.system(command)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import six
from requests import RequestException
class TeambitionException(RequestException):
    """Base exception for Teambition API errors.

    Carries the API error ``code``, a human-readable ``message`` and the
    ``client`` that triggered the error, in addition to whatever
    ``requests.RequestException`` records (request/response).
    """

    def __init__(self, code=-1, message=None, client=None, *args, **kwargs):
        super(TeambitionException, self).__init__(*args, **kwargs)
        self.code = code
        self.message = message
        self.client = client

    def __str__(self):
        # RequestException stores the response (or None) on self.response;
        # previously a missing response made __str__ raise AttributeError
        # on NoneType, so fall back to the code/message pair.
        response = getattr(self, 'response', None)
        if response is None:
            return '%s: %s' % (self.code, self.message)
        if six.PY2:
            return response.content
        else:
            return response.text

    __repr__ = __str__
|
# -*- coding: utf-8 -*-
'''
File name: code\cutting_rope\sol_398.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #398 :: Cutting rope
#
# For more information see:
# https://projecteuler.net/problem=398
# Problem Statement
'''
Inside a rope of length n, n-1 points are placed with distance 1 from each other and from the endpoints. Among these points, we choose m-1 points at random and cut the rope at these points to create m segments.
Let E(n, m) be the expected length of the second-shortest segment.
For example, E(3, 2) = 2 and E(8, 3) = 16/7.
Note that if multiple segments have the same shortest length the length of the second-shortest segment is defined as the same as the shortest length.
Find E(10^7, 100).
Give your answer rounded to 5 decimal places behind the decimal point.
'''
# Solution
# Solution Approach
'''
'''
|
import copy
import logging
import random
from ckan_api_client.exceptions import HTTPError
from ckan_api_client.high_level import CkanHighlevelClient
from ckan_api_client.objects import CkanDataset, CkanOrganization, CkanGroup
from ckan_api_client.utils import IDMap, IDPair
# Extras field containing id of the external source.
# The id is simply source_name:
HARVEST_SOURCE_ID_FIELD = '_harvest_source'
logger = logging.getLogger(__name__)
class SynchronizationClient(object):
    """
    Synchronization client, providing functionality for importing
    collections of datasets into a Ckan instance.

    Synchronization acts as follows:

    - Ensure all the required organizations/groups are there;
      create a map between "source" ids and Ckan ids.
      Optionally update existing organizations/groups with
      new details.
    - Find all the Ckan datasets matching the ``source_name``
    - Determine which datasets...
      - ...need to be created
      - ...need to be updated
      - ...need to be deleted
    - First, delete datasets to be deleted in order to free up names
    - Then, create datasets that need to be created
    - Lastly, update datasets using the configured merge strategy
      (see constructor arguments).

    NOTE(review): this class uses Python 2 constructs (``iteritems``,
    ``except Exc, e`` syntax) and will not run unmodified on Python 3.
    """

    def __init__(self, base_url, api_key=None, **kw):
        """
        :param base_url:
            Base URL of the Ckan instance, passed to high-level client
        :param api_key:
            API key to be used, passed to high-level client
        :param organization_merge_strategy: One of:
            - 'create' (default) if the organization doesn't exist, create it.
              Otherwise, leave it alone.
            - 'update' if the organization doesn't exist, create it.
              Otherwise, update with new values.
        :param group_merge_strategy: One of:
            - 'create' (default) if the group doesn't exist, create it.
              Otherwise, leave it alone.
            - 'update' if the group doesn't exist, create it.
              Otherwise, update with new values.
        :param dataset_preserve_names:
            if ``True`` (the default) will preserve old names of existing
            datasets
        :param dataset_preserve_organization:
            if ``True`` (the default) will preserve old organizations of
            existing datasets.
        :param dataset_group_merge_strategy:
            - 'add' add groups, keep old ones (default)
            - 'replace' replace all existing groups
            - 'preserve' leave groups alone
        """
        self._client = CkanHighlevelClient(base_url, api_key)
        # Defaults; any keyword argument overrides these.
        # NOTE(review): unknown keyword names are accepted silently, so a
        # typo in an option name will not be reported.
        self._conf = {
            'organization_merge_strategy': 'create',
            'group_merge_strategy': 'create',
            'dataset_preserve_names': True,
            'dataset_preserve_organization': True,
            'dataset_group_merge_strategy': 'add',
        }
        self._conf.update(kw)

    def sync(self, source_name, data):
        """
        Synchronize data from a source into Ckan.

        - datasets are matched by _harvest_source
        - groups and organizations are matched by name

        :param source_name:
            String identifying the source of the data. Used to build
            ids that will be used in further synchronizations.
        :param data:
            Data to be synchronized. Should be a dict (or dict-like)
            with top level keys coresponding to the object type,
            mapping to dictionaries of ``{'id': <object>}``.
        """
        # Wrap the raw dicts in Ckan objects before upserting.
        groups = dict(
            (key, CkanGroup(val))
            for key, val in data['group'].iteritems())
        organizations = dict(
            (key, CkanOrganization(val))
            for key, val in data['organization'].iteritems())

        # Upsert groups and organizations
        groups_map = self._upsert_groups(groups)
        orgs_map = self._upsert_organizations(organizations)

        # Create list of datasets to be synced
        source_datasets = {}
        for source_id, dataset_dict in data['dataset'].iteritems():
            _dataset_dict = copy.deepcopy(dataset_dict)

            # We need to make sure "source" datasets
            # don't have (otherwise misleading) ids
            _dataset_dict.pop('id', None)

            # We need to update groups and organizations,
            # to map their name from the source into a
            # ckan id
            _dataset_dict['groups'] = [
                groups_map.to_ckan(grp_id)
                for grp_id in _dataset_dict['groups']
            ]
            _dataset_dict['owner_org'] = \
                orgs_map.to_ckan(_dataset_dict['owner_org'])

            dataset = CkanDataset(_dataset_dict)

            # We also want to add the "source id", used for further
            # synchronizations to find stuff
            dataset.extras[HARVEST_SOURCE_ID_FIELD] = \
                self._join_source_id(source_name, source_id)

            source_datasets[source_id] = dataset

        # Retrieve list of datasets from Ckan
        ckan_datasets = self._find_datasets_by_source(source_name)

        # Compare collections to find differences
        differences = self._compare_collections(
            ckan_datasets, source_datasets)

        # ------------------------------------------------------------
        # We now need to create/update/delete datasets.

        # todo: we need to make sure dataset names are not
        #       already used by another dataset. The only
        #       way is to randomize resource names and hope
        #       a 409 response indicates duplicate name..

        # We delete first, in order to (possibly) deallocate
        # some already-used names..
        for source_id in differences['left']:
            ckan_id = ckan_datasets[source_id].id
            logger.info('Deleting dataset {0}'.format(ckan_id))
            self._client.delete_dataset(ckan_id)

        def force_dataset_operation(operation, dataset, retry=5):
            # Run operation(dataset), retrying with randomized name
            # suffixes when Ckan answers 409 (duplicate name), up to
            # ``retry`` renames before giving up.

            # Maximum dataset name length is 100 characters
            # We trim it down to 80 just to be safe.

            # Note: we generally want to preserve the original name
            #       and there should *never* be problems with that
            #       when updating..
            _orig_name = dataset.name[:80]
            dataset.name = _orig_name

            while True:
                try:
                    result = operation(dataset)
                except HTTPError, e:
                    if e.status_code != 409:
                        raise
                    retry -= 1
                    if retry < 0:
                        raise
                    dataset.name = '{0}-{1:06d}'.format(
                        _orig_name,
                        random.randint(0, 999999))
                    logger.debug('Got 409: trying to rename dataset to {0}'
                                 .format(dataset.name))
                else:
                    return result

        # Create missing datasets
        for source_id in differences['right']:
            logger.info('Creating dataset {0}'.format(source_id))
            dataset = source_datasets[source_id]
            force_dataset_operation(self._client.create_dataset, dataset)

        # Update outdated datasets
        for source_id in differences['differing']:
            logger.info('Updating dataset {0}'.format(source_id))
            # dataset = source_datasets[source_id]
            old_dataset = ckan_datasets[source_id]
            new_dataset = source_datasets[source_id]
            dataset = self._merge_datasets(old_dataset, new_dataset)
            dataset.id = old_dataset.id  # Mandatory!
            self._client.update_dataset(dataset)  # should never fail!

    def _merge_datasets(self, old, new):
        # Merge ``new`` (from the source) with ``old`` (from Ckan)
        # according to the configured strategies; mutates and returns
        # ``new``.

        # Preserve dataset names
        if self._conf['dataset_preserve_names']:
            new.name = old.name

        # Merge groups according to configured strategy
        _strategy = self._conf['dataset_group_merge_strategy']
        if _strategy == 'add':
            # We want to preserve the order!
            groups = list(old.groups)
            for g in new.groups:
                if g not in groups:
                    groups.append(g)
            new.groups = groups
        elif _strategy == 'replace':
            # Do nothing, we just want the new groups to replace
            # the old ones -- no need to merge
            pass
        elif _strategy == 'preserve':
            # Simply discard the new groups, keep the old ones
            new.groups = old.groups
        else:
            # Invalid value! Shouldn't this have been catched
            # before?
            # NOTE(review): an invalid strategy silently behaves like
            # 'replace' here.
            pass

        # What should we do with owner organization?
        if self._conf['dataset_preserve_organization']:
            if old.owner_org:
                new.owner_org = old.owner_org

        return new

    def _upsert_groups(self, groups):
        """
        Create or update the given groups, depending on the configured
        ``group_merge_strategy``.

        :param groups:
            dict mapping ``{org_name : CkanGroup()}``
        :return: a map of source/ckan ids of groups
        :rtype: IDMap
        """
        idmap = IDMap()
        for group_name, group in groups.iteritems():
            if not isinstance(group, CkanGroup):
                raise TypeError("Expected CkanGroup, got {0!r}"
                                .format(type(group)))
            if group.name is None:
                group.name = group_name
            if group.name != group_name:
                raise ValueError("Mismatching group name!")
            try:
                ckan_group = self._client.get_group_by_name(
                    group_name, allow_deleted=True)
            except HTTPError, e:
                # Only a 404 means "missing"; any other error is fatal.
                if e.status_code != 404:
                    raise
                # We need to create the group
                group.id = None
                group.state = 'active'
                created_group = self._client.create_group(group)
                idmap.add(IDPair(source_id=group.name,
                                 ckan_id=created_group.id))
            else:
                # The group already exist. It might be logically
                # deleted, but we don't care -> just update and
                # make sure it is marked as active.

                # todo: make sure we don't need to preserve users and stuff,
                #       otherwise we need to workaround that in hi-lev client
                group_id = ckan_group.id
                if self._conf['group_merge_strategy'] == 'update':
                    # If merge strategy is 'update', we should update
                    # the group.
                    group.state = 'active'
                    group.id = ckan_group.id
                    updated_group = self._client.update_group(group)
                    group_id = updated_group.id
                elif group.state != 'active':
                    # We only want to update the **original** group to set it
                    # as active, but preserving original values.
                    ckan_group.state = 'active'
                    updated_group = self._client.update_group(ckan_group)
                    group_id = updated_group.id
                idmap.add(IDPair(source_id=group.name, ckan_id=group_id))
        return idmap

    def _upsert_organizations(self, orgs):
        """
        Create or update the given organizations, depending on the
        configured ``organization_merge_strategy``.

        :param orgs:
            dict mapping ``{org_name : CkanOrganization()}``
        :return: a map of source/ckan ids of organizations
        :rtype: IDMap
        """
        idmap = IDMap()
        for org_name, org in orgs.iteritems():
            if not isinstance(org, CkanOrganization):
                raise TypeError("Expected CkanOrganization, got {0!r}"
                                .format(type(org)))
            if org.name is None:
                org.name = org_name
            if org.name != org_name:
                raise ValueError("Mismatching org name!")
            try:
                ckan_org = self._client.get_organization_by_name(
                    org_name, allow_deleted=True)
            except HTTPError, e:
                # Only a 404 means "missing"; any other error is fatal.
                if e.status_code != 404:
                    raise
                # We need to create the org
                org.id = None
                org.state = 'active'
                created_org = self._client.create_organization(org)
                idmap.add(IDPair(source_id=org.name,
                                 ckan_id=created_org.id))
            else:
                # We only want to update if state != 'active'
                org_id = ckan_org.id
                if self._conf['organization_merge_strategy'] == 'update':
                    # If merge strategy is 'update', we should update
                    # the group.
                    org.state = 'active'
                    org.id = ckan_org.id
                    updated_org = self._client.update_organization(org)
                    org_id = updated_org.id
                elif org.state != 'active':
                    # We only want to update the **original** org to set it
                    # as active, but preserving original values.
                    ckan_org.state = 'active'
                    updated_org = self._client.update_organization(ckan_org)
                    org_id = updated_org.id
                idmap.add(IDPair(source_id=org_name,
                                 ckan_id=org_id))
        return idmap

    def _find_datasets_by_source(self, source_name):
        """
        Find all datasets matching the current source.
        Returns a dict mapping source ids with dataset objects.
        """
        results = {}
        for dataset in self._client.iter_datasets():
            # Only datasets tagged with our harvest-source extra belong
            # to a synchronization source.
            if HARVEST_SOURCE_ID_FIELD not in dataset.extras:
                continue
            source_id = dataset.extras[HARVEST_SOURCE_ID_FIELD]
            _name, _id = self._parse_source_id(source_id)
            if _name == source_name:
                results[_id] = dataset
        return results

    def _parse_source_id(self, source_id):
        # Split "<source_name>:<id>" back into its two parts.
        splitted = source_id.split(':')
        if len(splitted) != 2:
            raise ValueError("Invalid source id")
        return splitted

    def _join_source_id(self, source_name, source_id):
        # Inverse of _parse_source_id.
        return ':'.join((source_name, source_id))

    def _compare_collections(self, left, right):
        """
        Compare two collections of objects.

        Both collections are dictionaries mapping "source" ids
        with objects.

        :param left:
            The "original" collection, retrieved from Ckan.
            Objects will already have ids.

        The two collections are simply dictionaries of objects;
        keys are the ids (used internally by the source).
        Values in the left will contain Ckan ids, while the ones
        in the right will not.

        :returns:
            A dictionary mapping names to sets of keys:

            * ``common`` -- keys in both mappings
            * ``differing`` -- keys of differing objects
            * ``left`` -- keys of objects that are only in ckan
            * ``right`` -- keys of objects that are not in ckan
        """
        left_keys = set(left.iterkeys())
        right_keys = set(right.iterkeys())
        common_keys = left_keys & right_keys
        left_only_keys = left_keys - right_keys
        right_only_keys = right_keys - left_keys
        differing = set(k for k in common_keys if left[k] != right[k])
        return {
            'common': common_keys,
            'left': left_only_keys,
            'right': right_only_keys,
            'differing': differing,
        }
|
import GPy
import numpy as np
import os, sys

# Make the sibling ``GP_prob`` package importable no matter which working
# directory this script is launched from: <repo-root>/GP_prob is appended
# to sys.path before the custom-kernel import below.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, os.pardir))
GP_prob_folder = os.path.join(ROOT_DIR, 'GP_prob')
sys.path.append(GP_prob_folder)
from custom_kernel_matrix.custom_kernel_matrix import CustomMatrix
class CustomMean(GPy.core.Mapping):
    """Precomputed, fixed mean function for a GPy model.

    Returns the stored mean for inputs that exactly match rows of the
    reference matrix ``X``; any other input raises NotImplementedError,
    so it is only usable when predicting at known points.
    """

    def __init__(self,X,means):
        # Scalar output; input dimension follows the feature axis of X.
        GPy.core.Mapping.__init__(self, input_dim=X.shape[1], output_dim=1, name="custom_means")
        self.X = X          # reference inputs, one row per known point
        self.means = means  # precomputed mean value for each row of self.X
        # self.link_parameter(GPy.core.parameterization.Param('means', means))

    def f(self,X):
        """Look up the stored mean for every row of X.

        Raises NotImplementedError when a row of X is not found in self.X
        or matches more than one reference row.
        """
        # For each query row, indices of identical rows in self.X
        # (np.prod over the element-wise comparison acts as a row-wise AND).
        indices = np.concatenate([np.nonzero(np.prod(self.X == x,1))[0] for x in X])
        # NOTE(review): np.isin is element-wise, not row-wise, so this check
        # is necessary but not sufficient for full-row membership — confirm.
        if np.all(np.isin(X,self.X)):
            if len(indices) != X.shape[0]:
                raise NotImplementedError("Some elements of X appear more than once in self.X")
            else:
                return self.means[indices]
        else:
            raise NotImplementedError("Some elements of X are not in self.X")

    def update_gradients(self, dL_dF, X):
        # Means are fixed (not linked as parameters), so there is nothing
        # to update here.
        # self.means.gradient = dL_dF.sum(0)
        pass

    def gradients_X(self, dL_dF, X):
        # The mapping is piecewise constant in X, hence zero gradient.
        return np.zeros_like(X)
def nngp_mse_heaviside_posteror_params(Xtrain, Ytrain, Xtest, Kfull):
    """Exact GP posterior (mean, full covariance) over *Xtest*.

    ``Kfull`` is the precomputed kernel matrix over the concatenation
    ``[Xtrain; Xtest]``; a small Gaussian likelihood (variance 0.002)
    plays the role of jitter/noise.
    """
    all_inputs = np.concatenate([Xtrain, Xtest])
    exact_inference = GPy.inference.latent_function_inference.exact_gaussian_inference.ExactGaussianInference()
    gaussian_lik = GPy.likelihoods.gaussian.Gaussian(variance=0.002)
    fixed_kernel = CustomMatrix(all_inputs.shape[1], all_inputs, Kfull)
    model = GPy.core.GP(
        X=Xtrain,
        Y=Ytrain,
        kernel=fixed_kernel,
        inference_method=exact_inference,
        likelihood=gaussian_lik)
    return model.predict(Xtest, full_cov=True)
def nngp_mse_heaviside_posteror_logp(Xtest, Ytest, mean, cov):
    """EP estimate of the log-probability of the binary labelling *Ytest*.

    Builds a Bernoulli/Heaviside GP whose prior is the posterior
    (``mean``, ``cov``) computed on the test set, then returns its
    EP log-likelihood.
    """
    heaviside_link = GPy.likelihoods.link_functions.Heaviside()
    bernoulli_lik = GPy.likelihoods.Bernoulli(heaviside_link)
    ep_inference = GPy.inference.latent_function_inference.expectation_propagation.EP(parallel_updates=False)
    model = GPy.core.GP(
        X=Xtest,
        Y=Ytest,
        kernel=CustomMatrix(Xtest.shape[1], Xtest, cov),
        inference_method=ep_inference,
        mean_function=CustomMean(Xtest, mean),
        likelihood=bernoulli_lik)
    return model.log_likelihood()
|
# Grid expansion A*
# Mapping the path

# Small demo grid (kept for reference; the run below uses `grid`).
grid1 = [[0, 1, 0, 0, 0, 0],
         [0, 1, 0, 0, 0, 0],
         [0, 1, 0, 0, 0, 0],
         [0, 1, 0, 0, 0, 0],
         [0, 0, 0, 0, 1, 0]]
# Occupancy grid: 0 = free cell, 1 = obstacle.
grid = [[0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
        [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0],
        [0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0],
        [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
        [0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0],
        [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0],
        [0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0],
        [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0],
        [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0],
        [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0],
        [0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0],
        [0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]
# NOTE(review): this hand-written heuristic table is shadowed by the
# `heuristic` *function* defined further below, so it is never used
# (the commented-out `heuristic[x][y]` lookups in search() refer to it).
heuristic = [[9, 8, 7, 6, 5, 4],
             [8, 7, 6, 5, 4, 3],
             [7, 6, 5, 4, 3, 2],
             [6, 5, 4, 3, 2, 1],
             [5, 4, 3, 2, 1, 0]]
init = [0, 0]                          # start cell [row, col]
goal = [len(grid)-1, len(grid[0])-1]   # bottom-right corner
cost = 1                               # uniform cost per step
delta = [[-1, 0],  # go up
         [ 0,-1],  # go left
         [ 1, 0],  # go down
         [ 0, 1]]  # go right
delta_name = ['^', '<', 'v', '>']      # arrow drawn for each move above
def heuristic(a, b):
    """Manhattan (L1) distance between two grid cells *a* and *b*."""
    row_a, col_a = a
    row_b, col_b = b
    return abs(row_a - row_b) + abs(col_a - col_b)
def search(grid, init, goal, cost):
    """A* search over a 0/1 occupancy grid.

    Expands cells in order of f = g + heuristic, reconstructs the shortest
    path from ``came_from``, prints several diagnostic maps, and returns
    the path as a list of (row, col) tuples from ``init`` to ``goal``.
    Raises KeyError during reconstruction when no path exists (the caller
    catches it).
    """
    rows, cols = len(grid), len(grid[0])
    visited = [[0 for _ in range(cols)] for _ in range(rows)]
    visited[init[0]][init[1]] = 1
    expand = [[-1] * cols for _ in range(rows)]  # expansion order per cell

    x, y = init[0], init[1]
    g = 0
    # h = heuristic[x][y]
    h = heuristic(goal, init)
    # Frontier acts as a priority queue of [f, h, g, x, y] entries.
    frontier = [[g + h, h, g, x, y]]
    # expand[x][y] = g
    count = 0

    found = False    # set when the goal cell is expanded
    resign = False   # set when the frontier runs dry
    path_way = {}    # g-cost recorded for every expanded cell
    delta_ways = {}  # last expansion direction recorded per parent cell
    came_from = {}   # child cell -> parent cell, for path reconstruction

    while not found and not resign:
        if not frontier:
            resign = True
            continue
        # Pick the entry with the smallest f (ties broken by h, g, x, y).
        frontier.sort()
        node = frontier.pop(0)
        x, y = node[3], node[4]
        g = node[2]
        path_way[(x, y)] = g
        expand[x][y] = count
        count += 1
        if x == goal[0] and y == goal[1]:
            found = True
            continue
        # Try each of the four moves from the current cell.
        for i, (dx, dy) in enumerate(delta):
            x2, y2 = x + dx, y + dy
            if 0 <= x2 < rows and 0 <= y2 < cols:           # stay on the grid
                if visited[x2][y2] == 0 and grid[x2][y2] == 0:  # free, unseen
                    g2 = g + cost
                    # h2 = heuristic[x2][y2]
                    h2 = heuristic(goal, (x2, y2))
                    frontier.append([g2 + h2, h2, g2, x2, y2])
                    visited[x2][y2] = 1
                    came_from[(x2, y2)] = (x, y)
                    delta_ways[(x, y)] = delta_name[i]

    # Walk the parent links back from the goal to recover the path.
    start = (init[0], init[1])
    current = (goal[0], goal[1])
    path = [current]
    while current != start:
        current = came_from[current]
        path.append(current)
    path.reverse()

    # Direction signal for each cell that lies on the shortest path.
    path_signals = {}
    for cell in path:
        if cell in delta_ways:
            path_signals[cell] = delta_ways[cell]

    # Character map: arrows along the path, '*' on the goal, blanks elsewhere.
    draw = [[' ' for _ in range(cols)] for _ in range(rows)]
    for cell in path:
        r, c = cell
        if r == goal[0] and c == goal[1]:
            draw[r][c] = '*'
        else:
            draw[r][c] = path_signals[(r, c)]

    print("path_signals: ")
    print(path_signals)
    print("came_from: ")
    print(came_from)
    print("costs:")
    print(path_way)
    print("expand map")
    for row in expand:
        print("%s\n" % (row,))
    print("signal map:")
    for row in draw:
        print("%s\n" % (row,))
    print("final cost in steps:")
    print(len(path) - 1)
    print("from came_from we get the shortest path: ")
    return path
# Run the search; an unreachable goal surfaces as an exception during
# path reconstruction, which we report instead of crashing.
try:
    print(search(grid,init,goal,cost))
except Exception as e:
    print("fail, no path possible")
    print(str(e))
# Keep the console window open until the user presses Enter.
input()
|
"""
Definition of the :class:`ExportDestinationViewSet` class.
"""
from accounts.filters import ExportDestinationFilter
from accounts.models.export_destination import ExportDestination
from accounts.serializers.export_destination import ExportDestinationSerializer
from accounts.tasks import export_mri_session
from pylabber.views.defaults import DefaultsMixin
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.request import Request
from rest_framework.response import Response
# Maps app label -> model name -> the task/callable that performs the export.
EXPORT_HANDLERS: dict = {"django_mri": {"Session": export_mri_session}}
class ExportDestinationViewSet(DefaultsMixin, viewsets.ModelViewSet):
    """
    API endpoint that allows
    :class:`~accounts.models.export_destination.ExportDestination` instances to
    be viewed or edited.
    """

    queryset = ExportDestination.objects.order_by("id")
    serializer_class = ExportDestinationSerializer
    filter_class = ExportDestinationFilter

    @action(detail=False, methods=["POST"])
    def export_instance(
        self, request: Request,
    ):
        """
        Export a single instance to the requested export destination.

        Expects ``app_label``, ``model_name``, ``export_destination_id`` and
        ``instance_id`` in the request body; any remaining body keys are
        forwarded to the export handler.
        """
        try:
            app_label = request.data.pop("app_label")
            model_name = request.data.pop("model_name")
            export_destination_id = request.data.pop("export_destination_id")
            instance_id = request.data.pop("instance_id")
        except KeyError:
            # BUG FIX: the code must be passed as the ``status`` keyword;
            # positionally it becomes the response *body* and the actual
            # HTTP status stays 200.
            return Response(status=status.HTTP_400_BAD_REQUEST)
        handler = EXPORT_HANDLERS.get(app_label, {}).get(model_name)
        if handler is None:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        try:
            # Prefer asynchronous dispatch when the handler is a Celery task.
            handler.delay(export_destination_id, instance_id, **request.data)
        except AttributeError:
            # Plain callable fallback: run the export synchronously.
            # (Previously a ``finally: return`` swallowed *every* exception
            # raised here; genuine failures now propagate as a 500.)
            handler(export_destination_id, instance_id, **request.data)
        return Response(status=status.HTTP_200_OK)
|
"""Unit test package for tusdatos_connector."""
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RLsei(RPackage):
    """Solving Least Squares or Quadratic Programming Problems under
    Equality/Inequality Constraints.

    It contains functions that solve least squares linear regression problems
    under linear equality/inequality constraints. Functions for solving
    quadratic programming problems are also available, which transform such
    problems into least squares ones first. It is developed based on the
    'Fortran' program of Lawson and Hanson (1974, 1995), which is public domain
    and available at <http://www.netlib.org/lawson-hanson>."""

    # CRAN package name; Spack derives the download URL from this.
    cran = "lsei"

    # Known releases with their CRAN tarball checksums.
    version('1.3-0', sha256='6289058f652989ca8a5ad6fa324ce1762cc9e36c42559c00929b70f762066ab6')
    version('1.2-0', sha256='4781ebd9ef93880260d5d5f23066580ac06061e95c1048fb25e4e838963380f6')
|
import sys


def count_stream(lines):
    """Count lines, whitespace-separated words, and characters.

    *lines* is any iterable of strings (e.g. an open text file).  The
    character count includes newline characters, matching ``wc -c``-style
    behaviour of the original script.

    Returns a ``(lines, words, characters)`` tuple.
    """
    n_lines = n_words = n_chars = 0
    for line in lines:
        n_lines += 1
        n_chars += len(line)
        # BUG FIX: str.split() treats *any* whitespace (tabs, not just
        # spaces) as a word separator; the old state machine only split
        # on spaces, so "a\tb" counted as one word.
        n_words += len(line.split())
    return n_lines, n_words, n_chars


def main():
    fname = sys.argv[1]
    # BUG FIX: the file handle is now closed deterministically.
    with open(fname) as f:
        lines, words, letters = count_stream(f)
    print("Lines:", lines)
    print("Words:", words)
    print("Letters:", letters)


if __name__ == "__main__":
    main()
|
from graph_ter_seg.runner.backbone_runner import BackboneRunner
from graph_ter_seg.runner.classifier_runner import ClassifierRunner
from graph_ter_seg.runner.eval_runner import EvaluationRunner
from graph_ter_seg.runner.runner import Runner
|
import arcade
from miscellaneous import Misc
from Constants import CIRCLE_RADIUS
from Point import Point
from CollisionHandler import CollisionHandler
from Vector import Vector
import math
class Obstacle:
    """A static line segment the ball can bounce off."""

    def __init__(self, A, B, color, width):
        self.pointDepart = A    # segment start point
        self.pointFinal = B     # segment end point
        self.color = color
        self.width = width
        self.isHit = False      # True while the ball is in contact (debounce)

    def draw(self):
        """Render the segment with arcade."""
        arcade.draw_line(
            self.pointDepart.x, self.pointDepart.y,
            self.pointFinal.x, self.pointFinal.y,
            self.color, self.width)

    def update(self, ball):
        """Per-frame update: resolve a potential collision with *ball*."""
        self.checkforCollision(ball)

    def checkforCollision(self, ball):
        """Reflect the ball's velocity when it first touches the segment.

        ``isHit`` debounces the response so the impulse is applied only once
        per contact; it is cleared as soon as the ball leaves the segment.
        """
        touching = Misc.line_circle_intersection(
            self, ball.centre.x, ball.centre.y, CIRCLE_RADIUS,
            self.pointDepart, self.pointFinal)
        if touching and not self.isHit:
            normal = Vector.getNormale(
                self, self.pointDepart, self.pointFinal, ball.centre)
            # NOTE(review): the projected impact point is computed but never
            # used — kept for parity with the original code; confirm before
            # removing.
            impact = Misc.projectionCercleObstacle(
                self, self.pointDepart, self.pointFinal, ball.centre)
            reflected = CollisionHandler.collisionResolution(
                self, ball.vitesse, normal)
            ball.vitesse.dx = reflected.dx
            ball.vitesse.dy = reflected.dy
            self.isHit = True
            print("COLLISION")
        elif not touching and self.isHit:
            self.isHit = False
|
import cv2
import os
import multiprocessing
import numpy as np
import pandas as pd
import re
# Annotation CSV, one row per image.  The handle stays open for the whole
# module run; NOTE(review): it is never closed explicitly — acceptable for a
# one-shot script, but a ``with`` block would be safer.
file = open("..\\yinxiebing\\yinxiebing.csv", encoding='utf8')
your_data_path = '..\\yinxiebing\\img_data'
# Skip the CSV header line.
file.readline()
# lines = file.readlines()
# print (lines)
def cut_image(line):
    """Crop each annotated polygon's bounding box out of an image.

    *line* is one raw CSV row: column 1 holds the image file name, the
    second-to-last column a doctor id, and the quoted ``"[...]"`` payload
    the annotation records.  Only multi-point (polygon) records are
    processed; their axis-aligned bounding box is cropped and written to
    ``..\\yinxiebing\\cutted_images``.
    """
    img_name = line.split(',')[1]
    img = cv2.imread(os.path.join(your_data_path, img_name))
    # Annotation payload between the quoted brackets: ... "[<records>]" ...
    parse_info = line.split('"[')[1].split(']"')[0]
    dr_id = line.split(',')[-2]
    for record in parse_info.split('],["'):
        # Single-point records are skipped (the old handling for them was
        # commented out); polygons have more than one '],[' separator.
        if record.count('],[') > 1:
            record_list = record.split(',[[')[1].split(']],')[0].split('],[')
            state = record.split('"",')[0].split('"')[-1]
            # Axis-aligned bounding box of the polygon points.  Replaces the
            # hand-rolled min/max loop with C-style "(int)(...)" casts.
            xs = [int(point.split(',')[0]) for point in record_list]
            ys = [int(point.split(',')[1]) for point in record_list]
            min_x, max_x = min(xs), max(xs)
            min_y, max_y = min(ys), max(ys)
            out_name = "{}_{}_{}_{},{}_{},{}.jpg".format(
                img_name.split('.jpg')[0],
                state,
                dr_id,
                min_x,
                min_y,
                max_x - min_x,
                max_y - min_y)
            out_path = os.path.join("..\\yinxiebing\\cutted_images", out_name)
            print("Saving {}...".format(out_path))
            cv2.imwrite(out_path, img[min_y:max_y, min_x:max_x])
if __name__ == '__main__':
    print("Parent process {}".format(os.getpid()))
    # BUG FIX: a multiprocessing.Pool(24) used to be created here but never
    # used (the async dispatch was commented out), spawning 24 idle worker
    # processes that were never closed or joined.  Processing is sequential,
    # exactly as it effectively was before.
    for line in file.readlines():
        cut_image(line)
    print("Waiting all subprocesses done...")
    print("All subprocesses done!")
|
#
# libjingle
# Copyright 2015 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
  'includes': ['common.gypi',],
  'conditions': [
    # The Objective-C PeerConnection wrapper is only built on iOS, or on
    # Mac when the SDK is at least 10.8.
    ['OS=="ios" or (OS=="mac" and mac_sdk>="10.8")', {
      'targets': [
        {
          # Minimal executable that links the Objective-C library purely to
          # verify that it builds and links; the binary itself does nothing.
          'target_name': 'libjingle_peerconnection_objc_no_op',
          'includes': [ 'objc_app.gypi' ],
          'type': 'executable',
          'dependencies': [
            '../libjingle.gyp:libjingle_peerconnection_objc',
          ],
          'sources': ['<(DEPTH)/webrtc/build/no_op.cc',],
        },
      ],
    }]
  ],
}
|
import os
from discord.ext.commands import AutoShardedBot
# Custom Discord emoji for chess pieces.  Key format: piece letter
# (uppercase = white piece, lowercase = black piece, '-' = empty square)
# followed by the square variant: 'l' = light square, 'd' = dark square,
# '_' = presumably a third/highlight variant — TODO confirm against the
# board renderer.
emojis: dict = {
    "Ql": "<:Ql:827569943955505153>",
    "kl": "<:kl:827569943972544523>",
    "qd": "<:qd:827569943985127436>",
    "nl": "<:nl:827569944057217085>",
    "Pl": "<:Pl:827569944169676901>",
    "ql": "<:ql:827569944178589716>",
    "Kl": "<:Kl:827569944203624498>",
    "Qd": "<:Qd:827569944228135003>",
    "kd": "<:kd:827569944245829633>",
    "Rl": "<:Rl:827569944270602271>",
    "rl": "<:rl:827569944278597632>",
    "Kd": "<:Kd:827569944279121951>",
    "bd": "<:bd:827569944304156682>",
    "Pd": "<:Pd:827569944312152075>",
    "bl": "<:bl:827569944328667156>",
    "rd": "<:rd:827569944333254696>",
    "pl": "<:pl:827569944358944788>",
    "nd": "<:nd:827569944401149972>",
    "Bd": "<:Bd:827569944404819978>",
    "Nl": "<:Nl:827569944438505482>",
    "Bl": "<:Bl:827569944442830919>",
    "pd": "<:pd:827569944472453150>",
    "-l": "<:_l:827569944501288970>",
    "Rd": "<:Rd:827569944517541919>",
    "Nd": "<:Nd:827569944681775144>",
    "-d": "<:_d:827569944991367228>",
    "r_": "<:r_:827847488449937428>",
    "R_": "<:R_:827847488500269066>",
    "Q_": "<:Q_:827847488517308437>",
    "p_": "<:p_:827847488558858291>",
    "N_": "<:N_:827847488563052564>",
    "n_": "<:n_:827847488576028702>",
    "k_": "<:k_:827847488579960863>",
    "P_": "<:P_:827847488622952478>",
    "B_": "<:B_:827847488630292500>",
    "b_": "<:b_:827847488651657216>",
    "K_": "<:K_:827847488677347338>",
    "q_": "<:q_:827847488752189461>",
}
def figure_to_emoji(figure: str, field_number: int):
    """Look up the custom emoji for *figure* on the given square variant.

    field_number 0 selects the dark variant, 3 the '_' variant, and any
    other truthy value the light variant.
    """
    if field_number == 3:
        suffix = "_"
    elif field_number:
        suffix = "l"
    else:
        suffix = "d"
    return emojis[figure + suffix]
def letter_to_emoji(letter: str) -> str:
    """Map a board letter a-h to its regional-indicator emoji ('r'/'R' -> 🇷)."""
    if letter.lower() == "r":
        return "🇷"
    indicators = "🇦🇧🇨🇩🇪🇫🇬🇭"
    return indicators[ord(letter[0]) - ord("a")]
def number_to_emoji(number: str) -> str:
    """Map a board rank "1".."8" to its keycap emoji (e.g. "3" -> 3️⃣)."""
    # Each keycap is three codepoints: digit + U+FE0F + U+20E3.
    keycaps = "1️⃣2️⃣3️⃣4️⃣5️⃣6️⃣7️⃣8️⃣"
    groups = [keycaps[i * 3:(i + 1) * 3] for i in range(8)]
    return groups[int(number) - 1]
def get_letter_by_emote(emote: str) -> str:
    """Reverse lookup: piece letter of the first emoji matching *emote*.

    Returns None implicitly when no entry matches.
    """
    for name, emoji in emojis.items():
        if emoji == emote:
            return name[0]
def add_cogs(bot: AutoShardedBot, cogs):
    """Instantiate each cog class in *cogs* with *bot* and register it."""
    for cog_cls in cogs:
        bot.add_cog(cog_cls(bot))
def delete_folder_contents(path: str):
    """Remove every file directly inside *path* (non-recursive).

    NOTE: os.remove raises on subdirectories — assumes a flat folder.
    """
    if path[-1] != "/":
        path = path + "/"
    for entry in os.listdir(path):
        os.remove(path + entry)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.