# Compatibility Python 2/3
from __future__ import division, print_function, absolute_import
from builtins import range
from past.builtins import basestring
# ----------------------------------------------------------------------------------------------------------------------
import numpy as np
import opto.log as log
from timeit import default_timer as timer
import logging
logger = logging.getLogger(__name__)
def is_dominated(x, y, minimize=True):
"""
Compute if x is dominated by y
:param x: np.matrix [N_OBJECTIVES]
:param y: np.matrix [N_POINTS, N_OBJECTIVES]
:param minimize: bool True=> compute PF that minimize, False=> compute PF that maximize
:return:
"""
if minimize:
return np.all(y <= x, axis=1)
else:
return np.all(y >= x, axis=1)
def dominates(x, y, minimize=True):
"""
Compute if x dominates y
:param x: np.matrix [N_OBJECTIVES]
:param y: np.matrix [N_POINTS, N_OBJECTIVES]
:param minimize: bool. True=> compute PF that minimize, False=> compute PF that maximize
:return:
"""
if minimize:
return np.all(x <= y, axis=1)
else:
return np.all(x >= y, axis=1)
def is_pareto_optimal_1(objectives, minimize=True):
"""
    :param objectives: An [N_OBJECTIVES, N_POINTS] matrix
:return: A (n_points, ) boolean array, indicating whether each point is Pareto optimal
"""
objectives = objectives.T
is_PF = np.ones(objectives.shape[0], dtype=bool)
for i, c in enumerate(objectives):
is_PF[i] = (np.sum(is_dominated(x=c, y=objectives[is_PF], minimize=minimize)) <= 1)
        # (note that each point is dominated by itself)
return is_PF
def is_pareto_optimal_1b(objectives, minimize=True):
"""
:param objectives: An [N_OBJECTIVES, N_POINTS] matrix
:return: A (n_points, ) boolean array, indicating whether each point is Pareto optimal
"""
objectives = objectives.T # [N_POINTS, N_OBJECTIVES]
is_PF = np.ones(objectives.shape[0], dtype=bool)
for i, c in enumerate(objectives):
if is_PF[i]:
is_PF[is_PF] = np.array(np.invert(dominates(x=c, y=objectives[is_PF], minimize=minimize))).squeeze()
is_PF[i] = True
    # Remove dominated points (note that each point is dominated by itself)
return is_PF
def is_pareto_optimal_2(costs, minimize=True):
"""
    :param costs: An [N_OBJECTIVES, N_POINTS] array (transposed internally)
:return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
"""
costs = costs.T
is_efficient = np.ones(costs.shape[0], dtype=bool)
for i, c in enumerate(costs):
if is_efficient[i]:
is_efficient[is_efficient] = np.any(costs[is_efficient] <= c, axis=1) # Remove dominated points
return is_efficient
def is_pareto_optimal_3(costs, minimize=True):
# From http://stackoverflow.com/questions/32791911/fast-calculation-of-pareto-front-in-python
"""
    :param costs: An [N_OBJECTIVES, N_POINTS] array (transposed internally)
:return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
"""
costs = costs.T
is_efficient = np.ones(costs.shape[0], dtype=bool)
for i, c in enumerate(costs):
is_efficient[i] = np.all(np.any(costs >= c, axis=1))
return is_efficient
# def is_pareto_optimal_4(objectives, minimize=True):
# nObj = objectives.shape[0]
#
# if nObj == 2:
# # TODO: stablesort
# if func[0] == 'minimize':
# idx = np.argsort(objectives[0])
# if func[0] == 'maximize':
# idx = np.argsort(objectives[0])[::-1]
# idx_PF = [idx[0]] # TODO: bug! first element is always in the PF!
# cur = objectives[1, idx[0]]
# if func[1] == 'minimize':
# for i in idx:
# if objectives[1, i] < cur:
# idx_PF.append(i)
# cur = objectives[1, i]
# if func[1] == 'maximize':
# for i in idx:
# if objectives[1, i] > cur:
# idx_PF.append(i)
# cur = objectives[1, i]
# PF = objectives[:, idx_PF]
#
# if nObj > 2:
# # Use simple_cull
# # TODO: accept func with multiple value
# if func[0] == 'maximize':
# f = dominates_max
# if func[0] == 'minimize':
# f = dominates_min
# dominated = []
# cleared = []
# remaining = np.transpose(objectives)
# nPointsRemaning = remaining.shape[0]
# while nPointsRemaning > 0:
# # print(nPointsRemaning)
# candidate = remaining[0]
# new_remaining = []
# for other in remaining[1:]:
# [new_remaining, dominated][f(candidate, other)].append(other)
# if not any(f(other, candidate) for other in new_remaining):
# cleared.append(candidate)
# else:
# dominated.append(candidate)
# remaining = np.array(new_remaining)
# nPointsRemaning = remaining.shape[0]
# PF = np.transpose(np.array(cleared))
# dom = np.transpose(np.array(dominated))
def paretoFront(objectives, parameters=None, func='minimize'):
""" Compute the Pareto Front
:param objectives: [N_OBJECTIVES, N_POINTS]
:param parameters: [N_PARAMETERS, N_POINTS]
    :param func: if func is a single string, the same setting is applied to all objectives.
        Passing a list of per-objective settings is currently not implemented.
:return:
PF [N_OBJECTIVES, N_POINTS]
PF_par [N_PARAMETERS, N_POINTS]
"""
nObj = objectives.shape[0]
if parameters is not None:
nPars = parameters.shape[0]
assert objectives.shape[1] == parameters.shape[1], 'Inconsistent size objectives - parameters'
assert isinstance(func, basestring), 'currently only single values are accepted' # TODO: allow multiple values
if isinstance(func, basestring):
func = [func] * nObj
_startTime = timer()
    logger.info('Computing PF')
    idx_PF = is_pareto_optimal_1b(objectives, minimize=(func[0] == 'minimize'))
PF = objectives[:, idx_PF]
if parameters is not None:
PF_PAR = parameters[:, idx_PF]
end = timer()
    logger.info('PF computed in %f[s]' % (end - _startTime))
    logger.info('Identified %d points in the PF' % (PF.shape[1]))
assert PF.shape[0] == nObj, 'Inconsistent size PF'
if parameters is None:
return PF
else:
PF_PAR = parameters[:, idx_PF]
assert PF_PAR.shape[0] == nPars, 'Inconsistent size PF'
return PF, PF_PAR
def paretoFrontIdx(objectives, parameters=None, func='minimize'):
""" Compute idx of the Pareto Front
:param objectives: [N_OBJECTIVES, N_POINTS]
:param parameters: [N_PARAMETERS, N_POINTS]
    :param func: if func is a single string, the same setting is applied to all objectives.
        Passing a list of per-objective settings is currently not implemented.
    :return:
        idx_PF: boolean array [N_POINTS], True for the points on the Pareto Front
"""
nObj = objectives.shape[0]
if parameters is not None:
nPars = parameters.shape[0]
assert objectives.shape[1] == parameters.shape[1], 'Inconsistent size objectives - parameters'
assert isinstance(func, basestring), 'currently only single values are accepted' # TODO: allow multiple values
if isinstance(func, basestring):
func = [func] * nObj
_startTime = timer()
    logger.info('Computing PF')
    idx_PF = is_pareto_optimal_1b(objectives, minimize=(func[0] == 'minimize'))
return idx_PF
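# Illustrative usage (not part of the original module): a minimal sketch that
# builds a random two-objective problem and extracts its Pareto front,
# assuming the module's own imports (numpy, opto, past/builtins) resolve.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    demo_objectives = rng.rand(2, 100)   # [N_OBJECTIVES, N_POINTS]
    demo_parameters = rng.rand(3, 100)   # [N_PARAMETERS, N_POINTS]
    PF, PF_par = paretoFront(demo_objectives, parameters=demo_parameters)
    print('Pareto front:', PF.shape)              # (2, n_pareto_points)
    print('Matching parameters:', PF_par.shape)   # (3, n_pareto_points)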
|
from tokens import *
from baseorder import BaseOrder, log
from dexible.common import Price, as_units
from dexible.exceptions import *
import asyncio
TOKEN_IN = DAI_KOVAN
TOKEN_OUT = WETH_KOVAN
IN_AMT = as_units(500, 18)
async def main():
sdk = BaseOrder.create_dexible_sdk()
token_in = await sdk.token.lookup(TOKEN_IN)
token_out = await sdk.token.lookup(TOKEN_OUT)
twap = BaseOrder(
sdk=sdk,
token_in=TOKEN_IN,
token_out=TOKEN_OUT,
amount_in=IN_AMT,
algo_details={
"type": "TWAP",
"params": {
"time_window": {"minutes": 7},
"price_range": {
"base_price": Price.units_to_price(in_token=token_in,
out_token=token_out,
in_units=1,
out_units=.00133),
"lower_bound_percent": 1,
"upper_bound_percent": 1},
"gas_policy": {
"type": "relative",
"deviation": 0
},
"slippage_percent": 5
}
})
try:
order = await twap.create_order()
log.info("Submitting order...")
result = await order.submit()
log.info(f"Order result: {result}")
except InvalidOrderException as e:
log.error(f"Probem with order: {e}")
except QuoteMissingException as e:
log.error(f"Could not generate quote: {e}")
except DexibleException as e:
log.error(f"Generic problem: {e}")
if __name__ == '__main__':
asyncio.run(main())
|
import os
import json
import yaml
class Context:
"""
Conversation Context Object
"""
def __init__(self, db, userProfile):
"""
Initialize Context Object
:param db: database for state storage
:param userProfile: user profile object
:type db: db object
:type userProfile: json/dictionary
"""
self.db = db
self.userProfile = userProfile
def update_user_info(self, key, value):
"""
Update dynamodb user detail
:param value: value to update the item with
:type key: string
:type value: string
"""
pk = {
'userId' : {
'S' : self.userProfile['id']
}
}
self.db.update_item(
pk,
{
key : {
'Value' : {
'S' : value
}
}
}
)
def get_context(self, key):
"""
Get Conversational Context
:param key: context item key
:type key: string
:returns: context info
:rtype: string
"""
if 'contextInfo:' + key in self.userProfile:
return self.userProfile['contextInfo:' + key]
return None
def save_context(self, keyVal):
"""
Save conversational context
:param keyVal: key value to save
:type keyVal: json/dictionary
"""
pk = {
'userId' : {
'S' : self.userProfile['id']
}
}
self.db.update_item(
pk,
{
'contextInfo:' + keyVal['key'] : {
'Value' : {
'S' : keyVal['value']
}
}
}
)
class StateRouter:
"""
Chatbot State Transition Router
"""
def __init__(self, userObject, msgObject, outputBuilder, stateService, profileBuilder=None):
"""
Initialize State Router Module
:param userObject: chatbot user info
:param msgObject: chatbot user msg
:param outputBuilder: messaging platform outbound builder
:param stateService: chatbot state service
:param profileBuilder: optional userprofile builder
"""
self.profileBuilder = profileBuilder
self.userObject = userObject
self.msgObject = msgObject
self.outputBuilder = outputBuilder
self.stateService = stateService
def _next_state(self, state):
"""
Update User State
:param state: state name
:type state: string
"""
self.stateService.update_session_state(
self.userObject['id'],
state
)
def _init_user(self):
"""
        Initialize User Record
:returns: user profile object
:rtype: json/dictionary
"""
userProfile = self.profileBuilder(self.userObject['id'], self.stateService)()
self.stateService.init_user_session(userProfile)
return userProfile
def _get_user_profile(self):
"""
Get User Profile Record
:returns: user profile record
:rtype: json/dictionary
"""
userProfile = self.stateService._get_user_profile(self.userObject['id'])
        if userProfile is None:
return self._init_user()
return userProfile
def _get_init_state(self):
"""
Get initial state
:returns: initial chatbot state
:rtype: string
"""
        with open('src/views/master.yml') as masterFile:
            masterConfig = yaml.safe_load(masterFile)
return masterConfig['init_state']
def execute(self):
"""
Execute Current State
"""
|
# Standard-library and third-party imports
import contractions
import re
import string
import enum
# Nltk download
import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
class TextCleaner:
def __init__(self):
self.settings = ['rm_punctuation', 'rm_numeric', 'lowerize', 'rm_stopwords']
self.white_list = []
self.black_list = []
self.lang_settings = ['english']
def lang_stopwords(self):
stop_words = set()
        for lang in self.lang_settings:
            stop_words.update(stopwords.words(lang.lower()))
return stop_words
def action_settings(self):
""" map settings to their respective actions. """
settings = {
'rm_punctuation': self.remove_punctuation,
'rm_numeric' : self.remove_numeric,
'lowerize' : self.lowerize,
'rm_stopwords' : self.remove_stopwords,
'stem_words' : self.stemming,
'rm_long_words' : self.remove_long_words
}
return settings
    def clean_text(self, to_clean, **kwargs):
        to_clean = contractions.fix(to_clean)  # Expand contractions
        tokens = self.tokenize(to_clean)
        for param in self.settings:
            tokens = self.action_settings()[param](tokens)
        tokens = self.rm_empty_tokens(tokens)  # Remove empty tokens
        if kwargs.get('tokenize'):
            return tokens
        return ' '.join(tokens)
def tokenize(self, text):
""" Tokenize string and split numeric and alpha characters
:return: list of tokens
"""
tokens = word_tokenize(text)
splitted = []
for w in tokens:
splitted += re.split(r'(\d+)', w)
splitted = self.rm_empty_tokens(splitted)
return splitted
def remove_punctuation(self, tokens):
future_tokens = []
for text in tokens:
try:
future_tokens.append(''.join(character for character in text if character not in string.punctuation))
except TypeError as e:
print(f'Error removing punctuation from token:\n{e}')
return self.rm_empty_tokens(future_tokens)
@staticmethod
def remove_numeric(tokens):
return [w for w in tokens if not w.isdigit()]
@staticmethod
def lowerize(tokens):
return [w.lower() for w in tokens]
    def remove_stopwords(self, tokens):
        stop_words = self.create_stopwords()  # build the stopword list once, not per token
        return [w for w in tokens if w not in stop_words]
@staticmethod
def stemming(tokens):
porter = PorterStemmer()
return [porter.stem(word) for word in tokens]
@staticmethod
def remove_long_words(tokens):
return [w for w in tokens if len(w) < 13]
def create_stopwords(self):
stop_words = set(self.black_list)
stop_words.update(self.lang_stopwords())
stop_words = [w for w in stop_words if w not in self.white_list]
return stop_words
@staticmethod
def rm_empty_tokens(tokens):
return list(filter(None, tokens))
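# Illustrative usage (not part of the original module): assumes the third-party
# `contractions` and `nltk` packages are installed (the required nltk corpora
# are downloaded by the calls at the top of this file).
if __name__ == '__main__':
    cleaner = TextCleaner()
    print(cleaner.clean_text("I can't believe it's already 2024!"))
    print(cleaner.clean_text("Tokens, please!", tokenize=True))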
|
# Copyright 2015-2021 Mathieu Bernard
#
# This file is part of phonemizer: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Phonemizer is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with phonemizer. If not, see <http://www.gnu.org/licenses/>.
"""Espeak backend for the phonemizer"""
import abc
import distutils.spawn
import itertools
import os
import re
import shlex
import subprocess
import tempfile
import joblib
from phonemizer.backend.base import BaseBackend
from phonemizer.logger import get_logger
from phonemizer.punctuation import Punctuation
from phonemizer.separator import default_separator
from phonemizer.utils import list2str, chunks, cumsum
# a regular expression to find language switching flags in espeak output.
# Switches have the following form (here a switch from English to French):
# "something (fr) quelque chose (en) another thing".
_ESPEAK_FLAGS_RE = re.compile(r'\(.+?\)')
# a global variable being used to overload the default espeak installed on the
# system. The user can choose an alternative espeak with the method
# EspeakBackend.set_espeak_path().
_ESPEAK_DEFAULT_PATH = None
class BaseEspeakBackend(BaseBackend):
"""Abstract espeak backend for the phonemizer
Base class of the concrete backends Espeak and EspeakMbrola. It provides
facilities to find espeak executable path and read espeak version.
"""
espeak_version_re = r'.*: ([0-9]+(\.[0-9]+)+(\-dev)?)'
@staticmethod
def set_espeak_path(fpath):
"""Sets the espeak executable as `fpath`"""
global _ESPEAK_DEFAULT_PATH
if not fpath:
_ESPEAK_DEFAULT_PATH = None
return
if not (os.path.isfile(fpath) and os.access(fpath, os.X_OK)):
raise ValueError(
f'{fpath} is not an executable file')
_ESPEAK_DEFAULT_PATH = os.path.abspath(fpath)
@staticmethod
def espeak_path():
"""Returns the absolute path to the espeak executable"""
if 'PHONEMIZER_ESPEAK_PATH' in os.environ:
espeak = os.environ['PHONEMIZER_ESPEAK_PATH']
if not (os.path.isfile(espeak) and os.access(espeak, os.X_OK)):
raise ValueError(
f'PHONEMIZER_ESPEAK_PATH={espeak} '
f'is not an executable file')
return os.path.abspath(espeak)
if _ESPEAK_DEFAULT_PATH:
return _ESPEAK_DEFAULT_PATH
espeak = distutils.spawn.find_executable('espeak-ng')
if not espeak: # pragma: nocover
espeak = distutils.spawn.find_executable('espeak')
return espeak
@classmethod
def is_available(cls):
return bool(cls.espeak_path())
@classmethod
def long_version(cls):
"""Returns full version line
Includes data path and detailed name (espeak or espeak-ng).
"""
return subprocess.check_output(shlex.split(
'{} --help'.format(cls.espeak_path()), posix=False)).decode(
'utf8').split('\n')[1]
@classmethod
def is_espeak_ng(cls):
"""Returns True if using espeak-ng, False otherwise"""
return 'eSpeak NG' in cls.long_version()
@classmethod
def version(cls, as_tuple=False):
        # the full version string includes extra information we don't need
long_version = cls.long_version()
# extract the version number with a regular expression
try:
version = re.match(cls.espeak_version_re, long_version).group(1)
except AttributeError:
raise RuntimeError(
f'cannot extract espeak version from {cls.espeak_path()}')
if as_tuple:
# ignore the '-dev' at the end
version = version.replace('-dev', '')
version = tuple(int(v) for v in version.split('.'))
return version
@abc.abstractmethod
def _command(self, fname):
pass
@abc.abstractmethod
def _postprocess_line(self, line, separator, strip):
pass
class EspeakBackend(BaseEspeakBackend):
"""Espeak backend for the phonemizer"""
def __init__(self, language,
punctuation_marks=Punctuation.default_marks(),
preserve_punctuation=False,
language_switch='keep-flags',
with_stress=False,
logger=get_logger()):
super().__init__(
language, punctuation_marks=punctuation_marks,
preserve_punctuation=preserve_punctuation, logger=logger)
self.logger.debug('espeak is %s', self.espeak_path())
# adapt some command line option to the espeak version (for
# phoneme separation and IPA output)
version = self.version()
self.sep = '--sep=_'
if version == '1.48.03' or version.split('.')[1] <= '47':
self.sep = '' # pragma: nocover
self.ipa = '--ipa=3'
if self.is_espeak_ng(): # this is espeak-ng
self.ipa = '-x --ipa'
# ensure the lang_switch argument is valid
valid_lang_switch = [
'keep-flags', 'remove-flags', 'remove-utterance']
if language_switch not in valid_lang_switch:
raise RuntimeError(
'lang_switch argument "{}" invalid, must be in {}'
.format(language_switch, ", ".join(valid_lang_switch)))
self._lang_switch = language_switch
self._with_stress = with_stress
@staticmethod
def name():
return 'espeak'
@classmethod
def supported_languages(cls):
# retrieve the languages from a call to 'espeak --voices'
voices = subprocess.check_output(shlex.split(
'{} --voices'.format(cls.espeak_path()), posix=False)).decode(
'utf8').split('\n')[1:-1]
voices = [v.split() for v in voices]
return {v[1]: v[3].replace('_', ' ') for v in voices}
def phonemize(self, text, separator=default_separator,
strip=False, njobs=1):
text, text_type, punctuation_marks = self._phonemize_preprocess(text)
lang_switches = []
if njobs == 1:
# phonemize the text forced as a string
text, lang_switches = self._phonemize_aux(
list2str(text), separator, strip)
else:
# If using parallel jobs, disable the log as stderr is not
# picklable.
self.logger.info('running %s on %s jobs', self.name(), njobs)
log_storage = self.logger
self.logger = None
# divide the input text in chunks, each chunk being processed in a
# separate job
text_chunks = chunks(text, njobs)
# offset used below to recover the line numbers in the input text
# wrt the chunks
offset = [0] + cumsum((c.count('\n')+1 for c in text_chunks[:-1]))
# we have here a list of (phonemized chunk, lang_switches)
output = joblib.Parallel(n_jobs=njobs)(
joblib.delayed(self._phonemize_aux)(t, separator, strip)
for t in text_chunks)
# flatten both the phonemized chunks and language switches in a
# list. For language switches lines we need to add an offset to
# have the correct lines numbers wrt the input text.
text = list(itertools.chain(*(chunk[0] for chunk in output)))
lang_switches = [chunk[1] for chunk in output]
for i, _ in enumerate(lang_switches):
for j, _ in enumerate(lang_switches[i]):
lang_switches[i][j] += offset[i]
lang_switches = list(itertools.chain(*lang_switches))
# restore the log as it was before parallel processing
self.logger = log_storage
        # warn the user if language switches occurred during phonemization
self._warn_on_lang_switch(lang_switches)
# finally restore the punctuation
return self._phonemize_postprocess(
text, text_type, punctuation_marks)
def _command(self, fname):
return (
f'{self.espeak_path()} -v{self.language} {self.ipa} '
f'-q -f {fname} {self.sep}')
def _phonemize_aux(self, text, separator, strip):
output = []
lang_switch_list = []
for num, line in enumerate(text.split('\n'), start=1):
with tempfile.NamedTemporaryFile(
'w+', encoding='utf8', delete=False) as data:
try:
# save the text as a tempfile
data.write(line)
data.close()
# generate the espeak command to run
command = self._command(data.name)
if self.logger:
self.logger.debug('running %s', command)
# run the command
completed = subprocess.run(
shlex.split(command, posix=False),
check=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# retrieve the output line (raw phonemization)
line = completed.stdout.decode('utf8')
# ensure all was OK
error = completed.stderr.decode('utf8')
for err_line in error.split('\n'): # pragma: nocover
err_line = err_line.strip()
if err_line:
self.logger.error(err_line)
if error or completed.returncode: # pragma: nocover
raise RuntimeError(
f'espeak failed with return code '
f'{completed.returncode}')
finally:
os.remove(data.name)
line, lang_switch = self._postprocess_line(
line, separator, strip)
output.append(line)
if lang_switch:
lang_switch_list.append(num)
return output, lang_switch_list
def _postprocess_line(self, line, separator, strip):
# espeak can split an utterance into several lines because
# of punctuation, here we merge the lines into a single one
        line = line.strip().replace('\n', ' ').replace('  ', ' ')
# due to a bug in espeak-ng, some additional separators can be
# added at the end of a word. Here a quick fix to solve that
# issue. See https://github.com/espeak-ng/espeak-ng/issues/694
line = re.sub(r'_+', '_', line)
line = re.sub(r'_ ', ' ', line)
line, lang_switch = self._process_lang_switch(line)
if not line:
return '', lang_switch
out_line = ''
for word in line.split(u' '):
word = word.strip()
# remove the stresses on phonemes
if not self._with_stress:
word = word.replace("ˈ", '')
word = word.replace('ˌ', '')
word = word.replace("'", '')
word = word.replace("-", '')
if not strip:
word += '_'
word = word.replace('_', separator.phone)
out_line += word + separator.word
if strip and separator.word:
out_line = out_line[:-len(separator.word)]
return out_line, lang_switch
def _process_lang_switch(self, utt):
        # look for language switches in the current utterance
flags = re.findall(_ESPEAK_FLAGS_RE, utt)
# no language switch, nothing to do
if not flags:
return utt, False
# ignore the language switch but warn if one is found
if self._lang_switch == 'keep-flags':
return utt, True
if self._lang_switch == 'remove-flags':
# remove all the (lang) flags in the current utterance
for flag in set(flags):
utt = utt.replace(flag, '')
        else:  # self._lang_switch == 'remove-utterance'
# drop the entire utterance
return None, True
return utt, True
def _warn_on_lang_switch(self, lang_switches):
        # warn the user on language switches found during phonemization
if lang_switches:
nswitches = len(lang_switches)
if self._lang_switch == 'remove-utterance':
self.logger.warning(
'removed %s utterances containing language switches '
'(applying "remove-utterance" policy)', nswitches)
else:
self.logger.warning(
'%s utterances containing language switches '
'on lines %s', nswitches,
', '.join(str(l) for l in lang_switches))
self.logger.warning(
'extra phones may appear in the "%s" phoneset',
self.language)
if self._lang_switch == "remove-flags":
self.logger.warning(
'language switch flags have been removed '
'(applying "remove-flags" policy)')
else:
self.logger.warning(
'language switch flags have been kept '
'(applying "keep-flags" policy)')
class EspeakMbrolaBackend(BaseEspeakBackend):
"""Espeak-mbrola backend for the phonemizer"""
# this will be initialized once, at the first call to supported_languages()
_supported_languages = None
def __init__(self, language, logger=get_logger()):
super().__init__(language, logger=logger)
self.logger.debug('espeak is %s', self.espeak_path())
@staticmethod
def name():
return 'espeak-mbrola'
@staticmethod
def is_available():
return (
BaseEspeakBackend.is_available() and
distutils.spawn.find_executable('mbrola') is not None)
@classmethod
def _all_supported_languages(cls):
        # retrieve the voices from a call to 'espeak --voices=mb'. These
        # voices must be installed separately.
voices = subprocess.check_output(shlex.split(
f'{cls.espeak_path()} --voices=mb', posix=False)).decode(
'utf8').split('\n')[1:-1]
voices = [voice.split() for voice in voices]
return {voice[4][3:]: voice[3] for voice in voices}
@classmethod
def _is_language_installed(cls, language):
"""Returns True if the required mbrola voice is installed"""
command = f'{cls.espeak_path()} --stdin -v {language} -q --pho'
completed = subprocess.run(
shlex.split(command, posix=False),
input=b'',
check=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if completed.stderr.decode('utf8'):
return False
return True
@classmethod
def supported_languages(cls): # pragma: nocover
"""Returns the list of installed mbrola voices"""
if cls._supported_languages is None:
cls._supported_languages = {
k: v for k, v in cls._all_supported_languages().items()
if cls._is_language_installed(k)}
return cls._supported_languages
def _command(self, fname):
return f'{self.espeak_path()} -v {self.language} -q -f {fname} --pho'
def _phonemize_aux(self, text, separator, strip):
output = []
for line in text.split('\n'):
with tempfile.NamedTemporaryFile(
'w+', encoding='utf8', delete=False) as data:
try:
# save the text as a tempfile
data.write(line)
data.close()
# generate the espeak command to run
command = self._command(data.name)
if self.logger:
self.logger.debug('running %s', command)
# run the command
completed = subprocess.run(
shlex.split(command, posix=False),
check=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# retrieve the output line (raw phonemization)
line = completed.stdout.decode('utf8')
# ensure all was OK
error = completed.stderr.decode('utf8')
for err_line in error.split('\n'): # pragma: nocover
err_line = err_line.strip()
if err_line:
self.logger.error(err_line)
if error or completed.returncode: # pragma: nocover
raise RuntimeError(
f'espeak failed with return code '
f'{completed.returncode}')
finally:
os.remove(data.name)
line = self._postprocess_line(line, separator, strip)
output.append(line)
return output
def _postprocess_line(self, line, separator, strip):
# retrieve the phonemes with the correct SAMPA alphabet (but
# without word separation)
phonemes = (
l.split('\t')[0] for l in line.split('\n') if l.strip())
phonemes = separator.phone.join(pho for pho in phonemes if pho != '_')
if not strip:
phonemes += separator.phone
return phonemes
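# Illustrative usage (not part of the original module): a minimal sketch that
# assumes an espeak or espeak-ng executable is installed and on the PATH.
if __name__ == '__main__':
    if EspeakBackend.is_available():
        backend = EspeakBackend('en-us', with_stress=False)
        print(backend.phonemize(['hello world'], separator=default_separator))
    else:
        print('espeak executable not found')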
|
from .base import *
DEBUG = True
HOST = os.getenv("DEBUG_HOST")
ALLOWED_HOSTS = ["*"]
AUTH_SERVER_LOGIN = ROOT_SERVER + "/login"
AUTH_SERVER_AUTHENTICATE = ROOT_SERVER + "/authenticate"
AUTH_SERVER_LOGOUT = ROOT_SERVER + "/logout"
AUTH_SERVER_TOKEN = ROOT_SERVER + "/token"
if os.getenv("GITHUB_WORKFLOW"):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'github_actions',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
else: # .env
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.getenv('DB_NAME'), # dbname
'USER': os.getenv('MYSQL_ID'), # master username
'PASSWORD': os.getenv('MYSQL_PW'), # master password
'HOST': os.getenv('MYSQL_IP'), # Endpoint
'PORT': os.getenv('MYSQL_PORT'),
'OPTIONS': {'charset': 'utf8mb4'},
}
}
|
# -*- coding: utf-8-*-
'''
Copyright 2015 David J Murray
License: MIT
'''
# Python Library imports
import feedparser
import logging
import re
from datetime import datetime
logger = logging.getLogger(__name__)
WORDS = ["HOROSCOPE", ]
PRIORITY = 1
def getDailyHoroscope(zodiacSign):
'''
Get horoscope from astrology.com rss feed.
Arguments:
zodiacSign -- text of zodiac sign
Returns:
horoscope text -- text
'''
DAILY_RSS_URL = "http://www.astrology.com/horoscopes/daily-horoscope.rss"
# Cut down on extra work of parser by disabling Relative URI's Resolution
feedparser.RESOLVE_RELATIVE_URIS = 0
feed = feedparser.parse(DAILY_RSS_URL)
# Check for well-formed feed
if feed.bozo == 1:
logger.error("Not a well formed feed. Not using it.")
text = "No horoscope found today."
return text
# <rss><channel><item><title>[zodiac sign] Daily Horoscope for [date %b %d, %Y]</title>
# <rss><channel><item><description><p>[Text is here]</p></description>
for item in feed['items']:
if zodiacSign in item["title"]:
text = item["description"] # Horoscope in <description>
break
else:
logger.info(zodiacSign + " not found in feed.")
text = "No horoscope found today."
return text
# The horoscope text is in the first paragraph <p></p>
textStart = text.find("<p>") + 3
textEnd = text.find("</p>", textStart)
return text[textStart:textEnd]
def getZodiac(birthDate):
'''
Calculate the zodiac or star sign from a date.
Arguments:
        birthDate -- user-input, the birthdate (a datetime)
Return:
text -- zodiac sign
The code was based on:
https://stackoverflow.com
/questions/3274597
/how-would-i-determine-zodiac-astrological-star-sign-from-a-birthday-in-python
'''
ZODIACS = [(120, 'Capricorn'), (218, 'Aquarius'), (320, 'Pisces'),
(420, 'Aries'), (521, 'Taurus'), (621, 'Gemini'),
(722, 'Cancer'), (823, 'Leo'), (923, 'Virgo'),
(1023, 'Libra'), (1122, 'Scorpio'), (1222, 'Sagittarius'),
(1231, 'Capricorn')]
dateNumber = birthDate.date().month * 100 + birthDate.date().day
for zodiac in ZODIACS:
if dateNumber < zodiac[0]:
return zodiac[1]
else:
logger.info("No zodiac sign for date {birthDate}".format(birthDate=birthDate))
return ""
def handle(text, mic, profile):
'''
    Read today's horoscope from astrology.com.
Responds to or handles user-input, typically speech text.
Arguments:
text -- user-input, typically transcribed speech
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
Requires:
birth_date: 01 Jan 2001
in profile.yml
'''
firstName = profile["first_name"]
try:
birthDate = profile["birth_date"]
except KeyError:
logger.info("No birth_date in profile.yml.")
mic.say("Please add your birth_date to the profile, so I can get your horoscope.")
return
if birthDate is None:
logger.info("Blank birth_date in profile.yml")
mic.say("Please check your birth_date to the profile, so I can get your horoscope.")
return
birthDate = datetime.strptime(birthDate, "%d %b %Y")
zodiacSign = getZodiac(birthDate)
mic.say("{firstName}, your zodiac or star sign is {zodiacSign}. and "
.format(zodiacSign=zodiacSign, firstName=firstName))
text = getDailyHoroscope(zodiacSign)
mic.say("your horoscope states that " + text)
return
def isValid(text):
'''
    Returns True if the input is related to this module.
Arguments:
text -- user-input, typically transcribed speech
'''
return bool(re.search(r'\bhoroscope\b',
text, re.IGNORECASE))
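# Illustrative check (not part of the original module): getZodiac expects a
# datetime, as produced by handle() from the "01 Jan 2001" profile format.
if __name__ == '__main__':
    print(getZodiac(datetime.strptime("01 Jan 2001", "%d %b %Y")))  # Capricorn
    print(isValid("What is my horoscope for today?"))               # True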
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################################################
import numpy as np
######################################
##### Initial setup of the network #####
######################################
class NWSetup:
def __init__(self, nw_cond, target_cond):
# self.nw_model = nw_cond[0]
self.strand = nw_cond[1]
# self.n_strand = nw_cond[2]
self.n_segments = nw_cond[3]
self.n_cell = nw_cond[4]
self.n_sc = nw_cond[5]
# self.l_bond = nw_cond[6]
# self.c_n = nw_cond[7]
self.multi = target_cond[0]
################################################################################
    # Regular network setup
################################################################################
def calc_all(self):
        # Set up the junction points (JPs)
jp_xyz, strand_se_xyz = self.calc_jp_strands()
#
calcd_data_dic = self.set_atom(jp_xyz, strand_se_xyz)
return calcd_data_dic
################################################################################
    # Set the XYZ of the JPs and of the start/end points of the subchains
def calc_jp_strands(self):
        # jp_xyz is the list of JP coordinates
        # strand_se_xyz is the list of subchain start and end points
if self.strand == "3_Chain_S":
            # Set up the JPs
jp_xyz = [
[
[0, 0, 0],
[0, 0.25, 0.25],
[0.25, 0.25, 0.5],
[0.25, 0, 0.75],
[0.5, 0.5, 0.5],
[0.5, 0.75, 0.75],
[0.75, 0.5, 0.25],
[0.75, 0.75, 0]
]
]
            # Set the start and end points of the subchains
strand_se_xyz = [
[
[[0, 0, 0], [0, 0.25, 0.25]],
[[0, 0.25, 0.25], [0.25, 0.25, 0.5]],
[[0.25, 0.25, 0.5], [0.25, 0, 0.75]],
[[0.25, 0.25, 0.5], [0.5, 0.5, 0.5]],
[[0.5, 0.5, 0.5], [0.5, 0.75, 0.75]],
[[0.5, 0.5, 0.5], [0.75, 0.5, 0.25]],
[[0.75, 0.5, 0.25], [0.75, 0.75, 0]],
[[0.75, 0.5, 0.25], [1, 0.25, 0.25]],
[[0.25, 0, 0.75], [0, 0, 1]],
[[0.5, 0.75, 0.75], [0.25, 1, 0.75]],
[[0.5, 0.75, 0.75], [0.75, 0.75, 1]],
[[0.75, 0.75, 0], [1, 1, 0]]
]
]
elif self.strand == "3_Chain_D":
            # Set up the JPs
jp_xyz = [
[
[0, 0, 0],
[0, 0.25, 0.25],
[0.25, 0.25, 0.5],
[0.25, 0, 0.75],
[0.5, 0.5, 0.5],
[0.5, 0.75, 0.75],
[0.75, 0.5, 0.25],
[0.75, 0.75, 0]
],
                [  # second lattice starts here
[0, 0.5, 0.75],
[0, 0.75, 0.5],
[0.25, 0.75, 0.25],
[0.25, 0.5, 0],
[0.5, 0.25, 0],
[0.5, 0, 0.25],
[0.75, 0, 0.5],
[0.75, 0.25, 0.75]
]
]
            # Set the start and end points of the subchains
strand_se_xyz = [
[
[[0, 0, 0], [0, 0.25, 0.25]],
[[0, 0.25, 0.25], [0.25, 0.25, 0.5]],
[[0.25, 0.25, 0.5], [0.25, 0, 0.75]],
[[0.25, 0.25, 0.5], [0.5, 0.5, 0.5]],
[[0.5, 0.5, 0.5], [0.5, 0.75, 0.75]],
[[0.5, 0.5, 0.5], [0.75, 0.5, 0.25]],
[[0.75, 0.5, 0.25], [0.75, 0.75, 0]],
[[0.75, 0.5, 0.25], [1, 0.25, 0.25]],
[[0.25, 0, 0.75], [0, 0, 1]],
[[0.5, 0.75, 0.75], [0.25, 1, 0.75]],
[[0.5, 0.75, 0.75], [0.75, 0.75, 1]],
[[0.75, 0.75, 0], [1, 1, 0]]
],
[
[[0, 0.5, 0.75], [0, 0.75, 0.5]],
[[0, 0.75, 0.5], [0.25, 0.75, 0.25]],
[[0.25, 0.75, 0.25], [0.25, 0.5, 0]],
[[0.25, 0.5, 0], [0.5, 0.25, 0]],
[[0.5, 0.25, 0], [0.5, 0, 0.25]],
[[0.5, 0, 0.25], [0.75, 0, 0.5]],
[[0.75, 0, 0.5], [0.75, 0.25, 0.75]],
[[0, 0.5, 0.75], [0.25, 0.5, 1]],
[[0.25, 0.75, 0.25], [0.5, 1, 0.25]],
[[0.75, 0.25, 0.75], [0.5, 0.25, 1]],
[[0.75, 0.25, 0.75], [1, 0.5, 0.75]],
[[0.75, 1, 0.5], [1, 0.75, 0.5]]
]
]
elif self.strand == "4_Chain":
            # Set up the JPs
jp_xyz = [
[
[0.0, 0.0, 0.0],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
[0.5, 0.5, 0.0],
[0.25, 0.25, 0.25],
[0.25, 0.75, 0.75],
[0.75, 0.25, 0.75],
[0.75, 0.75, 0.25]
]
]
            # Set the start and end points of the subchains
strand_se_xyz = [
[
[[0.25, 0.25, 0.25], [0.0, 0.0, 0.0]], # No.1
[[0.25, 0.25, 0.25], [0.0, 0.5, 0.5]],
[[0.25, 0.25, 0.25], [0.5, 0.0, 0.5]],
[[0.25, 0.25, 0.25], [0.5, 0.5, 0.0]],
[[0.25, 0.75, 0.75], [0.0, 0.5, 0.5]], # No.2
[[0.25, 0.75, 0.75], [0.0, 1.0, 1.0]],
[[0.25, 0.75, 0.75], [0.5, 0.5, 1.0]],
[[0.25, 0.75, 0.75], [0.5, 1.0, 0.5]],
[[0.75, 0.25, 0.75], [0.5, 0.0, 0.5]], # No.3
[[0.75, 0.25, 0.75], [0.5, 0.5, 1.0]],
[[0.75, 0.25, 0.75], [1.0, 0.0, 1.0]],
[[0.75, 0.25, 0.75], [1.0, 0.5, 0.5]],
[[0.75, 0.75, 0.25], [0.5, 0.5, 0.0]], # No.4
[[0.75, 0.75, 0.25], [0.5, 1.0, 0.5]],
[[0.75, 0.75, 0.25], [1.0, 0.5, 0.5]],
[[0.75, 0.75, 0.25], [1.0, 1.0, 0.0]]
]
]
elif self.strand == "6_Chain":
            # Set up the JPs
jp_xyz = [
[
[0.,0.,0.]
]
]
            # Set the start and end points of the subchains
strand_se_xyz = [
[
[[0., 0., 0.], [1, 0, 0]],
[[0., 0., 0.], [0, 1, 0]],
[[0., 0., 0.], [0, 0, 1]]
]
]
elif self.strand == "8_Chain":
            # Set up the JPs
jp_xyz = [
[
[0.,0.,0.],
[0.5,0.5,0.5]
]
]
            # Set the start and end points of the subchains
strand_se_xyz = [
[
[[0.5, 0.5, 0.5], [0, 0, 0]],
[[0.5, 0.5, 0.5], [1, 0, 0]],
[[0.5, 0.5, 0.5], [0, 1, 0]],
[[0.5, 0.5, 0.5], [1, 1, 0]],
[[0.5, 0.5, 0.5], [0, 0, 1]],
[[0.5, 0.5, 0.5], [1, 0, 1]],
[[0.5, 0.5, 0.5], [0, 1, 1]],
[[0.5, 0.5, 0.5], [1, 1, 1]]
]
]
return jp_xyz, strand_se_xyz
#########################################################
def set_atom(self, jp_xyz, strand_se_xyz):
calcd_data_dic={}
count = 0
for i in (range(self.multi)):
for mol, jp in enumerate(jp_xyz):
atom_all = []
pos_all = {}
                # Build the xyz/ID dictionaries of the junction points over the whole system
jp_id_dic, jp_xyz_dic, atom_jp = self.set_jp_id(jp, mol)
atom_all.extend(atom_jp)
pos_all.update(jp_xyz_dic)
# print(jp_xyz_dic)
                # Build the xyz list and bond list of the atoms in the subchains
strand_xyz, bond_all, atom_sc, angle_all = self.set_strands(jp_id_dic, strand_se_xyz[mol], mol)
#
atom_all.extend(atom_sc)
pos_all.update(strand_xyz)
#
calcd_data_dic[count] = {"atom_all":atom_all, "bond_all":bond_all, "pos_all":pos_all, "angle_all":angle_all}
count += 1
return calcd_data_dic
###################################################
    # Build the xyz/ID dictionaries of the JPs over the whole system
def set_jp_id(self, jp_xyz, mol):
jp_id_dic = {}
jp_xyz_dic = {}
atom_jp = []
jp_id = 0
for z in range(self.n_cell):
for y in range(self.n_cell):
for x in range(self.n_cell):
base_xyz = np.array([x,y,z])
for jp in jp_xyz:
jp_id_dic[tuple(np.array(jp) + base_xyz)] = (jp_id)
jp_xyz_dic[(jp_id)] = tuple(np.array(jp) + base_xyz)
atom_jp.append([jp_id, 2*mol + 0, 0])
jp_id += 1
return jp_id_dic, jp_xyz_dic, atom_jp
#########################################################
    # Build the xyz list and bond list of the atoms in the subchains
def set_strands(self, jp_id_dic, strand_se_xyz, mol):
strand_xyz = {}
bond_all = {}
atom_sc = []
angle_all = []
sub_id = len(jp_id_dic)
bond_id = 0
for z in range(self.n_cell):
for y in range(self.n_cell):
for x in range(self.n_cell):
b_xyz = (x,y,z)
for se_xyz in strand_se_xyz:
tmp_xyz, tmp_bond, new_sub_id, new_bond_id, tmp_atom_sc, tmp_angle = self.calc_single_strand(jp_id_dic, sub_id, bond_id, b_xyz, se_xyz, mol)
strand_xyz.update(tmp_xyz)
bond_all.update(tmp_bond)
atom_sc.extend(tmp_atom_sc)
angle_all.append(tmp_angle)
sub_id = new_sub_id
bond_id = new_bond_id
return strand_xyz, bond_all, atom_sc, angle_all
###############################################################
    # Build the xyz list and bond list of the atoms in a single subchain
def calc_single_strand(self, jp_id_dic, sub_id, bond_id, b_xyz, se_xyz, mol):
tmp_xyz = {}
tmp_bond = {}
tmp_angle = []
tmp_atom_sc = []
bas_xyz = np.array(b_xyz)
        # Vector between the two ends of the subchain
start_xyz = np.array(se_xyz[0]) + bas_xyz
end_xyz = np.array(se_xyz[1]) + bas_xyz
vec = end_xyz - start_xyz
        # Set-up for looping over the strand length
unit_len = 1./(self.n_segments + 1)
ortho_vec = self.find_ortho_vec(vec)
mod_o_vec = np.linalg.norm(vec)*ortho_vec
        # Set the ID of the start atom
mod_xyz = list(start_xyz)[:]
for dim in range(3):
if mod_xyz[dim] == self.n_cell:
mod_xyz[dim] = 0
s_id = jp_id_dic[tuple(mod_xyz)]
tmp_angle.append(s_id)
        # Adjust the ID of the end atom with periodic boundary conditions
mod_xyz = list(end_xyz)[:]
for dim in range(3):
if mod_xyz[dim] == self.n_cell:
mod_xyz[dim] = 0
E_id = jp_id_dic[tuple(mod_xyz)]
        # Loop over the segments of the subchain
for seg in range(self.n_segments):
pos = tuple(start_xyz + vec*(seg + 1)/(self.n_segments + 1.))
tmp_xyz[sub_id] = pos
if seg == 0 or seg == self.n_segments - 1:
tmp_atom_sc.append([sub_id, 1, 1])
else:
tmp_atom_sc.append([sub_id, 2, 2])
e_id = sub_id
#
if seg == 0:
bond = 0
else:
bond = 1
tmp_bond[bond_id] = tuple([bond, [s_id, e_id]])
bond_id += 1
tmp_angle.append(e_id)
s_id = e_id
sub_id += 1
if self.n_sc != 0:
sc_s_id = s_id
for i in range(self.n_sc):
tmp_xyz[sub_id] = tuple(np.array(pos) + (i + 1)*mod_o_vec*unit_len)
tmp_atom_sc.append([sub_id, 2, 1])
sc_e_id = sub_id
#
bond = 2
tmp_bond[bond_id] = tuple([bond, [sc_s_id, sc_e_id]])
sc_s_id = sc_e_id
sub_id += 1
bond_id += 1
e_id = E_id
bond = 0
tmp_bond[bond_id] = tuple([bond, [s_id, e_id]])
tmp_angle.append(e_id)
bond_id += 1
return tmp_xyz, tmp_bond, sub_id, bond_id, tmp_atom_sc, tmp_angle
######
def find_ortho_vec(self, list):
vec = np.array(list).reshape(-1,1)
        # Find a new 3D vector that is linearly independent of vec
rank = 0
while rank != 2:
a = np.array(np.random.rand(3)).reshape(-1,1)
target = np.hstack((vec, a))
rank = np.linalg.matrix_rank(target)
        # Orthogonalize via QR decomposition
q, r = np.linalg.qr( target )
# print(q[:,1])
ortho_vec = q[:,1]
return ortho_vec
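# Illustrative usage (not part of the original module): the condition lists
# below are placeholders; only the slots actually read in __init__
# (strand type, n_segments, n_cell, n_sc and multi) are meaningful here.
if __name__ == '__main__':
    demo_nw_cond = [None, "8_Chain", None, 5, 2, 0, None, None]
    demo_target_cond = [1]
    setup = NWSetup(demo_nw_cond, demo_target_cond)
    calcd = setup.calc_all()
    print(len(calcd), "network(s) generated")
    print(len(calcd[0]["atom_all"]), "atoms in the first network")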
|
from django.urls import path, include
from rest_framework import routers
# from rest.l2_serializers import DoctorSerializer, PodrazdeleniyaSerializer
# from rest.l2_view_sets import UserViewSet
router = routers.DefaultRouter()
# router.register(r'users', UserViewSet)
# router.register(r'doctorprofiles', DoctorSerializer)
# router.register(r'podrazdeleniyas', PodrazdeleniyaSerializer)
urlpatterns = [path('', include(router.urls)), path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))]
|
import numpy as np
from state import *
count_in_center=0
index_diff_block=-1
opposite_block=-1
def eval(cur_state: State):
    # Heuristic score of the whole board: X marks count negative, O marks
    # count positive; lines containing both players are ignored.
    global_point = np.zeros(9)
    for block_idx, block in enumerate(cur_state.blocks):
        point = 0
        countX_row = -np.count_nonzero(block == 1, axis=1)
        countX_col = -np.count_nonzero(block == 1, axis=0)
        countO_row = np.count_nonzero(block == -1, axis=1)
        countO_col = np.count_nonzero(block == -1, axis=0)
        for i in range(3):
            if countX_row[i] < 0 and countO_row[i] > 0:
                continue
            point += countX_row[i] + countO_row[i]
        for i in range(3):
            if countX_col[i] < 0 and countO_col[i] > 0:
                continue
            point += countX_col[i] + countO_col[i]
        countX_diagonal_topright = 0
        countO_diagonal_topright = 0
        countX_diagonal_topleft = 0
        countO_diagonal_topleft = 0
        for i in range(3):
            if block[i][i] == 1:
                countX_diagonal_topleft -= 1
            elif block[i][i] == -1:
                countO_diagonal_topleft += 1
            if block[i][2 - i] == 1:
                countX_diagonal_topright -= 1
            elif block[i][2 - i] == -1:
                countO_diagonal_topright += 1
        if countX_diagonal_topleft == 0 or countO_diagonal_topleft == 0:
            point += countX_diagonal_topleft + countO_diagonal_topleft
        if countX_diagonal_topright == 0 or countO_diagonal_topright == 0:
            point += countX_diagonal_topright + countO_diagonal_topright
        # use the block index, not the inner loop variable, so every local
        # board contributes to the global score
        global_point[block_idx] = point
    return global_point.sum()
def max_value(cur_state, alpha, beta, depth):
leaf_state_val=terminate_state(cur_state, depth)
    if leaf_state_val is not None:
return leaf_state_val
else:
v=-np.inf
valid_moves=cur_state.get_valid_moves
for move in valid_moves:
temp_state=State(cur_state)
temp_state.free_move=cur_state.free_move
temp_state.act_move(move)
val=min_value(temp_state, alpha, beta,depth-1)
v=max(v,val)
if(v>=beta):
return v
alpha=max(alpha, v)
return v
def min_value(cur_state, alpha, beta, depth):
leaf_state_val=terminate_state(cur_state, depth)
    if leaf_state_val is not None:
return leaf_state_val
else:
v=np.inf
valid_moves=cur_state.get_valid_moves
if(len(valid_moves)!=0):
for move in valid_moves:
temp_state=State(cur_state)
temp_state.free_move=cur_state.free_move
temp_state.act_move(move)
val=max_value(temp_state, alpha, beta,depth-1)
v=min(v,val)
if(v<=alpha):
return v
beta=min(beta, v)
return v
def terminate_state(cur_state, depth):
    if depth == 0:
        return eval(cur_state)
    result = cur_state.game_result(cur_state.global_cells.reshape(3, 3))
    if result is not None:
        if result == 0:
            return 0
        return -np.inf * result
    return None
def minimax_ab_cutoff(cur_state, tree_depth):
alpha=-np.inf
beta=np.inf
v=-np.inf
valid_moves=cur_state.get_valid_moves
if(len(valid_moves)!=0):
optimal_move=valid_moves[0]
for move in valid_moves:
temp_state=State(cur_state)
temp_state.free_move=cur_state.free_move
temp_state.act_move(move)
new_val=min_value(temp_state, alpha, beta, tree_depth)
if new_val>v:
v=new_val
alpha=v
optimal_move=move
return optimal_move
def select_move(cur_state, remain_time):
global index_diff_block
global count_in_center
global opposite_block
valid_moves = cur_state.get_valid_moves
##Go first
if(cur_state.player_to_move==1):
if(cur_state.previous_move==None):
count_in_center=0
index_diff_block=-1
opposite_block=-1
return UltimateTTT_Move(4,1,1,cur_state.player_to_move)
elif (index_diff_block==-1):
index_valid_block=cur_state.previous_move.x*3+cur_state.previous_move.y
if(count_in_center<7):
count_in_center+=1
return UltimateTTT_Move(index_valid_block, 1,1, cur_state.player_to_move)
else:
index_diff_block=index_valid_block
opposite_block=8-index_diff_block
return UltimateTTT_Move(index_diff_block, cur_state.previous_move.x,cur_state.previous_move.y, cur_state.player_to_move)
else:
if(cur_state.free_move==False):
if(cur_state.blocks[valid_moves[0].index_local_board][int(index_diff_block/3)][index_diff_block%3]==0):
return UltimateTTT_Move(valid_moves[0].index_local_board,int(index_diff_block/3),index_diff_block%3, cur_state.player_to_move)
else:
return UltimateTTT_Move(valid_moves[0].index_local_board,int((8-index_diff_block)/3),(8-index_diff_block)%3, cur_state.player_to_move)
if(cur_state.free_move==True):
if(cur_state.blocks[opposite_block][int(index_diff_block/3)][index_diff_block%3]==0):
return UltimateTTT_Move(opposite_block,int(index_diff_block/3),index_diff_block%3, cur_state.player_to_move)
else:
return UltimateTTT_Move(opposite_block,int((8-index_diff_block)/3),(8-index_diff_block)%3, cur_state.player_to_move)
#Go second
else:
return minimax_ab_cutoff(cur_state, 4)
return None
|
import os
import time
import threading
import tkinter.messagebox
import json
from mutagen.mp3 import MP3
from pygame import mixer
from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askdirectory
# dir
sounderdir = os.getcwd()
# sounderdir = os.path.dirname(sys.executable)
userdir = os.path.expanduser('~')
# end
PlayerForm = Tk()
PlayerForm.geometry('800x500')
PlayerForm.title("Sounder!")
PlayerForm.resizable(width=FALSE, height=FALSE)
PlayerForm.iconbitmap(sounderdir + "\\Soundericon.ico")
s = ttk.Style()
s.theme_use('clam')
# variables
PlayLabelText = StringVar()
DirectoryLabelText = StringVar()
GenreLabelText = StringVar()
BitrateLabelText = StringVar()
VolumeValue = StringVar()
YearLabelText = StringVar()
TimeLabelText = StringVar()
SampleLabelText = StringVar()
NowYear = StringVar()
Avtime = StringVar()
TSongs = StringVar()
TVol = StringVar()
ETimeVar = StringVar()
maxsong = 0
playbuttonstate = 0
mode = 0
themeset = "Light"
infoframe = None
threads = 0
totallength = 0.0
directory = ""
version = "2.8.2"
settings = {}
# end
# images
PlayPhotoimg = PhotoImage(file=sounderdir + "\\musicicon.png")
Playimg = PhotoImage(file=sounderdir + "\\play.png")
Pauseimg = PhotoImage(file=sounderdir + "\\pause.png")
Forwardimg = PhotoImage(file=sounderdir + "\\forward.png")
Previousimg = PhotoImage(file=sounderdir + "\\previous.png")
Fileimg = PhotoImage(file=sounderdir + "\\file-directory.png")
RefreshLabelimg = PhotoImage(file=sounderdir + "\\refresh.png")
RepeatNone = PhotoImage(file=sounderdir + "\\repeatnone.png")
RepeatAll = PhotoImage(file=sounderdir + "\\repeatall.png")
RepeatOne = PhotoImage(file=sounderdir + "\\repeatone.png")
Info = PhotoImage(file=sounderdir + "\\info.png")
InfoMusic = PhotoImage(file=sounderdir + "\\musicinfo.png")
Copyright = PhotoImage(file=sounderdir + "\\copyright.png")
Fork = PhotoImage(file=sounderdir + "\\fork.png")
Theme = PhotoImage(file=sounderdir + "\\theme.png")
PlayPhotoimgD = PhotoImage(file=sounderdir + "\\musicicond.png")
PlayimgD = PhotoImage(file=sounderdir + "\\playd.png")
PauseimgD = PhotoImage(file=sounderdir + "\\paused.png")
ForwardimgD = PhotoImage(file=sounderdir + "\\forwardd.png")
PreviousimgD = PhotoImage(file=sounderdir + "\\previousd.png")
FileimgD = PhotoImage(file=sounderdir + "\\file-directoryd.png")
RefreshLabelimgD = PhotoImage(file=sounderdir + "\\refreshd.png")
RepeatNoneD = PhotoImage(file=sounderdir + "\\repeatnoned.png")
RepeatAllD = PhotoImage(file=sounderdir + "\\repeatalld.png")
RepeatOneD = PhotoImage(file=sounderdir + "\\repeatoned.png")
InfoD = PhotoImage(file=sounderdir + "\\infod.png")
InfoMusicD = PhotoImage(file=sounderdir + "\\musicinfod.png")
CopyrightD = PhotoImage(file=sounderdir + "\\copyrightd.png")
ForkD = PhotoImage(file=sounderdir + "\\forkd.png")
ThemeD = PhotoImage(file=sounderdir + "\\themed.png")
# end
# year
NowYear.set("Copyright 2018-{}".format(time.strftime("%Y")))
# end
def musicscan():
global directory, maxsong, listofsongs, state, songnumber
directory = directory.rstrip('\n')
state = 0
songnumber = 0
maxsong = -1
listofsongs = []
try:
os.chdir(directory)
for file in os.listdir(directory):
if file.endswith(".mp3"):
maxsong += 1
state = 1
listofsongs.append(file)
except:
os.chdir(sounderdir)
os.remove('cfg.json')
firststart()
def firststart():
global directory, themeset, settings
if os.path.exists('cfg.json'):
with open('cfg.json', 'r') as file:
try:
settings = json.load(file)
except:
settings["theme"] = "Light"
try:
directory = settings["directory"]
except:
directory = askdirectory()
settings["directory"] = directory
try:
themeset = settings["theme"]
themechange()
themechange()
except:
settings["theme"] = "Light"
themeset = "Light"
themechange()
themechange()
with open('cfg.json', 'w') as file:
json.dump(settings, file)
mixer.pre_init(frequency=44100, size=-16, channels=2, buffer=4096)
mixer.init()
musicscan()
elif not os.path.exists('cfg.json'):
settings["theme"] = "Light"
themechange()
themechange()
directory = askdirectory()
settings["directory"] = directory
with open('cfg.json', 'w') as file:
json.dump(settings, file)
mixer.pre_init(frequency=44100, size=-16, channels=2, buffer=4096)
mixer.init()
musicscan()
def changedirectory():
global directory, sounderdir, state, settings
newdirectory = askdirectory()
    if newdirectory and directory != newdirectory:
        os.chdir(sounderdir)
        settings["directory"] = newdirectory
with open('cfg.json', 'w') as file:
json.dump(settings, file)
MusicListBox.delete(0, END)
directory = newdirectory
DirectoryLabelText.set(directory)
musicscan()
update(state)
def update(cstate):
global listofsongs, maxsong, songnumber
try:
if cstate == 1:
if maxsong == 0:
TSongs.set("Song: {}".format(maxsong + 1))
elif maxsong > 0:
TSongs.set("Songs: {}".format(maxsong + 1))
listofsongs.reverse()
for file in listofsongs:
file = file.rstrip('.mp3')
MusicListBox.insert(0, file)
listofsongs.reverse()
if mixer.music.get_busy():
MusicListBox.selection_clear(0, END)
MusicListBox.select_set(songnumber)
elif cstate == 0:
MusicListBox.delete(0, END)
maxsong = -1
listofsongs = []
TSongs.set("Songs: 0")
ETimeVar.set("0:00")
except:
pass
def refreshdirectory():
global directory, maxsong, listofsongs, state
state = 0
maxsong = -1
listofsongs = []
MusicListBox.delete(0, END)
for file in os.listdir(directory):
if file.endswith(".mp3"):
maxsong += 1
state = 1
listofsongs.append(file)
update(state)
time.sleep(0.2)
def playsong():
global playbuttonstate, songnumber, listofsongs, state, themeset
if state == 1:
if playbuttonstate == 1:
mixer.music.pause()
if themeset == "Light":
PlayButton.configure(image=Playimg)
elif themeset == "Dark":
PlayButton.configure(image=PlayimgD)
playbuttonstate = 0
elif playbuttonstate == 0:
if mixer.music.get_busy():
mixer.music.unpause()
if themeset == "Light":
PlayButton.configure(image=Pauseimg)
elif themeset == "Dark":
PlayButton.configure(image=PauseimgD)
playbuttonstate = 1
else:
mixer.music.load(listofsongs[songnumber])
if len(listofsongs[songnumber]) > 60:
PlayLabelText.set(listofsongs[songnumber][0:60])
else:
                    PlayLabelText.set(listofsongs[songnumber][:-4])  # drop the ".mp3" suffix
mixer.music.play()
if themeset == "Light":
PlayButton.configure(image=Pauseimg)
elif themeset == "Dark":
PlayButton.configure(image=PauseimgD)
playbuttonstate = 1
preapir()
elif state == 0:
if playbuttonstate == 1:
mixer.music.stop()
PlayLabelText.set("")
ETimeVar.set("0:00")
if themeset == "Light":
PlayButton.configure(image=Playimg)
elif themeset == "Dark":
PlayButton.configure(image=PlayimgD)
playbuttonstate = 0
def nextsong():
global playbuttonstate, songnumber, state, maxsong, themeset
if state == 1:
if playbuttonstate == 1:
if songnumber < maxsong:
mixer.music.stop()
if themeset == "Light":
PlayButton.configure(image=Pauseimg)
elif themeset == "Dark":
PlayButton.configure(image=PauseimgD)
playbuttonstate = 1
songnumber += 1
mixer.music.load(listofsongs[songnumber])
mixer.music.play()
if len(listofsongs[songnumber]) > 60:
PlayLabelText.set(listofsongs[songnumber][0:60])
else:
                    PlayLabelText.set(listofsongs[songnumber][:-4])  # drop the ".mp3" suffix
preapir()
if playbuttonstate == 0:
if songnumber < maxsong:
mixer.music.stop()
playbuttonstate = 1
if themeset == "Light":
PlayButton.configure(image=Pauseimg)
elif themeset == "Dark":
PlayButton.configure(image=PauseimgD)
songnumber += 1
mixer.music.load(listofsongs[songnumber])
mixer.music.play()
if len(listofsongs[songnumber]) > 60:
PlayLabelText.set(listofsongs[songnumber][0:60])
else:
                    PlayLabelText.set(listofsongs[songnumber][:-4])  # drop the ".mp3" suffix
preapir()
def previoussong():
global playbuttonstate, songnumber, state, themeset
if state == 1:
if playbuttonstate == 1:
if songnumber > 0:
mixer.music.stop()
if themeset == "Light":
PlayButton.configure(image=Pauseimg)
elif themeset == "Dark":
PlayButton.configure(image=PauseimgD)
songnumber -= 1
mixer.music.load(listofsongs[songnumber])
mixer.music.play()
if len(listofsongs[songnumber]) > 50:
PlayLabelText.set(listofsongs[songnumber][0:50])
else:
                    PlayLabelText.set(listofsongs[songnumber][:-4])  # drop the ".mp3" suffix
preapir()
if playbuttonstate == 0:
if songnumber > 0:
mixer.music.stop()
playbuttonstate = 1
if themeset == "Light":
PlayButton.configure(image=Pauseimg)
elif themeset == "Dark":
PlayButton.configure(image=PauseimgD)
songnumber -= 1
mixer.music.load(listofsongs[songnumber])
mixer.music.play()
if len(listofsongs[songnumber]) > 50:
PlayLabelText.set(listofsongs[songnumber][0:50])
else:
                    PlayLabelText.set(listofsongs[songnumber][:-4])  # drop the ".mp3" suffix
preapir()
def musiclistboxpointer(e):
global selected, curent, state, songnumber, playbuttonstate, listofsongs, themeset
if state == 1:
selected = MusicListBox.curselection()
if selected != ():
mixer.music.stop()
for Song in selected:
curent = MusicListBox.get(Song)
for nr, Song in enumerate(listofsongs):
if Song.rstrip('.mp3') == curent:
mixer.music.load(listofsongs[nr])
songnumber = nr
mixer.music.play()
if playbuttonstate == 0:
playbuttonstate = 1
if themeset == "Light":
PlayButton.configure(image=Pauseimg)
elif themeset == "Dark":
PlayButton.configure(image=PauseimgD)
if len(listofsongs[songnumber]) > 50:
PlayLabelText.set(listofsongs[songnumber][0:50].rstrip('.mp3'))
else:
PlayLabelText.set(str(listofsongs[songnumber]).rstrip('.mp3'))
preapir()
def volume(value):
value = float(value)
value = value / 100
if value == 0.99:
TVol.set("Volume: 100%")
else:
TVol.set("Volume: {}%".format(int(value * 100)))
mixer.music.set_volume(value)
def preapir():
global listofsongs, songnumber, playbuttonstate, totallength
MusicListBox.selection_clear(0, END)
MusicListBox.select_set(songnumber)
file = MP3(listofsongs[songnumber])
bitratevar = int(file.info.bitrate / 1000)
samplerate = file.info.sample_rate
BitrateLabelText.set("Bitrate: " + str(bitratevar) + "kbps")
SampleLabelText.set("Sample Rate: " + str(samplerate) + "kHz")
try:
fileinfo = file.tags['TCON']
if len(str(fileinfo)) > 13:
GenreLabelText.set("Genre: " + str(fileinfo)[0:15])
else:
GenreLabelText.set("Genre: " + str(fileinfo))
except:
GenreLabelText.set("Genre: Unknown")
try:
fileyear = file.tags['TDRC']
YearLabelText.set("Year: " + str(fileyear))
except:
YearLabelText.set("Year: Unknown")
mins, secs = divmod(file.info.length, 60)
mins = int(mins)
secs = int(secs)
TimeLabelText.set("Time: " + str(mins) + ":" + str(secs).zfill(2))
totallength = round(file.info.length, 1)
MusicProgressBar["value"] = 0
MusicProgressBar["maximum"] = totallength
if playbuttonstate == 0:
PlayButton.configure(image=Pauseimg)
if not threads > 0:
music_progress = threading.Thread(target=progressbarfill,)
music_progress.daemon = True
music_progress.start()
def progressbarfill():
global playbuttonstate, themeset, threads, totallength
wait = False
threads += 1
activtime = 0
while mixer.music.get_busy() == 1 and activtime <= totallength - 0.11:
# time smoothing
if playbuttonstate == 1 and wait:
time.sleep(0.15)
wait = False
elif playbuttonstate == 1 and not wait:
activtime = mixer.music.get_pos() / 1000
MusicProgressBar["value"] = activtime
emin, esec = divmod(activtime, 60)
ETimeVar.set(str(int(emin)) + ":" + str(int(esec)).zfill(2))
elif playbuttonstate == 0 and not wait:
wait = True
# end
time.sleep(0.2)
threads -= 1
if activtime >= totallength - 0.5:
mixer.music.stop()
if themeset == "Light":
PlayButton.configure(image=Playimg)
elif themeset == "Dark":
PlayButton.configure(image=PlayimgD)
playbuttonstate = 0
playmode()
def playmode():
global mode, songnumber, maxsong, state
if state == 1:
time.sleep(0.5)
if mode == 0:
if songnumber < maxsong:
nextsong()
elif mode == 1:
if songnumber < maxsong:
nextsong()
elif songnumber == maxsong:
songnumber = 0
playsong()
elif mode == 2:
playsong()
def switchmode():
global mode, themeset
if mode == 0:
mode = 1
if themeset == "Light":
ModeButton.configure(image=RepeatAll)
elif themeset == "Dark":
ModeButton.configure(image=RepeatAllD)
elif mode == 1:
mode = 2
if themeset == "Light":
ModeButton.configure(image=RepeatOne)
elif themeset == "Dark":
ModeButton.configure(image=RepeatOneD)
else:
mode = 0
if themeset == "Light":
ModeButton.configure(image=RepeatNone)
elif themeset == "Dark":
ModeButton.configure(image=RepeatNoneD)
def close():
global themeset, playbuttonstate, settings, directory, version
if playbuttonstate == 1:
check = tkinter.messagebox.askquestion('Sounder!', 'Are you sure you want to quit?')
if check == 'yes':
os.chdir(sounderdir)
settings["theme"] = themeset
settings["directory"] = directory
settings["version"] = version
with open('cfg.json', 'w') as file:
json.dump(settings, file)
mixer.music.stop()
PlayerForm.destroy()
else:
pass
else:
os.chdir(sounderdir)
settings["theme"] = themeset
settings["directory"] = directory
settings["version"] = version
with open('cfg.json', 'w') as file:
json.dump(settings, file)
mixer.music.stop()
PlayerForm.destroy()
def info():
global themeset, infoframe, version
infoframe = Toplevel(PlayerForm)
infoframe.geometry("300x220")
infoframe.resizable(width=False, height=False)
infoframe.title("Sounder Info")
infoframe.iconbitmap(sounderdir + "\\Soundericon.ico")
infoframe.grab_set()
verlabel = ttk.Label(infoframe, text="Version {}".format(version), font='Bahnschrift 11', style="W.TLabel")
authorlabel = ttk.Label(infoframe, text="By: Mateusz Perczak", font='Bahnschrift 11', style="W.TLabel")
musiclabel = ttk.Label(infoframe, image=InfoMusic, style="W.TLabel")
copylabel = ttk.Label(infoframe, image=Copyright, style="W.TLabel")
infolabel = ttk.Label(infoframe, textvariable=NowYear, font='Bahnschrift 11', style="W.TLabel")
atlabel = ttk.Label(infoframe, textvariable=Avtime, font='Bahnschrift 11', style="W.TLabel")
themebutton = ttk.Button(infoframe, image=Theme, cursor="hand2", takefocus=0, command=themechange)
    forkbutton = ttk.Button(infoframe, image=Fork, cursor="hand2", takefocus=0,
                            command=lambda: os.system("start \"\" https://github.com/losek1/Sounder"))
if themeset == "Dark":
infoframe.configure(background='#000')
musiclabel.configure(image=InfoMusicD)
copylabel.configure(image=CopyrightD)
themebutton.configure(image=ThemeD)
        forkbutton.configure(image=ForkD)
elif themeset == "Light":
infoframe.configure(background='#fff')
musiclabel.place(x=90, y=15)
verlabel.place(x=110, y=94)
authorlabel.place(x=86, y=120)
copylabel.place(x=2, y=190)
infolabel.place(x=32, y=192)
atlabel.place(x=42, y=140)
    forkbutton.place(x=268, y=186)
themebutton.place(x=230, y=186)
def themechange():
global themeset, playbuttonstate, infoframe, mode
if infoframe is not None:
infoframe.destroy()
if themeset == "Dark":
themeset = "Light"
PlayerForm.configure(background='#fff')
MusicListBox.configure(selectbackground="#000", foreground='#000', background='#fff')
s.configure("G.Horizontal.TProgressbar", foreground='#000', background='#000', lightcolor='#000',
darkcolor='#fff', bordercolor='#fff', troughcolor='#fff')
s.configure("W.TLabel", background='#fff', foreground='#000', border='0')
s.configure("TButton", background='#fff', relief="flat")
s.configure("TScale", troughcolor='#fff', background='#fff', relief="flat")
VerButton.configure(image=Info)
DirectoryChangeButton.configure(image=Fileimg)
RefreshButton.configure(image=RefreshLabelimg)
NextButton.configure(image=Forwardimg)
PreviousButton.configure(image=Previousimg)
PlayImg.configure(image=PlayPhotoimg)
if playbuttonstate == 1:
PlayButton.configure(image=Pauseimg)
elif playbuttonstate == 0:
PlayButton.configure(image=Playimg)
if mode == 0:
ModeButton.configure(image=RepeatNone)
elif mode == 1:
ModeButton.configure(image=RepeatAll)
else:
ModeButton.configure(image=RepeatOne)
if infoframe is not None:
info()
elif themeset == "Light":
themeset = "Dark"
PlayerForm.configure(background='#000')
MusicListBox.configure(selectbackground="#1e88e5", foreground='#fff', background='#000')
s.configure("G.Horizontal.TProgressbar", foreground='#1e88e5', background='#1e88e5', lightcolor='#1e88e5',
darkcolor='#1e88e5', bordercolor='#000', troughcolor='#000')
s.configure("W.TLabel", foreground='#fff', background='#000', border='0')
s.configure("TButton", background='#000', relief="flat")
s.configure("TScale", troughcolor='#000', background='#1e88e5', relief="FLAT")
VerButton.configure(image=InfoD)
DirectoryChangeButton.configure(image=FileimgD)
RefreshButton.configure(image=RefreshLabelimgD)
NextButton.configure(image=ForwardimgD)
PreviousButton.configure(image=PreviousimgD)
PlayImg.configure(image=PlayPhotoimgD)
if playbuttonstate == 1:
PlayButton.configure(image=PauseimgD)
elif playbuttonstate == 0:
PlayButton.configure(image=PlayimgD)
if mode == 0:
ModeButton.configure(image=RepeatNoneD)
elif mode == 1:
ModeButton.configure(image=RepeatAllD)
else:
ModeButton.configure(image=RepeatOneD)
if infoframe is not None:
info()
def soundertime():
asec = 0
amin = 0
while True:
time.sleep(1)
asec += 1
if asec == 60:
amin += 1
asec = 0
Avtime.set("Sounder has been running for {}:{}".format(str(amin), str(asec).zfill(2)))
MusicProgressBar = ttk.Progressbar(PlayerForm, orient=HORIZONTAL, length=200, mode="determinate", style="G.Horizontal"
".TProgressbar")
PlayLabel = ttk.Label(PlayerForm, textvariable=PlayLabelText, font='Bahnschrift 11', style="W.TLabel")
GenreLabel = ttk.Label(PlayerForm, textvariable=GenreLabelText, font='Bahnschrift 11', style="W.TLabel")
PlayBitrate = ttk.Label(PlayerForm, textvariable=BitrateLabelText, font='Bahnschrift 11', style="W.TLabel")
VerButton = ttk.Button(PlayerForm, image=Info, cursor="hand2", takefocus=0, command=info)
DirectoryChangeButton = ttk.Button(PlayerForm, image=Fileimg, cursor="hand2", takefocus=0, command=changedirectory)
RefreshButton = ttk.Button(PlayerForm, image=RefreshLabelimg, cursor="hand2", takefocus=0, command=refreshdirectory)
DirectoryLabel = ttk.Label(PlayerForm, font='Bahnschrift 11', textvariable=DirectoryLabelText, style="W.TLabel")
PlayButton = ttk.Button(PlayerForm, image=Playimg, cursor="hand2", takefocus=0, command=playsong)
NextButton = ttk.Button(PlayerForm, image=Forwardimg, cursor="hand2", takefocus=0, command=nextsong)
PreviousButton = ttk.Button(PlayerForm, image=Previousimg, cursor="hand2", takefocus=0, command=previoussong)
MusicListBox = Listbox(PlayerForm, font='Bahnschrift 11', cursor="hand2", bd=0, activestyle="none",
selectbackground="#000", takefocus=0)
PlayImg = ttk.Label(PlayerForm, image=PlayPhotoimg, style="W.TLabel")
VolumeSlider = ttk.Scale(PlayerForm, from_=0, to=99, orient=HORIZONTAL, command=volume, cursor="hand2")
ModeButton = ttk.Button(PlayerForm, image=RepeatNone, cursor="hand2", takefocus=0, command=switchmode)
InfoLabel = ttk.Label(PlayerForm, text="File Info", font='Bahnschrift 11', style="W.TLabel")
YearLabel = ttk.Label(PlayerForm, textvariable=YearLabelText, font='Bahnschrift 11', style="W.TLabel")
TimeLabel = ttk.Label(PlayerForm, textvariable=TimeLabelText, font='Bahnschrift 11', style="W.TLabel")
SampleLabel = ttk.Label(PlayerForm, textvariable=SampleLabelText, font='Bahnschrift 11', style="W.TLabel")
InfoSeparator = ttk.Separator(PlayerForm, orient=HORIZONTAL)
SouInfo = ttk.Label(PlayerForm, text="Info", font='Bahnschrift 11', style="W.TLabel")
SouSeperator = ttk.Separator(PlayerForm, orient=HORIZONTAL)
TotalSongs = ttk.Label(PlayerForm, textvariable=TSongs, font='Bahnschrift 11', style="W.TLabel")
VolumeInfo = ttk.Label(PlayerForm, textvariable=TVol, font='Bahnschrift 11', style="W.TLabel")
ElapsedTime = ttk.Label(PlayerForm, textvariable=ETimeVar, font='Bahnschrift 10', style="W.TLabel")
# init ui
firststart()
mixer.music.set_volume(0.50)
VolumeSlider.set(50)
Avtime.set("It has been running for 0:00")
GenreLabelText.set("Genre: ")
if themeset == "Dark":
PlayLabelText.set("I'm blue da ba dee da ba daa")
else:
PlayLabelText.set("Never gonna give you up")
BitrateLabelText.set("Bitrate: ")
YearLabelText.set("Year: ")
TimeLabelText.set("Time: ")
SampleLabelText.set("Sample Rate: ")
ETimeVar.set("0:00")
DirectoryLabelText.set(directory)
update(state)
activetime = threading.Thread(target=soundertime, args=())
activetime.daemon = True
activetime.start()
# end
MusicProgressBar.place(x=1, y=492, width=800, height=9)
DirectoryChangeButton.place(x=32, y=0)
RefreshButton.place(x=1, y=0)
DirectoryLabel.place(x=66, y=2, width=651, height=28)
MusicListBox.place(x=1, y=32, width=550, height=388)
SampleLabel.place(x=597, y=145)
PlayBitrate.place(x=597, y=115)
GenreLabel.place(x=597, y=85)
InfoLabel.place(x=652, y=50)
YearLabel.place(x=597, y=175)
TimeLabel.place(x=597, y=205)
PlayImg.place(x=6, y=438)
PreviousButton.place(x=530, y=442)
PlayButton.place(x=574, y=442)
NextButton.place(x=618, y=442)
ModeButton.place(x=494, y=445)
VolumeSlider.place(x=670, y=454)
VerButton.place(x=763, y=0)
InfoSeparator.place(x=592, y=80, width=170, height=2)
SouInfo.place(x=666, y=250)
SouSeperator.place(x=592, y=280, width=170, height=2)
TotalSongs.place(x=592, y=285)
VolumeInfo.place(x=592, y=315)
PlayLabel.place(x=62, y=442)
ElapsedTime.place(x=62, y=462)
MusicListBox.bind("<<ListboxSelect>>", musiclistboxpointer)
PlayerForm.protocol("WM_DELETE_WINDOW", close)
PlayerForm.mainloop()
|
# Simple Sphinx extension that highlights 'json' code blocks with the JavaScript lexer
def setup(app):
from sphinx.highlighting import lexers
import pygments.lexers
    lexers['json'] = pygments.lexers.get_lexer_by_name('javascript')
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from subprocess import check_output
import io
import bson # this is installed with the pymongo package
import matplotlib.pyplot as plt
from skimage.data import imread # or, whatever image library you prefer
import multiprocessing as mp # will come in handy due to the size of the data
import os
from tqdm import *
import struct
from collections import defaultdict
import cv2
from keras import backend as K
import threading
from keras.preprocessing.image import load_img, img_to_array
import tensorflow as tf
from keras.preprocessing.image import Iterator
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint
from skimage.color import rgb2yuv
############################################################################
__GLOBAL_PARAMS__ = {
    'MODEL': "Starter",
    'DEBUG': True,
    'NORMALIZATION': False,
    'YUV': False,
    'MULTI_SCALE': False
}
########
if __GLOBAL_PARAMS__['MULTI_SCALE']:
raise Exception("MULTI_SCALE not supported yet!")
__MODEL__KEY__ = ""
for k in sorted(__GLOBAL_PARAMS__.keys()):
if not k.startswith("_"):
__MODEL__KEY__ += "__" + str(k) + "_" + str(__GLOBAL_PARAMS__[k])
if (__GLOBAL_PARAMS__['DEBUG']):
LOG_FILE = "simple.log"
else:
LOG_FILE = "log" + __MODEL__KEY__ + ".log"
SUB_FILE = "sub" + __MODEL__KEY__ + ".csv.gz"
import logging
logging.basicConfig(format='%(asctime)s %(message)s', filename=LOG_FILE,level=logging.DEBUG)
#logging.debug('This message should go to the log file')
if (__GLOBAL_PARAMS__['DEBUG']):
logging.info('** DEBUG: '+__MODEL__KEY__+' ****************************************************************')
else:
logging.info('** PRODUCTION:'+__MODEL__KEY__+' ****************************************************************')
#logging.warning('And this, too')
########### -------------> FUNC
def preprocess_image(x):
if __GLOBAL_PARAMS__['NORMALIZATION']:
x = (x - 128.0) / 128.0
    if __GLOBAL_PARAMS__['YUV']:
        # rgb2yuv keeps the (180, 180, 3) layout expected by the batch arrays;
        # the previous reshape to (3, 180, 180) scrambled the channel axis.
        x = rgb2yuv(x)
return x
class BSONIterator(Iterator):
def __init__(self, bson_file, images_df, offsets_df, num_class,
image_data_generator, lock, target_size=(180, 180),
with_labels=True, batch_size=32, shuffle=False, seed=None):
self.file = bson_file
self.images_df = images_df
self.offsets_df = offsets_df
self.with_labels = with_labels
self.samples = len(images_df)
self.num_class = num_class
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
self.image_shape = self.target_size + (3,)
print("Found %d images belonging to %d classes." % (self.samples, self.num_class))
super(BSONIterator, self).__init__(self.samples, batch_size, shuffle, seed)
self.lock = lock
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=K.floatx())
if self.with_labels:
batch_y = np.zeros((len(batch_x), self.num_class), dtype=K.floatx())
for i, j in enumerate(index_array):
# Protect file and dataframe access with a lock.
with self.lock:
image_row = self.images_df.iloc[j]
product_id = image_row["product_id"]
offset_row = self.offsets_df.loc[product_id]
# Read this product's data from the BSON file.
self.file.seek(offset_row["offset"])
item_data = self.file.read(offset_row["length"])
# Grab the image from the product.
item = bson.BSON.decode(item_data)
img_idx = image_row["img_idx"]
bson_img = item["imgs"][img_idx]["picture"]
# Load the image.
img = load_img(io.BytesIO(bson_img), target_size=self.target_size)
# Preprocess the image.
x = img_to_array(img)
x = preprocess_image(x)
#x = self.image_data_generator.random_transform(x)
#x = self.image_data_generator.standardize(x)
# Add the image and the label to the batch (one-hot encoded).
batch_x[i] = x
if self.with_labels:
batch_y[i, image_row["category_idx"]] = 1
if self.with_labels:
return batch_x, batch_y
else:
return batch_x
def next(self):
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array[0])
def make_category_tables():
cat2idx = {}
idx2cat = {}
for ir in categories_df.itertuples():
category_id = ir[0]
category_idx = ir[4]
cat2idx[category_id] = category_idx
idx2cat[category_idx] = category_id
return cat2idx, idx2cat
def read_bson(bson_path, num_records, with_categories):
rows = {}
with open(bson_path, "rb") as f, tqdm(total=num_records) as pbar:
offset = 0
while True:
item_length_bytes = f.read(4)
if len(item_length_bytes) == 0:
break
length = struct.unpack("<i", item_length_bytes)[0]
f.seek(offset)
item_data = f.read(length)
assert len(item_data) == length
item = bson.BSON.decode(item_data)
product_id = item["_id"]
num_imgs = len(item["imgs"])
row = [num_imgs, offset, length]
if with_categories:
row += [item["category_id"]]
rows[product_id] = row
offset += length
f.seek(offset)
pbar.update()
columns = ["num_imgs", "offset", "length"]
if with_categories:
columns += ["category_id"]
df = pd.DataFrame.from_dict(rows, orient="index")
df.index.name = "product_id"
df.columns = columns
df.sort_index(inplace=True)
return df
def make_val_set(df, split_percentage=0.2, drop_percentage=0.):
# Find the product_ids for each category.
category_dict = defaultdict(list)
for ir in tqdm(df.itertuples()):
category_dict[ir[4]].append(ir[0])
train_list = []
val_list = []
with tqdm(total=len(df)) as pbar:
for category_id, product_ids in category_dict.items():
category_idx = cat2idx[category_id]
# Randomly remove products to make the dataset smaller.
keep_size = int(len(product_ids) * (1. - drop_percentage))
if keep_size < len(product_ids):
product_ids = np.random.choice(product_ids, keep_size, replace=False)
# Randomly choose the products that become part of the validation set.
val_size = int(len(product_ids) * split_percentage)
if val_size > 0:
val_ids = np.random.choice(product_ids, val_size, replace=False)
else:
val_ids = []
# Create a new row for each image.
for product_id in product_ids:
row = [product_id, category_idx]
for img_idx in range(df.loc[product_id, "num_imgs"]):
if product_id in val_ids:
val_list.append(row + [img_idx])
else:
train_list.append(row + [img_idx])
pbar.update()
columns = ["product_id", "category_idx", "img_idx"]
train_df = pd.DataFrame(train_list, columns=columns)
val_df = pd.DataFrame(val_list, columns=columns)
return train_df, val_df
########### -------------> MAIN
categories_path = os.path.join("data", "category_names.csv")
categories_df = pd.read_csv(categories_path, index_col="category_id")
# Maps the category_id to an integer index. This is what we'll use to
# one-hot encode the labels.
print(">>> Mapping category_id to an integer index ... ")
categories_df["category_idx"] = pd.Series(range(len(categories_df)), index=categories_df.index)
print(categories_df.head())
cat2idx, idx2cat = make_category_tables()
# Test if it works:
print(cat2idx[1000012755], idx2cat[4] , len(cat2idx))
print(">>> Train set ... ")
data_dir = "data"
if (__GLOBAL_PARAMS__['DEBUG']):
print(">>> DEBUG mode ... ")
train_bson_path = os.path.join(data_dir, "train_example.bson")
num_train_products = 82
else:
print(">>> PRODUCTION mode ... ")
train_bson_path = os.path.join(data_dir, "train.bson")
num_train_products = 7069896
test_bson_path = os.path.join(data_dir, "test.bson")
num_test_products = 1768182
print(train_bson_path,num_train_products)
if (not __GLOBAL_PARAMS__['DEBUG']):
if os.path.isfile("train_offsets.csv"):
print(">> reading from file train_offsets ... ")
train_offsets_df = pd.read_csv("train_offsets.csv")
train_offsets_df.set_index( "product_id" , inplace= True)
train_offsets_df.sort_index(inplace=True)
else:
train_offsets_df = read_bson(train_bson_path, num_records=num_train_products, with_categories=True)
train_offsets_df.to_csv("train_offsets.csv")
print(train_offsets_df.head())
if os.path.isfile("train_images.csv"):
print(">> reading from file train_images / val_images ... ")
train_images_df = pd.read_csv("train_images.csv")
train_images_df = train_images_df[['product_id','category_idx','img_idx']]
val_images_df = pd.read_csv("val_images.csv")
val_images_df = val_images_df[['product_id', 'category_idx', 'img_idx']]
else:
train_images_df, val_images_df = make_val_set(train_offsets_df, split_percentage=0.2, drop_percentage=0)
train_images_df.to_csv("train_images.csv")
val_images_df.to_csv("val_images.csv")
print(train_images_df.head())
print(val_images_df.head())
categories_df.to_csv("categories.csv")
else:
train_offsets_df = read_bson(train_bson_path, num_records=num_train_products, with_categories=True)
train_images_df, val_images_df = make_val_set(train_offsets_df, split_percentage=0.2, drop_percentage=0)
print(train_images_df.head())
print(val_images_df.head())
## Generator
print(">>> Generator ... ")
# Note: ImageDataGenerator could also apply data augmentation here; the random_transform/standardize calls are currently commented out in BSONIterator.
train_bson_file = open(train_bson_path, "rb")
lock = threading.Lock()
num_classes = len(cat2idx)
num_train_images = len(train_images_df)
num_val_images = len(val_images_df)
batch_size = 256
train_datagen = ImageDataGenerator()
train_gen = BSONIterator(train_bson_file, train_images_df, train_offsets_df,
num_classes, train_datagen, lock,
batch_size=batch_size, shuffle=True)
val_datagen = ImageDataGenerator()
val_gen = BSONIterator(train_bson_file, val_images_df, train_offsets_df,
num_classes, val_datagen, lock,
batch_size=batch_size, shuffle=True)
## Model
model = Sequential()
model.add(Conv2D(32, 3, padding="same", activation="relu", input_shape=(180, 180, 3)))
model.add(MaxPooling2D())
model.add(Conv2D(64, 3, padding="same", activation="relu"))
model.add(MaxPooling2D())
model.add(Conv2D(128, 3, padding="same", activation="relu"))
model.add(MaxPooling2D())
model.add(GlobalAveragePooling2D())
model.add(Dense(num_classes, activation="softmax"))
model.compile(optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"])
model.summary()
early_stopping = EarlyStopping(monitor='val_loss', patience=0 )
bst_model_path = "mod" + __MODEL__KEY__ + '.h5'
model_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True)
# To train the model:
history = model.fit_generator(train_gen,
epochs=200,
steps_per_epoch = num_train_images // batch_size + 1,
validation_data = val_gen,
validation_steps = num_val_images // batch_size + 1,
callbacks=[early_stopping, model_checkpoint])
print(history.history.keys())
# logging
logging.info('N. epochs == '+str(len(history.history['val_acc'])))
logging.info('Val accuracy == '+str(max(history.history['val_acc'])))
## Predict on Test-set
print(">>> Predicting on test-set ... ")
submission_df = pd.read_csv("data/sample_submission.csv")
print(submission_df.head())
test_datagen = ImageDataGenerator()
data = bson.decode_file_iter(open(test_bson_path, "rb"))
with tqdm(total=num_test_products) as pbar:
for c, d in enumerate(data):
product_id = d["_id"]
num_imgs = len(d["imgs"])
batch_x = np.zeros((num_imgs, 180, 180, 3), dtype=K.floatx())
for i in range(num_imgs):
bson_img = d["imgs"][i]["picture"]
# Load and preprocess the image.
img = load_img(io.BytesIO(bson_img), target_size=(180, 180))
x = img_to_array(img)
x = preprocess_image(x)
# = test_datagen.random_transform(x)
# = test_datagen.standardize(x)
# Add the image to the batch.
batch_x[i] = x
prediction = model.predict(batch_x, batch_size=num_imgs)
avg_pred = prediction.mean(axis=0)
cat_idx = np.argmax(avg_pred)
        submission_df.loc[submission_df.index[c], "category_id"] = idx2cat[cat_idx]  # .loc avoids chained-indexing assignment to a copy
pbar.update()
submission_df.to_csv(SUB_FILE, compression="gzip", index=False)
|
"""Hyperparameter search strategies."""
import itertools
import json
import random
def generate_trials(strategy, flat_params, nb_trials=None):
r"""Generates the parameter combinations to search.
Two search strategies are implemented:
1. `grid_search`: creates a search space that consists of the
product of all flat_params. If `nb_trials` is specified
the first `nb_trials` combinations are searched.
2. `random_search`: Creates random combinations of the
hyperparameters. Can be used for a more efficient search.
See (Bergstra and Bengio, 2012) for more details.
:param strategy: The hyperparameter search to strategy. Can be
one of: {`grid_search`, `random`}.
:param flat_params: The hyperparameter arguments to iterate over.
:param nb_trials: The number of hyperparameter combinations to try.
Generates the parameter combinations for each requested trial
:param strategy:
:param flat_params:
:param nb_trials: The number of trials to un.
:return:
"""
if strategy == 'grid_search':
trials = generate_grid_search_trials(flat_params, nb_trials)
return trials
elif strategy == 'random_search':
trials = generate_random_search_trials(flat_params, nb_trials)
return trials
else:
raise ValueError(
('Unknown strategy "{}". Must be one of '
'{{grid_search, random_search}}').format(strategy))
def generate_grid_search_trials(flat_params, nb_trials):
"""
Standard grid search. Takes the product of `flat_params`
to generate the search space.
    :param flat_params: The lists of hyperparameter values to search.
    :param nb_trials: If given, only the first `nb_trials` combinations
        from the search space are returned; if None, all combinations
        are returned.
    :return: A list of parameter combination tuples.
"""
trials = list(itertools.product(*flat_params))
if nb_trials:
trials = trials[0:nb_trials]
return trials
def generate_random_search_trials(params, nb_trials):
"""
Generates random combination of hyperparameters to try.
See (Bergstra and Bengio, 2012) for more details.
    :param params: The lists of hyperparameter values to search.
    :param nb_trials: The number of trials to run.
    :return: A list of parameter combinations.
"""
if nb_trials is None:
raise TypeError(
            '`random_search` strategy requires nb_trials to be an int.')
results = []
# ensures we have unique results
seen_trials = set()
# shuffle each param list
potential_trials = 1
for param in params:
random.shuffle(param)
potential_trials *= len(param)
# we can't sample more trials than are possible
max_iters = min(potential_trials, nb_trials)
# then for the nb of trials requested, create a new param tuple
# by picking a random integer at each param level
while len(results) < max_iters:
trial = []
for param in params:
sampled_param = random.sample(param, 1)[0]
trial.append(sampled_param)
# verify this is a unique trial so we
# don't duplicate work
trial_str = json.dumps(trial)
if trial_str not in seen_trials:
seen_trials.add(trial_str)
results.append(trial)
return results
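# Minimal usage sketch: assumes each entry of `flat_params` is a list of
# candidate values for one hyperparameter; the parameter names below are
# illustrative placeholders, not part of any project configuration.
if __name__ == '__main__':
    learning_rates = [0.1, 0.01, 0.001]
    batch_sizes = [32, 64]

    # Grid search: the full Cartesian product, here 3 * 2 = 6 combinations.
    grid = generate_trials('grid_search', [learning_rates, batch_sizes])
    print(len(grid), grid[:2])

    # Random search: nb_trials is required and is capped at the number of
    # possible unique combinations.
    rand = generate_trials('random_search', [learning_rates, batch_sizes], nb_trials=4)
    print(len(rand), rand[:2])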
|
from .src.core import show_dataset, list_datasets, get_dataset
__version__ ="0.0.1"
__author__ = "Siddesh Sambasivam Suseela"
|
import time
import sqlite3 as db
from datetime import datetime as dt
con = db.connect("parentsdb.db")
with con:
#hostsPath="hosts"
hostsPath="C:\Windows\System32\drivers\etc\hosts"
redirect="127.0.0.1"
cur = con.cursor()
    phone_number = int(input("Enter phone number: "))
web = ('SELECT websites FROM parents WHERE phone_no = ?')
    cur.execute(web, (phone_number,))
websites = cur.fetchone()
while True:
if dt(dt.now().year,dt.now().month,dt.now().day,8) < dt.now() < dt(dt.now().year,dt.now().month,dt.now().day,22):
print ("Working hours...")
with open(hostsPath,'r+') as file:
content=file.read()
for site in websites:
if site in content:
pass
else:
file.write(redirect+" "+site+"\n")
else:
with open(hostsPath,'r+') as file:
content=file.readlines()
file.seek(0)
for line in content:
if not any(site in line for site in websites):
file.write(line)
file.truncate()
print ("Fun hours...")
time.sleep(5)
|
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.config import settings
from app.routes import sentence, text_complexity, tokens
app = FastAPI(
title=settings.PROJECT_NAME,
docs_url=f"{settings.API_STR}/docs",
openapi_url=f"{settings.API_STR}/openapi.json",
)
app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(tokens.router, prefix=settings.API_STR)
app.include_router(sentence.router, prefix=settings.API_STR)
app.include_router(text_complexity.router, prefix=settings.API_STR)
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
__author__ = "ajsanchezsanz"
__copyright__ = "2015"
__credits__ = ["ajsanchezsanz","ffranz"]
__license__ = "Apache"
__version__ = "0.0.1"
__maintainer__ = "ajsanchezsanz,ffranz"
__email__ = "alberto@sinfonier-project.net"
__status__ = "Developing"
import json
import requests
class StormUIAPI:
def __init__(self, stormhost="localhost", stormport=8080):
self.HOST = "http://"+stormhost+":"+str(stormport)
# GET Operations
######################################
# /api/v1/cluster/configuration (GET)
# Returns the cluster configuration.
######################################
def getClusterConfiguration(self):
url = self.HOST+"/api/v1/cluster/configuration"
return requests.get(url).json()
######################################
# /api/v1/cluster/summary (GET)
# Returns cluster summary information such as nimbus uptime or number of supervisors.
######################################
def getClusterSummary(self):
url = self.HOST+"/api/v1/cluster/summary"
return requests.get(url).json()
######################################
# /api/v1/supervisor/summary (GET)
# Returns summary information for all supervisors.
######################################
def getSupervisorSummary(self):
url = self.HOST+"/api/v1/supervisor/summary"
return requests.get(url).json()
######################################
# /api/v1/topology/summary (GET)
# Returns summary information for all topologies.
######################################
def getTopologySummary(self):
url = self.HOST+"/api/v1/topology/summary"
return requests.get(url).json()
######################################
# /api/v1/topology/:id (GET)
# Returns topology information and statistics. Substitute id with topology id.
######################################
def getTopology(self,topologyid):
url = self.HOST+"/api/v1/topology/"+topologyid
return requests.get(url).json()
######################################
# /api/v1/topology/:id/component/:component (GET)
# Returns detailed metrics and executor information
######################################
def getTopologyComponent(self,topologyid, componentid):
url = self.HOST+"/api/v1/topology/"+topologyid+"/component/"+componentid
return requests.get(url).json()
# POST Operations
######################################
# /api/v1/uploadTopology (POST)
# uploads a topology.
######################################
def uploadTopology(self,topologyConfig, topologyJar):
return "Not implemented yet in this version"
#url = self.HOST+"/api/v1/uploadTopology"
#return requests.get(url).json()
######################################
# /api/v1/topology/:id/activate (POST)
# Activates a topology.
######################################
def activateTopology(self,topologyid):
url = self.HOST+"/api/v1/topology/"+topologyid+"/activate"
return requests.post(url).json()
######################################
# /api/v1/topology/:id/deactivate (POST)
# Deactivates a topology.
######################################
def deactivateTopology(self,topologyid):
url = self.HOST+"/api/v1/topology/"+topologyid+"/deactivate"
return requests.post(url).json()
######################################
# /api/v1/topology/:id/rebalance/:wait-time (POST)
# Rebalances a topology.
# rebalanceOptions = {"rebalanceOptions": {"numWorkers": 2, "executors": { "spout" : "5", "split": 7, "count": 5 }}, "callback":"foo"}
######################################
def rebalanceTopology(self,topologyid, wait_time, rebalanceOptions={}):
url = self.HOST+"/api/v1/topology/"+topologyid+"/rebalance/"+wait_time
headers = {"Content-Type" : "application/json"}
return requests.post(url, data=json.dumps(rebalanceOptions), headers=headers).json()
######################################
# /api/v1/topology/:id/kill/:wait-time (POST)
# Kills a topology.
######################################
def killTopology(self,topologyid, wait_time):
url = self.HOST+"/api/v1/topology/"+topologyid+"/kill/"+wait_time
return requests.post(url).json()
######################################
######################################
# Get topology summary by name (GET)
# This function makes 1 StormUI API query
######################################
def getTopologySummaryByName(self,topologyname):
response = self.getTopologySummary()
topologies = response["topologies"]
for topo in topologies:
if topo["name"] == topologyname:
return topo
return {}
######################################
# Get topology detail by name (GET)
# This function makes 2 StormUI API queries
######################################
def getTopologyByName(self,topologyname):
response = self.getTopologySummary()
topologies = response["topologies"]
for topo in topologies:
if topo["name"] == topologyname:
response = self.getTopology(topo["id"])
return response
return {}
######################################
# Get worker by ID (GET)
# This function makes 2 StormUI API queries
######################################
## Return workers list from all spouts and all executors of the topology. Without duplicates.
def getWorkersByTopologyID(self,topologyid):
topo = self.getTopology(topologyid)
spoutids = [spout["spoutId"] for spout in topo["spouts"]]
workersLinks = list()
for spoutid in spoutids:
component = self.getTopologyComponent(topologyid, spoutid)
for executor in component["executorStats"]:
workersLinks.append(executor["workerLogLink"])
return list(set(workersLinks))
######################################
# Get worker by Name (GET)
# This function makes 3 StormUI API queries
######################################
## Return workers list from all spouts and all executors of the topology. Without duplicates.
def getWorkersByTopologyName(self,topologyname):
topo = self.getTopologyByName(topologyname)
spoutids = [spout["spoutId"] for spout in topo["spouts"]]
workersLinks = list()
for spoutid in spoutids:
component = self.getTopologyComponent(topo["id"], spoutid)
for executor in component["executorStats"]:
workersLinks.append(executor["workerLogLink"])
return list(set(workersLinks))
######################################
# Get error in topology by topology Name (GET)
# This function makes 2 StormUI API queries
######################################
def getErrorInTopologyByName(self,topologyname):
topo = self.getTopologyByName(topologyname)
if topo:
# Return True if there is an error in any module of the topology and False if not
return any(module["lastError"] for module in (topo["spouts"]+topo["bolts"]))
######################################
# Get error details in topology by topology Name (GET)
# This function makes 2 StormUI API queries
######################################
def getErrorDetailsInTopologyByName(self,topologyname):
topo = self.getTopologyByName(topologyname)
return [{module["spoutId"] : module["lastError"]} for module in topo["spouts"]]+[{module["boltId"] : module["lastError"]} for module in topo["bolts"]] if topo else None
|
from src.data.preprocessing.AbstractPreprocessing import AbstractPreprocessing
from skimage.transform import resize
class Resize5050(AbstractPreprocessing):
def preprocess(self, x):
return resize(x, [50, 50])
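# Minimal usage sketch: assumes AbstractPreprocessing needs no constructor
# arguments; the random image below is a placeholder input.
if __name__ == "__main__":
    import numpy as np
    dummy = np.random.rand(128, 128, 3)
    print(Resize5050().preprocess(dummy).shape)  # skimage keeps the channel axis: (50, 50, 3)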
|
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
# CHANGE THE ALLOWED_HOSTS LIST TO FIT YOUR NEEDS
ALLOWED_HOSTS = ['.borsachart.herokuapp.com', 'localhost', '127.0.0.1', '[::1]']
ADMINS = [(os.environ.get('ADMIN_USER'), os.environ.get('ADMIN_EMAIL'))]
# Email
EMAIL_HOST = 'smtp.yandex.ru'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = os.environ.get('EMAIL_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')
DEFAULT_FROM_EMAIL = os.environ.get('EMAIL_USER')
SERVER_EMAIL = os.environ.get('EMAIL_USER')
# Database
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
DATABASES = {
'default': dj_database_url.config(
default=os.environ.get('DATABASE_URL')
)
}
import os
import urlparse
redis_url = urlparse.urlparse(os.environ.get('REDIS_URL'))
CACHES = {
"default": {
"BACKEND": "redis_cache.RedisCache",
"LOCATION": "{0}:{1}".format(redis_url.hostname, redis_url.port),
"OPTIONS": {
"PASSWORD": redis_url.password,
"DB": 0,
}
}
}
REDIS_HOST = redis_url.hostname
REDIS_PORT = redis_url.port
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgi_redis.RedisChannelLayer",
"CONFIG": {
"hosts": [(REDIS_HOST, REDIS_PORT)],
},
"ROUTING": "charts.routing.channel_routing",
},
}
BROKER_URL = 'redis://{}:{}'.format(REDIS_HOST, REDIS_PORT)
CELERY_RESULT_BACKEND = 'redis://{}:{}'.format(REDIS_HOST, REDIS_PORT)
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Disable browsable API
# REST_FRAMEWORK = {
# 'DEFAULT_RENDERER_CLASSES': (
# 'rest_framework.renderers.JSONRenderer',
# )
# }
# LOGGING
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] [%(levelname)s] [%(name)s] [%(lineno)s] %(message)s",
'datefmt': "%d/%m/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'assets_rotating_file': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': os.path.join(logsdir, 'assets.log'),
'maxBytes': 1024 * 1024 * 10,
'backupCount': 10,
},
'template_loader_rotating_file': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': os.path.join(logsdir, 'template_loader.log'),
'maxBytes': 1024 * 1024 * 10,
'backupCount': 10,
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'assets': {
'handlers': ['assets_rotating_file'],
'level': 'INFO',
},
'template_loader': {
'handlers': ['template_loader_rotating_file'],
'level': 'INFO',
},
}
}
|
import os
import yaml
import numpy as np
from mspasspy.ccore.utility import MetadataDefinitions
from mspasspy.ccore.utility import MDtype
from mspasspy.ccore.utility import MsPASSError
def index_data(filebase, db, ext='d3C', verbose=False):
"""
Import function for data from antelope export_to_mspass.
This function is an import function for Seismogram objects created
by the antelope program export_to_mspass. That program writes
header data as a yaml file and the sample data as a raw binary
fwrite of the data matrix (stored in fortran order but written
as a contiguous block of 3*npts (number of samples) double values.
This function parses the yaml file and adds three critical metadata
entries: dfile, dir, and foff. To get foff values the function
reads the binary data file and gets foff values by calls to tell.
It then writes these entries into MongoDB in the wf collection
of a database. Readers that want to read this raw data will
need to use dir, dfile, and foff to find the right file and read point.
:param filebase: is the base name of the dataset to be read and indexed.
The function will look for filebase.yaml for the header data and
filebase.ext (Arg 3 defaulting to d3C).
:param db: is the MongoDB database handler
    :param ext: is the file extension for the sample data (default is 'd3C').
    :param verbose: if True, print progress information (default is False).
"""
# This loads default mspass schema
mdef=MetadataDefinitions()
yamlfile=filebase+'.yaml'
fh=open(yamlfile)
d=yaml.load(fh,Loader=yaml.FullLoader)
if(verbose):
print('Read data for ',len(d),' objects')
fh.close()
# Set up to add to wf collection
    # This is not general, but works for this test with mongo running under docker
collection=db.wf
dfile=filebase+'.'+ext
    fh=open(dfile,'rb')  # binary mode so tell() and np.fromfile operate on the raw sample data
# This is needed by the numpy reader
dtyp=np.dtype('f8')
dir=os.path.dirname(os.path.realpath(dfile))
dfile = os.path.basename(os.path.realpath(dfile))
if(verbose):
print('Setting dir =',dir,' and dfile=',dfile,' for all input')
print('Make sure this file exists before trying to read these data')
print('This program only builds the wf collection in the database')
print('Readers of the raw data will access the sample data from the dir+dfile path')
for i in range(len(d)):
pyd={} # this is essentially a required python declaration
# Since the source is assumed an antelope css3.0 database we
# assume these will be defined. Relating them back to the original
# source would be impossible without these in css3.0 so shouldn't be
# an issue
if(verbose):
print('Working on sta=',d[i]['sta'],' and evid=',d[i]['evid'])
keys=d[i].keys()
for k in keys:
try:
typ=mdef.type(k)
if(typ==MDtype.Double or typ==MDtype.Real64 or typ==MDtype.Real32):
pyd[k]=float(d[i][k])
elif(typ==MDtype.Int64 or typ==MDtype.Int32):
pyd[k]=int(d[i][k])
elif(typ==MDtype.String):
pyd[k]=str(d[i][k])
                elif(typ==MDtype.Boolean):  # compare the looked-up type, not the builtin 'type'
pyd[k]=bool(d[i][k])
else:
# These are not optional - always print these if
# this happens to warn user
print("Warning(index_data): undefined type for key=",k)
print("attribute will not be copied to database")
except MsPASSError:
# as above always print this as a warning
print("Warning(index_data): key =",k," is undefined - skipped")
pyd['dir']=dir
pyd['dfile']=dfile
ns=pyd['npts']
ns3c=3*ns
foff=fh.tell()
pyd['foff']=foff
wfid=collection.insert_one(pyd).inserted_id
junk=np.fromfile(fh,dtype=dtyp,count=ns3c)
if(verbose):
print("Finished with file=",dfile)
print("Size of wf collection is now ",collection.count_documents({})," documents")
|
from server.api import api, custom_types
from flask_restful import Resource, reqparse, abort, marshal, marshal_with
from server import auth, db
from server.models.orm import TeacherModel, ClassroomModel
from server.parsing import parser
from server.parsing.utils import create_students_df
import pandas as pd
from server.config import RestErrors, ValidatorsConfig
from server.models.marshals import classrooms_list_fields, classroom_resource_fields
class ClassroomsResource(Resource):
method_decorators = [auth.login_required]
def __init__(self):
super().__init__()
self._post_args = reqparse.RequestParser(bundle_errors=True)
self._post_args.add_argument('name', type=str, required=True)
self._post_args.add_argument('students_file', type=custom_types.students_file, location='files', required=True)
self._put_args = reqparse.RequestParser(bundle_errors=True)
self._put_args.add_argument('new_name', type=str, location="json", required=True)
def get(self, class_id=None):
if class_id is None:
return marshal(auth.current_user().classrooms, classrooms_list_fields)
current_class = ClassroomModel.query.filter_by(id=class_id, teacher=auth.current_user()).first() # Making sure the class belongs to the current user
if current_class is None:
abort(400, message=RestErrors.INVALID_CLASS)
return marshal(current_class, classroom_resource_fields)
@marshal_with(classroom_resource_fields)
def post(self, class_id=None):
if class_id:
abort(404, message=RestErrors.INVALID_ROUTE)
if len(auth.current_user().classrooms) >= ValidatorsConfig.MAX_CLASSROOMS:
abort(400, message=RestErrors.MAX_CLASSROOMS)
args = self._post_args.parse_args()
new_class = ClassroomModel(name=args['name'], teacher=auth.current_user())
db.session.add(new_class)
db.session.commit()
args['students_file']['class_id'] = pd.Series([new_class.id] * args['students_file'].shape[0])
args['students_file'].to_sql('student', con=db.engine, if_exists="append", index=False)
return new_class
def put(self, class_id=None):
if class_id is None:
abort(404, message=RestErrors.INVALID_ROUTE)
args = self._put_args.parse_args()
current_class = ClassroomModel.query.filter_by(id=class_id, teacher=auth.current_user()).first() # Making sure the class belongs to the current user
if current_class is None:
abort(400, message=RestErrors.INVALID_CLASS)
current_class.name = args['new_name']
db.session.commit()
return "", 204
def delete(self, class_id=None):
if class_id is None: # Deleting all classes
teacher_classes_id = db.session.query(ClassroomModel.id).filter_by(teacher=auth.current_user()).all()
for class_data in teacher_classes_id:
current_class = ClassroomModel.query.filter_by(id=class_data.id, teacher=auth.current_user()).first()
db.session.delete(current_class)
db.session.commit()
return "", 204
current_class = ClassroomModel.query.filter_by(id=class_id, teacher=auth.current_user()).first() # Making sure the class belongs to the current user
if current_class is None:
abort(400, message=RestErrors.INVALID_CLASS)
db.session.delete(current_class)
db.session.commit()
return "", 204
api.add_resource(ClassroomsResource, "/classrooms", "/classrooms/<int:class_id>")
|
import sys
sys.path.append('../..')
from pyramid.config import Configurator
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from sqlalchemy import engine_from_config
from social.apps.pyramid_app.models import init_social
from .models import DBSession, Base
def main(global_config, **settings):
"""This function returns a Pyramid WSGI application."""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
session_factory = UnencryptedCookieSessionFactoryConfig('thisisasecret')
config = Configurator(settings=settings,
session_factory=session_factory,
autocommit=True)
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_request_method('example.auth.get_user', 'user', reify=True)
config.add_route('home', '/')
config.add_route('done', '/done')
config.include('example.settings')
config.include('example.local_settings')
config.include('social.apps.pyramid_app')
init_social(config, Base, DBSession)
config.scan()
config.scan('social.apps.pyramid_app')
return config.make_wsgi_app()
|
# Generated by Django 2.2 on 2019-05-28 22:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
('sites', '0002_alter_domain_unique'),
('taggit', '0002_auto_20150616_2121'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Badge',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('desc', models.CharField(default='', max_length=200)),
('type', models.IntegerField(choices=[(0, 'Bronze'), (1, 'Silver'), (2, 'Gold')], default=0)),
('icon', models.CharField(default='', max_length=250)),
('uid', models.CharField(max_length=32, unique=True)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.IntegerField(choices=[(0, 'Pending'), (1, 'Open'), (2, 'Closed'), (3, 'Deleted')], db_index=True, default=1)),
('type', models.IntegerField(choices=[(0, 'Question'), (1, 'Answer'), (6, 'Comment'), (2, 'Job'), (3, 'Forum'), (8, 'Tutorial'), (7, 'Data'), (4, 'Page'), (10, 'Tool'), (11, 'News'), (5, 'Blog'), (9, 'Bulletin Board')], db_index=True)),
('title', models.CharField(db_index=True, max_length=200)),
('rank', models.FloatField(blank=True, db_index=True, default=0)),
('answer_count', models.IntegerField(blank=True, db_index=True, default=0)),
('accept_count', models.IntegerField(blank=True, default=0)),
('reply_count', models.IntegerField(blank=True, db_index=True, default=0)),
('comment_count', models.IntegerField(blank=True, default=0)),
('vote_count', models.IntegerField(blank=True, db_index=True, default=0)),
('thread_votecount', models.IntegerField(db_index=True, default=0)),
('view_count', models.IntegerField(blank=True, db_index=True, default=0)),
('book_count', models.IntegerField(default=0)),
('subs_count', models.IntegerField(default=0)),
('creation_date', models.DateTimeField(db_index=True)),
('lastedit_date', models.DateTimeField(db_index=True)),
('sticky', models.BooleanField(default=False)),
('content', models.TextField(default='')),
('html', models.TextField(default='')),
('tag_val', models.CharField(blank=True, default='', max_length=100)),
('uid', models.CharField(db_index=True, max_length=32, unique=True)),
('spam', models.IntegerField(choices=[(0, 'Spam'), (1, 'Not spam'), (2, 'Default')], default=2)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('last_contributor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='contributor', to=settings.AUTH_USER_MODEL)),
('lastedit_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='editor', to=settings.AUTH_USER_MODEL)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='forum.Post')),
('root', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='descendants', to='forum.Post')),
('site', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sites.Site')),
('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
('thread_users', models.ManyToManyField(related_name='thread_users', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.IntegerField(choices=[(0, 'Upvote'), (4, 'Empty'), (1, 'DownVote'), (2, 'Bookmark'), (3, 'Accept')], db_index=True, default=4)),
('date', models.DateTimeField(auto_now_add=True, db_index=True)),
('uid', models.CharField(max_length=32, unique=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='forum.Post')),
],
),
migrations.CreateModel(
name='PostView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.GenericIPAddressField(blank=True, default='', null=True)),
('date', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_views', to='forum.Post')),
],
),
migrations.CreateModel(
name='Award',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('uid', models.CharField(max_length=32, unique=True)),
('badge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.Badge')),
('post', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='forum.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uid', models.CharField(max_length=32, unique=True)),
('type', models.IntegerField(choices=[(2, 'no messages'), (3, 'default'), (0, 'local messages'), (1, 'email'), (4, 'email for every new thread (mailing list mode)')], default=0)),
('date', models.DateTimeField()),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subs', to='forum.Post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('user', 'post')},
},
),
]
|
""" Plot matrix of pairwise distance histograms for two sets of conformations. """
from conformation.compare_pairwise_distance_histograms import compare_pairwise_distance_histograms, Args
if __name__ == '__main__':
compare_pairwise_distance_histograms(Args().parse_args())
|
import numpy as np
import sys
sys.setrecursionlimit(10000)
class Knapsack:
def __init__(self, txt_name):
self.size = 0
self.num_items = 0
self.items = [(0, 0)] # (value, weight)
self.array = np.array([])
self.read_txt(txt_name)
self.cache = {}
self.value = self.compute_value(self.size, self.num_items)
def read_txt(self, txt_name):
with open(txt_name) as f:
first_line = f.readline().rstrip("\n").split(" ")
print("Knapsack size: " + first_line[0])
self.size = int(first_line[0])
print("Number of items: " + first_line[1])
self.num_items = int(first_line[1])
            self.array = np.zeros(shape=(self.num_items, self.size), dtype=int)  # preallocated table (unused; compute_value memoizes results in self.cache instead)
for line in f:
str_list = line.rstrip("\n").split(' ')
item = tuple(map(int, str_list))
self.items.append(item)
def compute_value(self, weight, index):
if index == 0 or weight == 0:
return 0
(this_value, this_weight) = self.items[index]
        # this item's weight exceeds the remaining capacity: skip it and recurse on the previous item
if this_weight > weight:
if (weight, index - 1) not in self.cache:
self.cache[(weight, index - 1)] = self.compute_value(weight, index - 1)
return self.cache[(weight, index - 1)]
else:
# solution including this item
if (weight - this_weight, index - 1) not in self.cache:
self.cache[(weight - this_weight, index - 1)] = self.compute_value(weight - this_weight, index - 1)
solution_including_this_item = this_value + self.cache[(weight - this_weight, index - 1)]
if (weight, index - 1) not in self.cache:
self.cache[(weight, index - 1)] = self.compute_value(weight, index - 1)
solution_without_this_item = self.cache[(weight, index - 1)]
return max(solution_including_this_item, solution_without_this_item)
if __name__ == "__main__":
# k = Knapsack("knapsack1.txt")
k = Knapsack("knapsack_big.txt")
print(k.value)
|
from flask import render_template, request, session, redirect
from . import app, db, default_error, send_email
from .models import School, Department, Course, CourseRequest
@app.route('/')
def index():
""" Renders the home page where the user selects their school. """
return render_template('index.html', schools=School.query.all())
def GetSchool(school):
""" Converts a school name into the database object if it exists. """
return School.query.filter_by(name=school).first()
def GetCourses(school, department):
""" Gets a department's courses from its name and the schools name. """
school = GetSchool(school)
if school is None:
return None
department = Department.query.filter(Department.school_id == school.id,
Department.name == department).first()
if department is None:
return None
return department.courses.all()
@app.route('/<school_name>/')
def department_page(school_name):
""" Renders the page where the user selects their department. """
school = School.query.filter(School.name == school_name).first()
if school is None:
return default_error, 400
return render_template('departments.html', departments=school.departments.all())
@app.route('/<school>/<department>')
def course_page(school, department):
""" Renders the page where the user selects their course. """
courses = GetCourses(school, department)
if courses is None:
return default_error, 400
return render_template('courses.html', courses=courses)
@app.route('/email', methods=['POST'])
def email_page():
""" Renders the email page. """
if 'course' in request.form:
return render_template('email.html', course_id=request.form['course'])
return default_error, 400
@app.route('/confirmation', methods=['POST'])
def confirmation_page():
""" Renders the confirmation page if the users request is successful or an already exists page. """
if 'course' in request.form and 'email' in request.form:
email = request.form['email']
course_id = request.form['course']
course = Course.query.filter(Course.id == course_id).first()
if course is None:
return default_error, 400
previous_request = CourseRequest.query.filter(CourseRequest.email == email,
CourseRequest.course_id == course_id).first()
if previous_request is not None:
return render_template('alreadyexists.html', course=course, email=email)
db.session.add(CourseRequest(email=email, course_id=course_id))
db.session.commit()
        send_email([email], 'Course Watcher Confirmation',
'This email confirms that we will send you an email when %s opens.'
% (course.name))
return render_template('confirmation.html', course=course, email=email)
return default_error, 400
|
from pynput import keyboard
import pyglet
from motor_test import motor
M1 = motor(18, 20)
M2 = motor(12, 21)
M3 = motor(13, 5)
M4 = motor(19, 6)
motors = [M1, M2, M3, M4]
window = pyglet.window.Window(width=10, height=10)
@window.event
def on_key_press(key, mod):
key = chr(key)
print("Pressed", key)
if (key == "w"):
for mot in motors:
mot.setDirection(1, 0)
mot.setSpeed(50)
elif (key == "s"):
for mot in motors:
mot.setDirection(0, 1)
mot.setSpeed(50)
elif (key == "d"):
for mot in motors:
mot.setSpeed(50)
motors[0].setDirection(1,0)
motors[1].setDirection(1,0)
motors[2].setDirection(0,1)
motors[3].setDirection(0,1)
elif (key == "a"):
for mot in motors:
mot.setSpeed(50)
motors[0].setDirection(0,1)
motors[1].setDirection(0,1)
motors[2].setDirection(1,0)
motors[3].setDirection(1,0)
elif (key == "q"):
for mot in motors:
mot.setSpeed(0)
def rel(key):
print("Something released")
# Alternative input handling with pynput (unused); note that pynput's Listener
# takes on_press/on_release keyword arguments:
#listener = keyboard.Listener(on_press=on_key_press, on_release=rel)
#with keyboard.Listener(on_press=on_key_press, on_release=rel) as l:
#    l.join()
pyglet.app.run()
#listener.start()  # only meaningful if the pynput listener above is created
import time
while (True):
print("Hello")
time.sleep(1)
|
""" Data wrapper class
Manages loading data and prepping batches. Dynamically stores and loads chunks of data to disk to support datasets
larger than the available RAM. Uses numpy for saving and loading data, as it is much faster than pickle and allows
compression.
It can be instantiated with 4 different shuffle_* flags, each responsible for random behavior. If a deterministic
order of data is required in an already existing object, it is enough to temporarily set shuffle_batch_on_return and
shuffle_in_chunk_on_chunk_reload flags to False before iterating.
Parameters:
data_path - str or list - path to file(s) containing data
train_ids - int or list - ids specifying datasets used for training, may be one or multiple files
valid_id - int - id specifying dataset used for validation, has to be one file only
test_id - int or None - id specifying dataset used for testing, may be one file or none
eval_ids - int or list or None - ids specifying datasets that are used for neither of the above purposes, but just
evaluation
create_chunks - bool - if False, does not load the data from the files specified, but from temporary chunks. If
chunks do not exist the program fails. Use it to speed up loading huge datasets.
chunk_size - int - number of tweets in one chunk
batch_size - int - number of tweets in one training batch
shuffle_chunks_on_load - bool - if True, shuffles the chunks while loading data from files
shuffle_in_chunks_on_load - bool - if True, shuffles tweets inside chunks while loading data from files
shuffle_batch_on_return - bool - if True, shuffles tweets inside batch while iterating on dataset
shuffle_in_chunk_on_chunk_reload - bool - if True, shuffles tweets inside the chunk whenever chunk is loaded
rng_seed - int or None - random number generator seed
temp_dir - str - path to the directory to store the chunks in
"""
from __future__ import print_function
import cPickle as pickle
import numpy as np
import os
__all__ = [
"DataWrapper"
]
class DataWrapper:
def __init__(self,
data_path,
train_ids,
valid_id,
test_id=None,
eval_ids=None,
create_chunks=True,
chunk_size=10000,
batch_size=200,
shuffle_chunks_on_load=True,
shuffle_in_chunks_on_load=True,
shuffle_batch_on_return=True,
shuffle_in_chunk_on_chunk_reload=True,
rng_seed=None,
temp_dir='temp_chunks',
print_progress=True):
self.data_path = data_path
if isinstance(self.data_path, basestring):
self.data_path = [self.data_path]
self.dataset_names = []
for path in self.data_path:
self.dataset_names.append(os.path.basename(path))
self.temp_dir = temp_dir
self.chunk_size = chunk_size // batch_size * batch_size # make chunk_size a multiple of batch_size
self.batch_size = batch_size
self.shuffle_chunks_on_load = shuffle_chunks_on_load
self.shuffle_in_chunks_on_load = shuffle_in_chunks_on_load
self.shuffle_batch_on_return = shuffle_batch_on_return
self.shuffle_in_chunk_on_chunk_reload = shuffle_in_chunk_on_chunk_reload
if rng_seed is not None:
np.random.seed(rng_seed)
self.rng_seed = rng_seed
self.create_chunks = create_chunks
self.n_datasets = len(self.data_path)
self.print_progress = print_progress
if train_ids is None:
raise ValueError('Specify at least one train id.')
if isinstance(train_ids, (int, long)):
train_ids = [train_ids]
self.train_ids = train_ids
if valid_id is None:
raise ValueError('Specify at least one validation id.')
self.valid_id = valid_id
self.test_id = test_id
if isinstance(eval_ids, (int, long)):
eval_ids = [eval_ids]
if eval_ids is None:
eval_ids = []
self.eval_ids = eval_ids
self.max_len = 0
self.labels = []
self.n_labels = 0
self.charset_map = {}
self.charset_size = 0
self.n_tweets = []
self.n_chunks = []
self.n_batches = []
self.x = None
self.x_mask = None
self.y = None
self.current_batch = 0
self.current_chunk = 0
self.current_data = 0
self.__load_data_params()
self.__load_data()
def __iter__(self):
return self
def next(self):
if self.current_batch < self.n_batches[self.current_data]:
batch = self.__get_batch(self.current_batch)
self.current_batch += 1
return batch
else:
self.current_batch = 0
raise StopIteration()
def set_current_data(self, no):
if 0 <= no < len(self.data_path):
self.current_data = no
self.current_batch = 0
self.current_chunk = 0
self.__load_chunk(0)
def __get_batch(self, batch_id):
if self.n_chunks[self.current_data] == 1:
current_batch_in_chunk = batch_id
else:
# Load another chunk if necessary
if not self.__is_batch_in_chunk(batch_id, self.current_chunk):
self.__load_chunk(self.__get_chunk_id_of_batch(batch_id))
current_batch_in_chunk = batch_id % (self.chunk_size / self.batch_size)
current_slice = range(current_batch_in_chunk * self.batch_size,
(current_batch_in_chunk + 1) * self.batch_size)
if self.shuffle_batch_on_return:
np.random.shuffle(current_slice)
return self.x[current_slice], self.x_mask[current_slice], self.y[current_slice]
def __is_batch_in_chunk(self, batch_id, chunk_id):
return self.chunk_size * chunk_id <= batch_id * self.batch_size < self.chunk_size * (chunk_id + 1)
def __get_chunk_id_of_batch(self, batch_id):
return batch_id * self.batch_size // self.chunk_size
def __load_data_params(self):
if self.create_chunks:
for i_path, path in enumerate(self.data_path):
with open(path, 'rb') as pfile:
tweets = pickle.load(pfile)
self.n_tweets.append(len(tweets))
for iTweet, tweet_entry in enumerate(tweets):
tweet_text = tweet_entry[1]
tweet_sentiment = tweet_entry[2]
if len(tweet_text) > self.max_len:
self.max_len = len(tweet_text)
for symbol in tweet_text:
if symbol not in self.charset_map:
self.charset_map[symbol] = self.charset_size
self.charset_size += 1
if tweet_sentiment not in self.labels:
self.labels.append(tweet_sentiment)
self.n_labels += 1
self.n_chunks.append((self.n_tweets[i_path] - 1) / self.chunk_size + 1)
self.n_batches.append((self.n_tweets[i_path] - 1) / self.batch_size + 1)
self.__save_chunk_info()
else:
self.__load_chunk_info()
def __save_chunk_info(self):
if not os.path.isdir(self.temp_dir):
os.mkdir(self.temp_dir)
with open(os.path.join(self.temp_dir, 'chunk_info.p'), 'wb') as pfile:
pickle.dump([self.max_len,
self.labels,
self.n_labels,
self.charset_map,
self.charset_size,
self.n_tweets,
self.n_chunks,
self.n_batches], pfile)
def __load_chunk_info(self):
with open(os.path.join(self.temp_dir, 'chunk_info.p'), 'rb') as pfile:
[self.max_len,
self.labels,
self.n_labels,
self.charset_map,
self.charset_size,
self.n_tweets,
self.n_chunks,
self.n_batches] = pickle.load(pfile)
def __load_data(self):
if self.create_chunks:
self.symbols_loaded = 0
for i_path, path in enumerate(self.data_path):
self.current_data = i_path
with open(path, 'rb') as pfile:
if self.print_progress:
print(self.dataset_names[i_path] + ': ', end='')
step = max(self.n_tweets[i_path] // 10 + 1, 1)
offset = step * 10 - self.n_tweets[i_path] + 1
if self.print_progress and self.n_tweets[i_path] < 10:
print('.' * (10 - self.n_tweets[i_path]), end='')
chunk_ids = range(self.n_chunks[i_path])
if self.shuffle_chunks_on_load:
# leave the last chunk at its place as it is most probably not full
last_id = chunk_ids[-1]
chunk_ids = chunk_ids[:-1]
np.random.shuffle(chunk_ids)
chunk_ids.append(last_id)
# limit the size in case there is not enough data to fill the whole chunk
if self.n_chunks[i_path] > 1:
data_size = self.chunk_size
else:
data_size = self.n_batches[i_path] * self.batch_size
tweets = pickle.load(pfile)
self.__reset_data(data_size)
chunk_id = 0
for iTweet, tweet_entry in enumerate(tweets):
if self.print_progress and not (iTweet + offset) % step:
print('.', end='')
iTweet %= self.chunk_size
tweet_text = tweet_entry[1]
tweet_sentiment = tweet_entry[2]
for iSym, symbol in enumerate(tweet_text):
self.x[iTweet, iSym] = self.charset_map[symbol]
self.x_mask[iTweet, iSym] = 1
self.symbols_loaded += 1
self.y[iTweet] = int(tweet_sentiment)
if iTweet == self.chunk_size - 1:
# chunk full - save
if self.shuffle_in_chunks_on_load:
self.__shuffle_data()
self.__save_chunk(chunk_ids[chunk_id])
if chunk_id == self.n_chunks[self.current_data] - 2:
# the last chunk may be smaller
data_size = (self.n_batches[i_path] * self.batch_size) % self.chunk_size
self.__reset_data(data_size)
chunk_id += 1
if chunk_id == self.n_chunks[self.current_data] - 1:
if self.shuffle_in_chunks_on_load:
self.__shuffle_data()
self.__save_chunk(chunk_ids[chunk_id])
if self.print_progress:
print('')
self.current_data = 0
self.__load_chunk(0)
def __encode1hot(self):
x_1hot = np.zeros((self.x.shape[0], self.x.shape[1], self.charset_size))
for iTweet, tweet in enumerate(self.x):
for iSym, symbol in enumerate(tweet):
if self.x_mask[iTweet, iSym] == 1:
x_1hot[iTweet, iSym, symbol] = 1
return x_1hot
def __reset_data(self, data_size):
self.x = np.zeros((data_size, self.max_len), dtype=np.uint32)
self.x_mask = np.zeros((data_size, self.max_len), dtype=np.uint32)
self.y = np.zeros(data_size, dtype=np.uint32)
def __shuffle_data(self):
current_slice = range(self.y.shape[0])
np.random.shuffle(current_slice)
self.x = self.x[current_slice]
self.x_mask = self.x_mask[current_slice]
self.y = self.y[current_slice]
def __save_chunk(self, chunk_id):
if not os.path.isdir(self.temp_dir):
os.mkdir(self.temp_dir)
file_path = os.path.join(self.temp_dir, 'chunk_' + str(self.current_data) + '_' + str(chunk_id) + '.npz')
with open(file_path, 'wb') as pfile:
np.savez_compressed(pfile, x=self.x, x_mask=self.x_mask, y=self.y)
def __load_chunk(self, chunk_id):
file_path = os.path.join(self.temp_dir, 'chunk_' + str(self.current_data) + '_' + str(chunk_id) + '.npz')
with np.load(file_path) as vals:
self.x = vals['x']
self.x_mask = vals['x_mask']
self.y = vals['y']
self.current_chunk = chunk_id
if self.shuffle_in_chunk_on_chunk_reload:
self.__shuffle_data()
self.x = self.__encode1hot()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from telegram.ext import Updater, MessageHandler, Filters
import yaml
from telegram_util import log_on_fail, AlbumResult
import album_sender
with open('token') as f:
tele = Updater(f.read().strip(), use_context=True)
debug_group = tele.bot.get_chat(-1001198682178)
@log_on_fail(debug_group)
def cut(update, context):
msg = update.effective_message
if msg.chat_id == debug_group.id or msg.media_group_id:
return
file = msg.document or (msg.photo and msg.photo[-1])
file_path = (file and file.get_file().file_path) or msg.text or ''
if not file_path.startswith('http'):
return
result = AlbumResult()
result.cap = msg.caption_markdown or msg.text_markdown or ''
result.imgs = [file_path]
album_sender.send_v2(msg.chat, result, send_all=True, size_factor=2.1)
tele.dispatcher.add_handler(MessageHandler(Filters.all, cut))
tele.start_polling()
tele.idle()
|
#!/usr/bin/env python
#-*- encoding=utf-8 -*-
'''
purpose: command-line management of the EMS (email marketing system)
author : set_daemon@126.com
date : 2017-06-23
history:
'''
import sys
file_dir = sys.path[0]
sys.path.insert(0, file_dir + "/./")
sys.path.insert(0, file_dir + "/../")
sys.path.insert(0, file_dir + "/../common")
sys.path.insert(0, file_dir + "/../clients")
reload(sys)
import threading
import uuid
import json
import base64
import datetime
from msg_client import MsgClient
from proto import msg_pb2
from utils import *
from mail_creator import MailCreator
from node_client import NodeClient,NodeInfo
class MailShell(object):
'''
    Commands are ultimately executed through the scheduler, covering:
    1) Mailbox management: bring online, update, take offline, and query mailbox status
    2) Job management: bring online, update, take offline, and query job status
    3) Task management: dispatched automatically by the scheduler; task progress can be queried per job
    4) Worker management: bring online, take offline, update working mailboxes, and query worker info
'''
cmds = [
"quit", "exit", "help", "mail", "job", "worker", "msg", "scheduler"
]
def __del__(self):
self.node_client.destroy(self.node_info)
def __init__(self, config):
self.config = config
self.start_time = datetime.datetime.now()
self.node_info = NodeInfo()
self.node_info.create(**{
"nodeType": "shell",
"nodeId": uuid.uuid1(),
"msgScheme": self.config["shell_config"]["scheme"],
"status": 1
})
self.node_info.set(**{
"channel": "msgchn:%s:%s" %(self.node_info.info["nodeType"]
,self.node_info.info["nodeId"]),
"instId": self.node_info.info["nodeId"],
"logTime": datetime.datetime.strftime(self.start_time, "%Y-%m-%d %H:%M:%S")
})
        # node management info
self.node_client = NodeClient(self.config["node_config"])
self.node_client.sync_node(self.node_info)
        # this node's message channel
self.msg_cli = MsgClient(**{
"scheme": self.config["shell_config"]["scheme"],
"channel_name": self.node_info.info["channel"],
})
        # scheduler message channel
node_keys = self.node_client.get_nodes("scheduler")
node_info = None
for key in node_keys:
key = key.lstrip('node:')
node_info = self.node_client.get_node_info(key)
if node_info.info["status"] == "1":
break
self.sch_msg_cli = MsgClient(**{
"scheme": node_info.info["msgScheme"],
"channel_name": node_info.info["channel"]
})
@staticmethod
def arg_parse(args, flags_predef):
'''
        Parse named and unnamed arguments.
        Argument format: -n huang -a 12
        flags format: flag definitions are separated by semicolons, for example:
        n/name:- the argument is called name (short form n); its values run until the next token starting with "-"
        n/name:+ the argument is called name (short form n); the number of '+' signs gives the number of values it takes
        n/name:  the argument is called name (short form n); it takes no value
'''
arg_defs = {}
flags = flags_predef.split(';')
for flag in flags:
segs = flag.split(':')
arg_name_segs = segs[0].split('/')
arg_num = 0
end_flag = ''
if segs[1] == '-':
end_flag = '-'
arg_num = -1
elif segs[1] == '':
arg_num = 0
else:
arg_num = len(segs[1].split('+'))
                # split yields one more segment than the number of '+' signs
if arg_num > 0:
arg_num -= 1
for arg_name in arg_name_segs:
arg_defs[arg_name] = {
'end_flag': end_flag,
'arg_num': arg_num
}
kv = {
"__NN":[]
}
i = 0
arg_len = len(args)
while i < arg_len:
arg = args[i]
if arg.startswith('-'):
arg_name = arg.lstrip('-')
if arg_name in arg_defs:
arg_def = arg_defs[arg_name]
kv[arg_name] = []
                    # check how values for this flag are terminated
                    if arg_def['end_flag'] == '-':
                        # collect values until the next token starting with '-'
                        i += 1
                        while i < arg_len and args[i].startswith('-') != True:
                            arg_v = args[i]
                            kv[arg_name].append(arg_v)
                            i += 1
else:
                        # take a fixed number of values
k = 0
i += 1
while i < arg_len and k < arg_def["arg_num"]:
arg_v = args[i]
kv[arg_name].append(arg_v)
k += 1
i += 1
                else:  # flag not defined: consume values until the next token starting with '-'
kv[arg_name] = []
i += 1
while i < arg_len and args[i].startswith('-') != True:
arg_v = args[i]
kv[arg_name].append(arg_v)
i += 1
else:
kv["__NN"].append(arg)
i += 1
return kv
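    # A worked example of the flag grammar above (illustrative values):
    #   MailShell.arg_parse(['-a', 'foo@example.com', '-s', '1'], 'a/account:+;s/ssl:+')
    #   returns {'__NN': [], 'a': ['foo@example.com'], 's': ['1']}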
def parse_cmd(self, cmdline):
segs = cmdline.strip('\t \n').split(' ')
seg_len = len(segs)
if seg_len < 1:
return None
cmd_name = segs[0]
if cmd_name not in MailShell.cmds:
return None
cmd = {
"name": cmd_name,
"op": "",
"params": None
}
if cmd_name == "quit" or cmd_name == "exit" or cmd_name == "help":
pass
else:
if seg_len < 2:
return None
cmd["op"] = segs[1]
if cmd_name == "mail":
cmd["params"] = MailShell.arg_parse(segs[2:], 'a/account:+;A/alias:+;p/password:+;h/host:+;P/port:+;s/ssl:+')
elif cmd_name == "job":
cmd["params"] = MailShell.arg_parse(segs[2:], 'j/job:+;f/job_file:+')
elif cmd_name == "worker":
cmd["params"] = MailShell.arg_parse(segs[2:], 'w/worker:+')
elif cmd_name == "scheduler":
pass
elif cmd_name == "msg":
#cmd["params"] = MailShell.arg_parse(segs[2:], '')
pass
else:
pass
return cmd
def exec_mail_cmd(self, cmd):
op = cmd["op"]
msg_data = ""
if op == "on":
            # validate and merge the parameters
req = msg_pb2.MailOnReq()
req.header.type = msg_pb2.MAIL_ON_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
for key in cmd["params"].keys():
param_v = cmd["params"][key]
if key in ["a", "account"]:
req.account = param_v[0]
elif key in ["A", "alias"]:
req.alias = unicode(param_v[0],"utf-8")
elif key in ["p", "password"]:
req.password = param_v[0]
elif key in ["h", "host"]:
req.smtpHost = param_v[0]
elif key in ["P", "port"]:
req.smtpPort = param_v[0]
elif key in ["s", "ssl"]:
ssl = int(param_v[0])
req.useSsl = (ssl == 1)
msg_data = json.dumps({
"msg_type": msg_pb2.MAIL_ON_REQ,
"msg_data": base64.b64encode(req.SerializeToString())
})
elif op == "off":
req = msg_pb2.MailOffReq()
req.header.type = msg_pb2.MAIL_OFF_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
for key in cmd["params"].keys():
param_v = cmd["params"][key]
if key in ["a", "account"]:
req.account = param_v[0]
msg_data = json.dumps({
"msg_type": msg_pb2.MAIL_OFF_REQ,
"msg_data": base64.b64encode(req.SerializeToString())
})
elif op == "update":
req = msg_pb2.MailUpdateReq()
req.header.type = msg_pb2.MAIL_UPDATE_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
for key in cmd["params"].keys():
param_v = cmd["params"][key]
if key in ["a", "account"]:
req.account = param_v[0]
elif key in ["A", "alias"]:
req.alias = unicode(param_v[0], 'utf-8')
elif key in ["p", "password"]:
req.password = param_v[0]
elif key in ["h", "host"]:
req.smtpHost = param_v[0]
elif key in ["P", "port"]:
req.smtpPort = param_v[0]
elif key in ["s", "ssl"]:
if param_v[0] == "0":
req.useSsl = False
else:
req.useSsl = True
msg_data = json.dumps({
"msg_type": msg_pb2.MAIL_UPDATE_REQ,
"msg_data": base64.b64encode(req.SerializeToString())
})
elif op == "status":
req = msg_pb2.MailStatusReq()
req.header.type = msg_pb2.MAIL_STATUS_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
for key in cmd["params"].keys():
param_v = cmd["params"][key]
if key in ["a", "account"]:
req.account = param_v[0]
msg_data = json.dumps({
"msg_type": msg_pb2.MAIL_STATUS_REQ,
"msg_data": base64.b64encode(req.SerializeToString())
})
else:
pass
self.sch_msg_cli.send_msg(msg_data)
def exec_job_cmd(self, cmd):
op = cmd["op"]
msg_data = ""
if op == "on":
req = msg_pb2.JobOnReq()
req.header.type = msg_pb2.JOB_ON_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
for key in cmd["params"].keys():
param_v = cmd["params"][key]
if key in ["f", "job_file"]:
                    # read the job description file
job_file = param_v[0]
file_data = open(job_file, 'r').read()
jobj = json.loads(file_data)
req.owner = jobj["owner"]
senders = get_mails(jobj["senders"])
req.senderAccounts.extend(senders)
to_accounts = get_mails(jobj["to"])
req.toAccounts.extend(to_accounts)
mail_desc_file = jobj["mail_desc_file"]
mail_creator = MailCreator()
mail_creator.load(mail_desc_file)
req.mailData = mail_creator.to_msg()
msg_data = json.dumps({
"msg_type": msg_pb2.JOB_ON_REQ,
"msg_data": base64.b64encode(req.SerializeToString())
})
elif op == "off":
req = msg_pb2.JobOffReq()
req.header.type = msg_pb2.JOB_OFF_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
for key in cmd["params"].keys():
param_v = cmd["params"][key]
if key in ["j", "job_id"]:
job_id = param_v[0]
req.jobId = job_id
msg_data = json.dumps({
"msg_type": msg_pb2.JOB_OFF_REQ,
"msg_data": base64.b64encode(req.SerializeToString())
})
elif op == "update":
req = msg_pb2.JobUpdateReq()
req.header.type = msg_pb2.JOB_UPDATE_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
for key in cmd["params"].keys():
param_v = cmd["params"][key]
if key in ["f", "job_file"]:
                    # read the job description file
job_file = param_v[0]
file_data = open(job_file, 'r').read()
jobj = json.loads(file_data)
req.owner = jobj["owner"]
senders = get_mails(jobj["senders"])
req.senderAccounts.extend(senders)
to_accounts = get_mails(jobj["to"])
req.toAccounts.extend(to_accounts)
mail_desc_file = jobj["mail_desc_file"]
mail_creator = MailCreator()
mail_creator.load(mail_desc_file)
req.mailData = mail_creator.to_msg()
elif key in ["j", "job_id"]:
req.jobId = param_v[0]
msg_data = json.dumps({
"msg_type": msg_pb2.JOB_UPDATE_REQ,
"msg_data": base64.b64encode(req.SerializeToString())
})
elif op == "status":
req = msg_pb2.JobStatusReq()
req.header.type = msg_pb2.JOB_STATUS_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
for key in cmd["params"].keys():
param_v = cmd["params"][key]
if key in ["j", "job_id"]:
job_id = param_v[0]
req.jobId = job_id
            # validate the parameters
msg_data = json.dumps({
"msg_type": msg_pb2.JOB_STATUS_REQ,
"msg_data": base64.b64encode(req.SerializeToString())
})
elif op == "list":
req = msg_pb2.JobListReq()
req.header.type = msg_pb2.JOB_LIST_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
msg_data = json.dumps({
"msg_type": req.header.type,
"msg_data": base64.b64encode(req.SerializeToString())
})
else:
pass
self.sch_msg_cli.send_msg(msg_data)
def exec_worker_cmd(self, cmd):
msg_data = ""
op = cmd["op"]
if op == "off":
req = msg_pb2.WorkerOffReq()
req.header.type = msg_pb2.WORKER_OFF_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
for key in cmd["params"].keys():
param_v = cmd["params"][key]
if key in ["w", "worker_id"]:
worker_id = param_v[0]
req.workerId = worker_id
msg_data = json.dumps({
"msg_type": req.header.type,
"msg_data": base64.b64encode(req.SerializeToString())
})
else:
pass
if msg_data != "":
self.sch_msg_cli.send_msg(msg_data)
def exec_scheduler_cmd(self, cmd):
msg_data = ""
op = cmd["op"]
if op == "off":
req = msg_pb2.SchedulerOffReq()
req.header.type = msg_pb2.SCHEDULER_OFF_REQ
req.header.source = "%s:%s" %(self.node_info.info["nodeType"], self.node_info.info["nodeId"])
req.header.dest = "scheduler"
req.header.feedback = self.msg_cli.chn_name
msg_data = json.dumps({
"msg_type": req.header.type,
"msg_data": base64.b64encode(req.SerializeToString())
})
else:
pass
if msg_data != "":
self.sch_msg_cli.send_msg(msg_data)
def exec_msg_cmd(self, cmd):
op = cmd["op"]
if op == "view":
            # read messages from this node's channel
msgs = self.msg_cli.get_msgs(-1)
if msgs is not None and len(msgs) > 0:
for msg in msgs:
self.process_msg(msg)
def process_msg(self, msg):
if msg.header.type == msg_pb2.MAIL_ON_RSP:
print "mail on account %s, returned %d, %s" %(msg.account, msg.code, msg.reason)
elif msg.header.type == msg_pb2.MAIL_OFF_RSP:
print "mail off account %s, returned %d, %s" %(msg.account, msg.code, msg.reason)
elif msg.header.type == msg_pb2.MAIL_UPDATE_RSP:
print "mail update account %s, returned %d, %s" %(msg.account, msg.code, msg.reason)
elif msg.header.type == msg_pb2.MAIL_STATUS_RSP:
print "mail status account %s, returned %d, %s; %s,%s,%s,%s,%d,%d" %(msg.account, msg.code, msg.reason,
msg.alias, msg.password, msg.smtpHost, msg.smtpPort, msg.useSsl, msg.status)
elif msg.header.type == msg_pb2.JOB_ON_RSP:
print "job on job %s, returned %d, %s" %(msg.jobId, msg.code, msg.reason)
elif msg.header.type == msg_pb2.JOB_OFF_RSP:
print "job off job %s, returned %d, %s" %(msg.jobId, msg.code, msg.reason)
elif msg.header.type == msg_pb2.JOB_UPDATE_RSP:
print "job update job %s, returned %d, %s" %(msg.jobId, msg.code, msg.reason)
elif msg.header.type == msg_pb2.JOB_STATUS_RSP:
print "job status job %s, returned %d, %s; %d, %d, %d" %(msg.jobId, msg.code, msg.reason, msg.completedNum, msg.successNum, msg.totalNum)
elif msg.header.type == msg_pb2.JOB_LIST_RSP:
print "job list {%s}, returned %d, %s" %(",".join(msg.jobList), msg.code, msg.reason)
elif msg.header.type == msg_pb2.WORKER_LIST_RSP:
pass
elif msg.header.type == msg_pb2.WORKER_STATUS_RSP:
pass
else:
pass
def exec_help(self):
print " quit or exit"
print " msg {view}"
print " mail {on/off/update/status} -a account -A alias -p password -h smtp_host -P smtp_port -s security"
print " job {on/off/update/status/list} -j job_id -f job_file"
print " worker {list/status/off} -w worker_id"
print " scheduler {off}"
def run(self):
'''
        Start the interactive loop and accept command input.
        Command formats:
quit
exit
mail {on/off/update/status} -a account -A alias -p password -h smtp_host -P smtp_port -s security
job {on/off/update/status/list} -j job_id -f job_file
worker {list/status} -w worker_id
scheduler {off}
msg view
'''
while True:
cmdline = raw_input("%s-%s>> " %(self.node_info.info["nodeType"], self.node_info.info["nodeId"]))
            # parse the command line
cmd = self.parse_cmd(cmdline)
if cmd is None:
print "command %s does not support!" %(cmdline)
continue
if cmd["name"] == "quit" or cmd["name"] == "exit":
print "Quit..."
break
elif cmd["name"] == "help":
self.exec_help()
elif cmd["name"] == "mail":
self.exec_mail_cmd(cmd)
elif cmd["name"] == "job":
self.exec_job_cmd(cmd)
elif cmd["name"] == "worker":
self.exec_worker_cmd(cmd)
elif cmd["name"] == "msg":
self.exec_msg_cmd(cmd)
elif cmd["name"] == "scheduler":
self.exec_scheduler_cmd(cmd)
else:
pass
if __name__ == "__main__":
from config import global_config
shell = MailShell(global_config)
shell.run()
|
import re, cStringIO, logging
import striga.core.context
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
try:
import html2text
except ImportError:
html2text = None
###
#TODO: Based on security settings send only partial information (no version)
StrigaVersion = 'Striga Server %s' % striga.Version
###
L = logging.getLogger('mail.context')
###
class Request(striga.core.context.Request):
def __init__(self, Path, RemoteAddress):
striga.core.context.Request.__init__(self, Path, RemoteAddress)
###
class Response(striga.core.context.Response):
def __init__(self):
striga.core.context.Response.__init__(self, contexttype='text/plain')
self.Subject = None
self.From = None
self.To = []
self.CC = []
self.BCC = []
self.ReplyTo = None
self.Attachments = []
self.__OutputBuffer = cStringIO.StringIO()
def SetCacheAge(self, age):
#TODO: Generalize this ...
pass
def Write(self, data):
self.__OutputBuffer.write(data)
def WriteFile(self, inpfile):
bufsize = 32*1024
while 1:
data = inpfile.read(bufsize)
if len(data) == 0:
break
self.__OutputBuffer.write(data)
def Flush(self):
pass
def AddAttachment(self, mimeobj, filename):
mimeobj.add_header('Content-Disposition', 'attachment', filename=filename)
self.Attachments.append(mimeobj)
def AddContent(self, mimeobj, contentid):
mimeobj.add_header('Content-ID', "<"+contentid+">")
self.Attachments.append(mimeobj)
ContentTypeRG = re.compile('[a-zA-Z0-9]+/[a-zA-Z0-9]+')
def GetMessage(self):
assert self.Subject is not None, "You need to set ctx.res.Subject for mail"
assert self.From is not None, "You need to set ctx.res.From for mail"
ct = self.ContentTypeRG.findall(self.ContentType)[0]
msg = MIMEMultipart('related', charset='utf-8')
msg.preamble = 'This is a multi-part message in MIME format.'
msg['X-Powered-By'] = StrigaVersion
msg['Subject'] = self.Subject
msg['From'] = self.From
msg['To'] = ', '.join(self.To)
if len(self.CC) > 0: msg['CC'] = ', '.join(self.CC)
if self.ReplyTo is not None: msg['Reply-To'] = self.ReplyTo
if ct == 'text/html':
msgAlternative = MIMEMultipart('alternative')
msg.attach(msgAlternative)
if html2text is not None:
txt = self.__OutputBuffer.getvalue()
#TODO: Following code is quite heavy - but as html2text has problem handling utf-8, we need to convert to Unicode and then back
txt = txt.decode('utf-8') # Convert to Unicode
txt = html2text.html2text(txt)
txt = txt.encode('utf-8') # Convert to UTF-8
part1 = MIMEText(txt, 'plain', _charset='utf-8')
del txt
msgAlternative.attach(part1)
del part1
else:
L.warning("Python module html2text not found - use 'easy_install-2.7 html2text' to install that")
part2 = MIMEText(self.__OutputBuffer.getvalue(), 'html', _charset='utf-8')
self.__OutputBuffer.close()
msgAlternative.attach(part2)
del part2
#TODO: Implement support for 'text/plain' and binaries (images) ...
else:
raise NotImplementedError("Mail response cannot handle content type of '{0}' (yet)".format(ct))
for atta in self.Attachments:
msg.attach(atta)
return msg.as_string()
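# Usage sketch (illustrative; it assumes the base striga Response exposes a writable
# ContentType attribute, which GetMessage() reads):
#   res = Response()
#   res.ContentType = 'text/html; charset=utf-8'
#   res.Subject = 'Report'
#   res.From = 'noreply@example.com'
#   res.To = ['user@example.com']
#   res.Write('<h1>Hello</h1>')
#   raw_message = res.GetMessage()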
|
from selenium.webdriver.support.ui import Select
from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def add(self, contact):
# Add Contact
wd = self.app.wd
self.app.wd.get("http://localhost/addressbook/edit.php")
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.firstname)
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.middlename)
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lastname)
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(contact.nickname)
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys(contact.photo_title)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys(contact.company)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys(contact.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.home_phone)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(contact.mobile_phone)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys(contact.work_phone)
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys(contact.fax)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(contact.email)
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys(contact.email2)
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys(contact.email3)
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys(contact.homepage)
wd.find_element_by_name("bday").click()
Select(wd.find_element_by_name("bday")).select_by_visible_text("1")
wd.find_element_by_name("bmonth").click()
Select(wd.find_element_by_name("bmonth")).select_by_visible_text("June")
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").send_keys(contact.byears)
wd.find_element_by_name("aday").click()
Select(wd.find_element_by_name("aday")).select_by_visible_text("9")
wd.find_element_by_name("amonth").click()
Select(wd.find_element_by_name("amonth")).select_by_visible_text("July")
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys(contact.byears)
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys(contact.address2)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys(contact.phone2)
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys(contact.notes)
wd.find_element_by_xpath("(//input[@name='submit'])[2]").click()
wd.find_element_by_link_text("home page").click()
self.contact_cache = None
def delete_contact_by_index(self, index):
wd = self.app.wd
self.app.open_homepage()
wd.find_elements_by_name("selected[]")[index].click()
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
wd.find_element_by_link_text("home").click()
self.contact_cache = None
def delete_first_contact(self):
self.delete_contact_by_index(0)
    def edit_first_contact(self, contact):
        self.edit_contact_by_index(0, contact)
def edit_contact_by_index(self, index, contact):
wd = self.app.wd
self.select_contact_by_index(index)
self.edit_contact(contact)
wd.find_element_by_name("update").click()
wd.find_element_by_link_text("home page").click()
self.contact_cache = None
def select_contact_by_index(self, index):
wd = self.app.wd
self.app.open_homepage()
wd.find_elements_by_name("selected[]")[index].click()
wd.find_elements_by_xpath("(//img[@alt='Edit'])")[index].click()
def edit_contact(self, contact):
self.change_field_value("firstname", contact.firstname)
self.change_field_value("middlename", contact.middlename)
self.change_field_value("lastname", contact.lastname)
self.change_field_value("nickname", contact.nickname)
self.change_field_value("photo_title", contact.photo_title)
self.change_field_value("company", contact.company)
self.change_field_value("address", contact.address)
self.change_field_value("home", contact.home_phone)
self.change_field_value("mobile", contact.mobile_phone)
self.change_field_value("fax", contact.fax)
self.change_field_value("work", contact.work_phone)
self.change_field_value("email", contact.email)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
self.change_field_value("homepage", contact.homepage)
self.change_field_value("bday", contact.bday)
self.change_field_value("bmonth", contact.bmonth)
self.change_field_value("byears", contact.byears)
self.change_field_value("address2", contact.address2)
self.change_field_value("phone2", contact.phone2)
self.change_field_value("notes", contact.notes)
self.contact_cache = None
def count(self):
wd = self.app.wd
self.app.wd.get("http://localhost/addressbook/index.php")
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.app.wd.get("http://localhost/addressbook/index.php")
self.contact_cache = []
for element in wd.find_elements_by_xpath("//tr[@name='entry']"):
firstname = element.find_element_by_css_selector("td:nth-child(3)").text
lastname = element.find_element_by_css_selector("td:nth-child(2)").text
address = element.find_element_by_css_selector("td:nth-child(4)").text
all_emails = element.find_element_by_css_selector("td:nth-child(5)").text
id = element.find_element_by_name("selected[]").get_attribute("value")
all_phones = element.find_element_by_css_selector("td:nth-child(6)").text
self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, address=address, all_emails_from_home_page=all_emails, id=id, all_tels_from_home_page=all_phones))
return list(self.contact_cache)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.select_contact_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
address = wd.find_element_by_name("address").text
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
home_phone = wd.find_element_by_name("home").get_attribute("value")
work_phone = wd.find_element_by_name("work").get_attribute("value")
mobile_phone = wd.find_element_by_name("mobile").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
return Contact(firstname=firstname, lastname=lastname, address=address, email=email, email2=email2,
email3=email3, id=id, home_phone=home_phone, mobile_phone=mobile_phone,
work_phone=work_phone, phone2=phone2)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.app.open_homepage()
wd.find_elements_by_xpath("(//img[@alt='Details'])")[index].click()
text = wd.find_element_by_id("content").text
home_phone = re.search("H: (.*)", text).group(1)
work_phone = re.search("W: (.*)", text).group(1)
mobile_phone = re.search("M: (.*)", text).group(1)
phone2 = re.search("P: (.*)", text).group(1)
return Contact(home_phone=home_phone, mobile_phone=mobile_phone, work_phone=work_phone, phone2=phone2)
|
from bs4 import BeautifulSoup
import requests
import json
url = "http://nutrias.org/~nopl/photos/wpa/wpa30.htm"
all_my_data = []
home_page = requests.get(url)
home_page_html = home_page.text
soup = BeautifulSoup(home_page_html, 'html.parser')
column_map = {
1: "series_number",
2: "project",
3: "series_title",
4: "description",
5: "date_taken",
6: "project_dates",
7: "OP",
8: "negatives",
9: "contact_prints",
10: "8x10_prints",
11: "digital_photos",
}
records = soup.find_all('tr')
for record in records:
my_data = {
}
fields = record.find_all("td")
counter = 0
for entry in fields:
counter = counter + 1
label = column_map[counter]
try:
data_rows = entry.find("font")
data_rows = data_rows.text
my_data[label] = data_rows
except AttributeError:
continue
image_urls = []
item_link = record.find_all('a')
for link in item_link:
abs_url = "http://nutrias.org/~nopl/photos/wpa/" + link['href']
image_urls.append(abs_url)
    my_data["image_urls"] = image_urls  # store the links under a dedicated key instead of overwriting the last column
all_my_data.append(my_data)
with open('WPA_Music_Collection.json', 'w') as f_object:
json.dump(all_my_data, f_object, indent=2)
print("Your file is now ready")
|
import os, os.path as p
import logzero
import logging
from logzero import logger
LOG_LEVEL = logging.INFO
logdir = p.abspath(p.join(p.dirname(__file__), "../logs"))
os.makedirs(logdir, exist_ok=True)
logfile = "logs.txt"
# Setup rotating logfile with 3 rotations, each with a maximum filesize of 1MB:
logzero.logfile(p.join(logdir, logfile), maxBytes=1e6, backupCount=3)
logzero.loglevel(LOG_LEVEL)
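# Usage sketch: once logzero.logfile() has been configured above, any module can do
#   from logzero import logger
#   logger.info("message")
# and the record goes both to the console and to the rotating logfile.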
|
# Approach 1:
def N_Largest_Elements(Arr, Upto):
    result = []
    for i in range(0, Upto):
        largest = Arr[0]          # start from an actual element so negative values work
        for j in range(0, len(Arr)):
            if Arr[j] > largest:
                largest = Arr[j]
        Arr.remove(largest)
        result.append(largest)
    return result
Given_List = []
n = int(input("How Many Elements: "))
print("Enter The Elements: ")
for i in range(0, n):
element = int(input())
Given_List.append(element)
Upto = int(input("Upto Which Number: "))
print(N_Largest_Elements(Given_List, Upto))
# Approach 2:
def N_Largest_Elements(Arr, Upto):
return sorted(Arr)[-Upto:]
Given_List = []
n = int(input("How Many Elements: "))
print("Enter The Elements: ")
for i in range(0, n):
element = int(input())
Given_List.append(element)
Upto = int(input("Upto Which Number: "))
print(N_Largest_Elements(Given_List, Upto))
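# Approach 3 (a sketch, added for comparison): heapq.nlargest avoids a full sort,
# returns the values in descending order, and reuses the list entered for Approach 2.
import heapq
def N_Largest_Elements_Heap(Arr, Upto):
    return heapq.nlargest(Upto, Arr)
print(N_Largest_Elements_Heap(Given_List, Upto))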
|
# coding: utf-8
# # Tables (2) - Making a table
# In this lesson we're going to learn how to make a table in Plotly.
#
# We'll learn how to make one from a list and a Pandas DataFrame.
# ## Module Imports
# In[1]:
#plotly.offline doesn't push your charts to the cloud
import plotly.offline as pyo
#allows us to create the Data and Figure objects
from plotly.graph_objs import *
#plotly.plotly pushes your charts to the cloud
import plotly.plotly as py
#pandas is a data analysis library
import pandas as pd
from pandas import DataFrame
# #### New Modules:
#
# In order to make a table in Plotly, we have to use a different library called the <code>'FigureFactory'</code>. This is an in-development part of the Plotly codebase and is liable to change without notice.
#
# The <code>'FigureFactory'</code> gives options for creating many different types of chart; I suggest trying a few of them out if you're curious!
# In[2]:
from plotly.tools import FigureFactory as FF
# In[3]:
#lets us see the charts in an iPython Notebook
pyo.offline.init_notebook_mode() # run at the start of every ipython
# ## Making a table from a list
#
# We can use the <code>FigureFactory.create_table()</code> function to create a table. This function actually uses the Heatmap chart type (which we won't cover in this course) to create a table, and we'll use different aspects of the Heatmap chart to make stylistic changes to our tables.
#
# It's very simple to create a table from a list, provided that the list is in the correct format.
#
# The list must be comprised of several sublists. Each sublist will be a row in the table, and the first sublist will be the column titles.
#
# Let's try this out and create a table showing the population and area of the different countries in the United Kingdom. I sourced this data from <a href = "https://en.wikipedia.org/wiki/Population_of_the_countries_of_the_United_Kingdom">Wikipedia</a>.
#
# Here's our table data with the row for the column headings and space for the data:
# In[4]:
tableData = [['Country','Population','Area (km2)'],
[],
[],
[],
[]]
# Here's the table data with the values completed:
# In[5]:
tableData = [['Country','Population','Area (km2)'],
['England',53865800, 130395],
['Scotland',5327700, 78772],
['Wales',3082400,20779],
['Northern Ireland', 1829700, 13843]]
# We can now pass this into the <code>FF.create_table()</code> function and create an object to plot:
# In[6]:
UKCountries = FF.create_table(tableData)
pyo.iplot(UKCountries)
py.image.save_as(UKCountries, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(09) Tables\Notebooks\images\Tables (2) - Making a table\pyo.iplot-0.png")
#
# You can see that Plotly follows many of the recommendations for designing clear tables. The header row is a different colour, the row colours alternate, and the font allows for comparisons between the numbers.
#
# There are still improvements to make which we will look at later in this section.
#
# #### The structure of a table object
#
# Let's have a look at the structure of our table object.
#
# You can see that the data displayed in the table is not contained within the data element of the figure. The data element actually contains the instructions for colouring the rows and header.
#
# The data displayed in the table is stored as annotations which are positioned by the <code>FF.create_table()</code> function. We will use this knowledge later to allow us to style the data in the table.
# In[7]:
UKCountries
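# As a quick check of the point above, we can print the text stored in the first few annotations (a sketch; it assumes dict-style access on the figure object, which the figures returned by the <code>FigureFactory</code> support):
print([a['text'] for a in UKCountries['layout']['annotations']][:4])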
# ## Making a table from a DataFrame
#
# Let's get the same data that we used earlier, but rather than writing it out as a list, we'll get it as a csv:
# In[8]:
df = pd.read_csv('http://richard-muir.com/data/public/csv/UKCountryPopulation.csv', index_col = 0)
df
# We can now pass this DataFrame directly into the <code>create_table()</code> function:
# In[9]:
UKCountryInfo = FF.create_table(df)
pyo.iplot(UKCountryInfo)
py.image.save_as(UKCountryInfo, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(09) Tables\Notebooks\images\Tables (2) - Making a table\pyo.iplot-1.png")
#
# So you can see that the entire DataFrame gets translated into a Plotly table, but it looks a bit cramped. We could increase the size of the table to include all the information, but for now, let's reduce the number of columns. We'll take the column for 2011 because we'll use data from another source for 2011 in a later lesson in this section.
# In[10]:
df = df[['Name','Population (2011)', 'Area (km2)']]
UKCountryInfo = FF.create_table(df)
pyo.iplot(UKCountryInfo)
py.image.save_as(UKCountryInfo, r"C:\Users\Rytch\Google Drive\Financial\Passive Income\Online courses\Plotly\Course Content\Lessons\(09) Tables\Notebooks\images\Tables (2) - Making a table\pyo.iplot-2.png")
#
# ### What have we learnt this lesson?
# In this lesson we've seen how to create a table from a list or a DataFrame using the <code>FigureFactory.create_table()</code> function.
#
# We've seen that the <code>create_table()</code> function actually uses the Plotly heatmap as a base for the table, and that the information in the table rows is actually stored as annotations.
#
# In the next lesson we'll learn how to add an index column to the table.
# If you have any questions, please ask in the comments section or email <a href="mailto:me@richard-muir.com">me@richard-muir.com</a>
|
from kronos_modeller.strategy_base import StrategyBase
class FillingStrategy(StrategyBase):
required_config_fields = [
"type",
"priority"
]
|
__author__ = "Dave Wapstra <dwapstra@cisco.com>"
import warnings
from unicon.plugins.iosxe.sdwan import SDWANSingleRpConnection
class SDWANConnection(SDWANSingleRpConnection):
os = 'sdwan'
platform = 'iosxe'
def __init__(self, *args, **kwargs):
warnings.warn(message = "This plugin is deprecated and replaced by 'iosxe/sdwan'",
category = DeprecationWarning)
super().__init__(*args, **kwargs)
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.fluid as fluid
import math
class FullyConnected3D(object):
def __init__(self,
input_dim,
output_dim,
active_op='prelu',
version="default"):
self.active_op = active_op
self.version = version
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, bottom_data):
print "call FullyConnected3D"
net_out = paddle.static.nn.fc(
bottom_data,
size=self.output_dim,
num_flatten_dims=2,
activation=None,
weight_attr=paddle.framework.ParamAttr(
name="fc_w_%s" % self.version,
initializer=fluid.initializer.Normal(scale=1.0 / math.sqrt(
(self.input_dim)))),
bias_attr=fluid.ParamAttr(
name="fc_b_%s" % self.version,
initializer=fluid.initializer.Constant(0.1)))
# net_out = paddle.static.nn.fc(
# bottom_data,
# size=self.output_dim,
# num_flatten_dims=2,
# activation=None,
# weight_attr=paddle.framework.ParamAttr(
# name="fc_w_%s" % self.version,
# initializer=fluid.initializer.Constant(1.0 / math.sqrt((self.input_dim)))),
# bias_attr=fluid.ParamAttr(
# name="fc_b_%s" % self.version,
# initializer=fluid.initializer.Constant(0.1)))
if self.active_op == 'prelu':
print "in FullyConnected3D use prelu"
net_out = paddle.static.nn.prelu(
net_out,
'channel',
paddle.framework.ParamAttr(
name='alpha_1_%s' % self.version,
initializer=paddle.nn.initializer.Constant(0.25)))
return net_out
class paddle_dnn_layer(object):
def __init__(self,
input_dim,
output_dim,
active_op='prelu',
use_batch_norm=False,
version="default"):
self.active_op = active_op
self.use_batch_norm = use_batch_norm
self.version = version
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, bottom_data):
print "if mx.symbol.FullyConnected"
out = paddle.static.nn.fc(
bottom_data,
size=self.output_dim,
activation=None,
weight_attr=paddle.framework.ParamAttr(
name="fc_w_%s" % self.version,
initializer=fluid.initializer.Normal(scale=1.0 / math.sqrt(
(self.input_dim)))),
bias_attr=fluid.ParamAttr(
name="fc_b_%s" % self.version,
initializer=fluid.initializer.Constant(0.1)))
# out = paddle.static.nn.fc(
# bottom_data,
# size=self.output_dim,
# activation=None,
# weight_attr=paddle.framework.ParamAttr(
# name="fc_w_%s" % self.version,
# initializer=fluid.initializer.Constant(1.0 / math.sqrt((self.input_dim)))),
# bias_attr=fluid.ParamAttr(
# name="fc_b_%s" % self.version,
# initializer=fluid.initializer.Constant(0.1)))
if self.use_batch_norm:
print "if self.use_batch_norm:"
batch_norm = paddle.nn.BatchNorm(
self.output_dim,
epsilon=1e-03,
param_attr=fluid.ParamAttr(
name="bn_gamma_1_%s" % self.version,
initializer=fluid.initializer.Constant(1.0)),
bias_attr=fluid.ParamAttr(
name="bn_bias_1_%s" % self.version,
initializer=fluid.initializer.Constant(0.0)))
out = batch_norm(out)
if self.active_op == 'prelu':
print "if self.active_op == 'prelu':"
out = paddle.static.nn.prelu(
out,
'channel',
paddle.framework.ParamAttr(
name='alpha_1_%s' % self.version,
initializer=paddle.nn.initializer.Constant(0.25)))
return out
def dnn_model_define(user_input,
unit_id_emb,
node_emb_size=24,
fea_groups="20,20,10,10,2,2,2,1,1,1",
active_op='prelu',
use_batch_norm=True,
with_att=False):
fea_groups = [int(s) for s in fea_groups.split(',')]
total_group_length = np.sum(np.array(fea_groups))
print "fea_groups", fea_groups, "total_group_length", total_group_length, "eb_dim", node_emb_size
layer_data = []
# start att
if with_att:
print("TDM Attention DNN")
att_user_input = paddle.concat(
user_input, axis=1) # [bs, total_group_length, emb_size]
att_node_input = fluid.layers.expand(
unit_id_emb, expand_times=[1, total_group_length, 1])
att_din = paddle.concat(
[att_user_input, att_user_input * att_node_input, att_node_input],
axis=2)
att_active_op = 'prelu'
att_layer_arr = []
att_layer1 = FullyConnected3D(
3 * node_emb_size, 36, active_op=att_active_op, version=1)
att_layer_arr.append(att_layer1)
att_layer2 = FullyConnected3D(
36, 1, active_op=att_active_op, version=2)
att_layer_arr.append(att_layer2)
layer_data.append(att_din)
for layer in att_layer_arr:
layer_data.append(layer.call(layer_data[-1]))
att_dout = layer_data[-1]
att_dout = fluid.layers.expand(
att_dout, expand_times=[1, 1, node_emb_size])
user_input = att_user_input * att_dout
else:
print("TDM DNN")
user_input = paddle.concat(
user_input, axis=1) # [bs, total_group_length, emb_size]
# end att
idx = 0
grouped_user_input = []
for group_length in fea_groups:
block_before_sum = paddle.slice(
user_input, axes=[1], starts=[idx], ends=[idx + group_length])
block = paddle.sum(block_before_sum, axis=1) / group_length
grouped_user_input.append(block)
idx += group_length
grouped_user_input = paddle.concat(
grouped_user_input, axis=1) # [bs, 10 * emb_size]
din = paddle.concat(
[grouped_user_input, paddle.squeeze(
unit_id_emb, axis=1)], axis=1)
net_version = "d"
layer_arr = []
layer1 = paddle_dnn_layer(
11 * node_emb_size,
128,
active_op=active_op,
use_batch_norm=use_batch_norm,
version="%d_%s" % (1, net_version))
layer_arr.append(layer1)
layer2 = paddle_dnn_layer(
128,
64,
active_op=active_op,
use_batch_norm=use_batch_norm,
version="%d_%s" % (2, net_version))
layer_arr.append(layer2)
layer3 = paddle_dnn_layer(
64,
32,
active_op=active_op,
use_batch_norm=use_batch_norm,
version="%d_%s" % (3, net_version))
layer_arr.append(layer3)
layer4 = paddle_dnn_layer(
32,
2,
active_op='',
use_batch_norm=False,
version="%d_%s" % (4, net_version))
layer_arr.append(layer4)
layer_data.append(din)
for layer in layer_arr:
layer_data.append(layer.call(layer_data[-1]))
dout = layer_data[-1]
return dout
# if is_infer:
# softmax_prob = paddle.nn.functional.softmax(dout)
# positive_prob = paddle.slice(
# softmax_prob, axes=[1], starts=[1], ends=[2])
# prob_re = paddle.reshape(positive_prob, [-1])
# _, topk_i = paddle.topk(prob_re, k=topk)
# topk_node = paddle.index_select(label, topk_i)
# return topk_node
# else:
# cost, softmax_prob = paddle.nn.functional.softmax_with_cross_entropy(
# logits=dout, label=label, return_softmax=True, ignore_index=-1)
# ignore_label = paddle.full_like(label, fill_value=-1)
# avg_cost = paddle.sum(cost) / paddle.sum(
# paddle.cast(
# paddle.not_equal(label, ignore_label), dtype='int32'))
# return avg_cost, softmax_prob
|
from abc import ABCMeta
from torch import nn, Tensor
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Callable
from torch.utils.data import DataLoader
from torch.optim.optimizer import Optimizer
class Discriminator(nn.Module):
def __init__(self, in_size):
super().__init__()
self.in_size = in_size
modules = []
in_channels = self.in_size[0]
out_channels = 1024
modules.append(nn.Conv2d(in_channels, 128, kernel_size=4, stride=2, padding=1))
modules.append(nn.BatchNorm2d(128))
modules.append(nn.ReLU())
# 128 x 32 x 32
modules.append(nn.Dropout(0.2))
modules.append(nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1))
modules.append(nn.BatchNorm2d(256))
modules.append(nn.ReLU())
# 256 x 16 x 16
modules.append(nn.Dropout(0.2))
modules.append(nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1))
modules.append(nn.BatchNorm2d(512))
modules.append(nn.ReLU())
# 512 x 8 x 8
modules.append(nn.Dropout(0.2))
modules.append(nn.Conv2d(512, out_channels, kernel_size=4, stride=2, padding=1))
# out_channels x 4 x 4
modules.append(nn.MaxPool2d(kernel_size=4))
#FC layers
self.cnn = nn.Sequential(*modules)
        self.linear1 = nn.Linear(1024, 256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout()
        self.linear2 = nn.Linear(256, 1)
def forward(self, x):
x = self.cnn(x)
batch_size = x.shape[0]
x = x.view(batch_size, -1)
        x = self.linear1(x)
        x = self.relu(x)
        x = self.dropout(x)
        y = self.linear2(x)
return y
class Generator(nn.Module):
def __init__(self, z_dim, featuremap_size=4, out_channels=3):
super().__init__()
self.z_dim = z_dim
modules = []
self.in_channels = z_dim
modules.append(nn.ConvTranspose2d(self.in_channels, 1024, kernel_size=4))
modules.append(nn.BatchNorm2d(1024))
modules.append(nn.ReLU())
# 1024 x 4 x 4
modules.append(nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2))
modules.append(nn.BatchNorm2d(512))
modules.append(nn.ReLU())
# 512 x 8 x 8
modules.append(nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2))
modules.append(nn.BatchNorm2d(256))
modules.append(nn.ReLU())
# 256 x 16 x 16
modules.append(nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2))
modules.append(nn.BatchNorm2d(128))
modules.append(nn.ReLU())
# 128 x 32 x 32
modules.append(nn.ConvTranspose2d(128, 3, kernel_size=4, stride=2, padding=1))
# 3 x 64 x 64
self.cnn = nn.Sequential(*modules)
def sample(self, n, features, with_grad=False):
device = next(self.parameters()).device
continuous = self.z_dim - features.shape[1]
if with_grad:
continuous_part = torch.randn((n, continuous), device=device, requires_grad=with_grad)
z = torch.cat((continuous_part, features), 1)
samples = self.forward(z)
else:
with torch.no_grad():
continuous_part = torch.randn((n, continuous), device=device, requires_grad=with_grad)
z = torch.cat((continuous_part, features), 1)
samples = self.forward(z)
return samples
def forward(self, z):
x = torch.tanh(self.cnn(z.view(z.shape[0], -1, 1, 1)))
return x
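# A minimal shape check (sketch; the sizes are chosen to match the conv stacks above:
# 64x64 RGB images and a 128-dimensional latent code):
#   disc = Discriminator(in_size=(3, 64, 64))
#   gen = Generator(z_dim=128)
#   fake = gen(torch.randn(4, 128))   # -> (4, 3, 64, 64)
#   scores = disc(fake)               # -> (4, 1)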
|
from django.db import models
from core.models import TimeStampedEnabledModel, Cities
class AbstractBuilding(TimeStampedEnabledModel):
city = models.ForeignKey(Cities, on_delete=models.DO_NOTHING, null=False)
name = models.CharField(max_length=50)
desc = models.CharField(max_length=100, blank=True)
address = models.CharField(max_length=50)
img_src = models.ImageField(upload_to='buildings/', default=None, null=True)
creator = models.ForeignKey('users.User', on_delete=models.DO_NOTHING, null=False,
default=None)
class Meta:
abstract = True
class Building(AbstractBuilding):
class Meta:
db_table = 'building'
ordering = ['-updated']
def __str__(self):
return self.name
class BuildingHistory(AbstractBuilding):
building = models.ForeignKey(Building, on_delete=models.DO_NOTHING, null=False)
img_src = models.ImageField(upload_to='buildings/history/', default=None, null=True)
class Meta:
db_table = 'building_history'
ordering = ['-pk']
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 08:02:39 2020
@author: Edson Cilos
"""
#Standard packages
import os
import numpy as np
import pandas as pd
#Sklearning package
from sklearn.preprocessing import MinMaxScaler
#Graphics packages
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
#Project packages
import config
from utils import classes_names
from table import best_results
#Still beta, several updates required!
#Best model path:
best_path = os.path.join('results', 'mccv', 'baseline_over_SVC_linear_100.0',
'detailed_score.csv')
mccv_path = config._get_path('mccv')
graphics_path = config._get_path('graphics')
def gs_heatmap(output_name = 'gs_table'):
df, _ = best_results()
c_map = plt.get_cmap('YlGnBu')
c_map = ListedColormap(c_map(np.linspace(0.1, 0.7, 256)))
fig, ax = plt.subplots(figsize=(12, 7))
heat = sns.heatmap(df, annot=True, linewidths= 1,
cmap=c_map, ax = ax, fmt='.4f')
#Ad-hoc
for text in heat.texts:
txt = text.get_text()
n = float(txt)
        if n == 0 or n == 1:
            text.set_text('Yes' if n else 'No')
ax.set_title('Grid Search')
fig.savefig(os.path.join(graphics_path, output_name + '.png'),
dpi = 1200,
bbox_inches = "tight")
return df
def total_score_plot_all():
_total_score_plot(mccv_files(), "Best models")
def _total_score_plot(name_list, main_name):
df_tuples = []
for name in name_list:
df = pd.read_csv(os.path.join(mccv_path, name, 'total_score.csv'))
std = np.std(df[df.columns[1]])
mean = np.mean(df[df.columns[1]])
#label1 = name
label1 = name + ' loss: ' + str(round(mean, 5)) + \
', std: ' + str(round(std, 5))
std = np.std(df[df.columns[3]])
mean = np.mean(df[df.columns[3]])
#label2 = name
label2 = name + ' score: ' + str(round(mean, 5)) + \
', std: ' + str(round(std, 5))
df_tuples.append((df, label1, label2))
total_score_plot(df_tuples, main_name)
def total_score_plot(df_tuples, name):
sns.set_palette(sns.color_palette("hls", len(df_tuples)))
for tup in df_tuples:
plot = sns.distplot(tup[0]["Cross_Entropy_val"],
axlabel = 'Cross Entropy (validation)',
label=tup[1],
)
plt.legend(loc="center", bbox_to_anchor=(0.5, -0.35))
fig = plot.get_figure()
fig.savefig(os.path.join(graphics_path, name + '_cross_entropy.png'),
dpi = 1200,
bbox_inches = "tight")
plt.show()
plt.close()
##The same for accuracy
sns.set_palette(sns.color_palette("hls", len(df_tuples)))
for tup in df_tuples:
plot = sns.distplot(tup[0]["Accuracy_val"],
axlabel = 'Accuracy (validation)',
label=tup[2])
plt.legend(loc="center", bbox_to_anchor=(0.5, -0.35))
fig = plot.get_figure()
fig.savefig(os.path.join(graphics_path, name + '_accuracy.png'),
dpi = 1200,
bbox_inches = "tight")
plt.show()
plt.close()
def self_heatmap():
df = pd.read_csv(os.path.join('results', 'SelfTraining.csv'), index_col=0)
df.index.name = None
df.drop(['base_path'], axis=1, inplace=True)
rename = {'time' : 'Time (s)',
'amount_labaled' : 'Samples labeled',
'accuracy' : 'Accuracy',
              'log_loss' : 'Log-loss',
              'std_log_loss' : 'Log-loss (std)'}
df.rename(columns = rename, inplace=True)
scaler = MinMaxScaler()
df_dual = pd.DataFrame(data = scaler.fit_transform(df),
columns = df.columns,
index = df.index)
heat0 = sns.heatmap(df, annot=True, linewidths= 1, fmt='.3f')
fig, ax = plt.subplots(figsize=(12, 5))
color_map = plt.get_cmap('YlGnBu')
color_map = ListedColormap(color_map(np.linspace(0.1, 0.75, 256)))
heat = sns.heatmap(df_dual, annot=True, linewidths= 1,
cmap= color_map, ax = ax, fmt='.3f')
colorbar = ax.collections[0].colorbar
colorbar.set_ticks([0.1, 0.5, 1])
colorbar.set_ticklabels(['Low', 'Middle', 'High'])
for t in range(len(heat0.texts)):
txt = heat0.texts[t].get_text()
heat.texts[t].set_text(txt)
ax.set_title('SelfTraining Table (5-fold cross validation)')
fig.savefig(os.path.join(graphics_path, 'SelfTraining_table.png'),
dpi = 1200,)
def best_model_results(model_name = 'baseline_over_SVC_linear_100.0'):
path = os.path.join(mccv_path, model_name)
probability_heatmap(pd.read_csv(os.path.join(path,'probability.csv')),
model_name)
cross_heatmap(pd.read_csv(os.path.join(path,'cross_matrix.csv')),
model_name)
detailed_score_heatmap(pd.read_csv(os.path.join(path,
'detailed_score.csv')),
model_name)
def probability_heatmap(df, name):
names, classes = classes_names()
w = df.mean(axis=0).values.reshape(classes, classes) #ndarray
w = np.around(w, decimals=3)
prob_frame = pd.DataFrame(data = w, columns = names, index = names)
fig, ax = plt.subplots(figsize=(12, 7))
sns.heatmap(prob_frame, annot=True, linewidths= 1, cmap="YlGnBu", ax = ax)
ax.set_title('True class v.s. Predicted Probability Class')
fig.savefig(os.path.join(graphics_path, name + '_probability.png'),
dpi = 1200,
bbox_inches = "tight")
def cross_heatmap(df, name):
names, classes = classes_names()
w = df.mean(axis=0).values.reshape(classes, classes) #ndarray
for i in range(classes):
w[i] /= np.sum(w[i])
w = np.around(w, decimals=3)
cross_frame = pd.DataFrame(data = w, columns = names, index = names)
fig, ax = plt.subplots(figsize=(12, 7))
sns.heatmap(cross_frame, annot=True, linewidths= 1, cmap="YlGnBu", ax = ax)
ax.set_title('True class v.s. Predicted Class (mean)')
fig.savefig(os.path.join(graphics_path, name + '_cross_prediction.png'),
dpi = 1200,
bbox_inches = "tight")
def mccv_detailed_score_heatmap():
models = mccv_files()
for model_name in models:
df = pd.read_csv(os.path.join(mccv_path,model_name,
'detailed_score.csv'))
detailed_score_heatmap(df, model_name)
def detailed_score_heatmap(df, name):
names, classes = classes_names()
w = df.mean(axis=0).values.reshape(classes, 4)
w = np.around(w, decimals=3)
score_frame = pd.DataFrame(data = w,
columns=['sensitivity', 'specificity',
'precision', 'f1_score'],
index = names)
fig, ax = plt.subplots(figsize=(7, 7))
#color_map = plt.get_cmap('YlGnBu_r')
#color_map = ListedColormap(color_map(np.linspace(0.1, 0.6, 256)))
sns.heatmap(score_frame,
annot=True, linewidths= 0.05, cmap='YlGnBu', ax = ax)
ax.set_title(name + ' Scores')
fig.savefig(os.path.join(graphics_path, name + '_detailed_score.png'),
dpi = 1200,
bbox_inches = "tight")
def final_table():
names, classes = classes_names()
ked_et_al = {'Cellulose acetate': 0.97,
'Cellulose like': 0.65,
'Ethylene propylene rubber': 0.76,
'Morphotype 1': 0.89,
'Morphotype 2': 0.88,
'PEVA': 0.74,
'Poly(amide)': 1,
'Poly(ethylene)' : 1,
'Poly(ethylene) + fouling' : 0.88,
'Poly(ethylene) like' : 0.69,
'Poly(propylene)' : 0.99,
'Poly(propylene) like' : 0.51,
'Poly(styrene)' : 0.99,
'Unknown' : 0 }
w0 = []
for n in names:
w0.append(ked_et_al[n])
w0 = np.array(w0)
#Load model's sensitivity mccv data (using Kedzierski et. al methodology)
df1 = pd.read_csv(os.path.join('results',
'final_model_mccv_all_data_detailed_score.csv')
)
w1 = df1.mean(axis=0).values.reshape(classes, 4)
w1 = np.around(w1, decimals=3)[:, 0]
#Load MCCV results (best model)
df2 = pd.read_csv(best_path)
w2 = df2.mean(axis=0).values.reshape(classes, 4)
w2 = np.around(w2, decimals=3)[:, 0]
#Load model's sensitivity test result
df3 = pd.read_csv(os.path.join('results','final_test_detailed_score.csv'))
w3 = df3.mean(axis=0).values.reshape(classes, 4)
w3 = np.around(w3, decimals=3)[:, 0]
w = np.stack((w0, w1, w2, w3), axis=0)
df = pd.DataFrame(data = w,
columns= names,
index = ["Kedzierski et al.",
"SVC + Kedzierski et al",
"SVC + MCCV",
"SVC Final test"])
fig, ax = plt.subplots(figsize=(12, 5))
ax.set_title('Sensitivity comparison')
sns.heatmap(df,
annot=True, linewidths= 0.05, cmap='YlGnBu', ax = ax)
fig.savefig(os.path.join(graphics_path, 'sensitivity_final_table.png'),
dpi = 1200,
bbox_inches = "tight")
return df
def mccv_files():
return [model for model in os.listdir(mccv_path) if
os.path.isdir(os.path.join(mccv_path, model)) ]
|
import smart_imports
smart_imports.all()
class HeroDescriptionTests(utils_testcase.TestCase):
def setUp(self):
super().setUp()
game_logic.create_test_map()
account = self.accounts_factory.create_account(is_fast=True)
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
def test_no_description(self):
self.assertEqual(logic.get_hero_description(self.hero.id), '')
def test_has_description(self):
logic.set_hero_description(self.hero.id, 'bla-bla')
self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla')
def test_update_description(self):
logic.set_hero_description(self.hero.id, 'bla-bla')
logic.set_hero_description(self.hero.id, 'new description')
self.assertEqual(logic.get_hero_description(self.hero.id), 'new description')
class CreateHero(utils_testcase.TestCase):
def setUp(self):
super().setUp()
game_logic.create_test_map()
self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx',
email='test@test.test',
is_fast=False)
self.attributes = {'is_fast': False,
'is_bot': False,
'might': 0,
'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3),
'premium_state_end_at': datetime.datetime.fromtimestamp(0),
'ban_state_end_at': datetime.datetime.fromtimestamp(0)}
def test_default(self):
logic.create_hero(account_id=self.account.id, attributes=self.attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.id, self.account.id)
self.assertEqual(hero.account_id, self.account.id)
self.assertIn(hero.gender, (game_relations.GENDER.MALE,
game_relations.GENDER.FEMALE))
self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration)
self.assertEqual(hero.habit_honor.raw_value, 0)
self.assertEqual(hero.habit_peacefulness.raw_value, 0)
self.assertTrue(hero.preferences.archetype.is_NEUTRAL)
self.assertTrue(hero.upbringing.is_PHILISTINE)
self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS)
self.assertTrue(hero.death_age.is_MATURE)
def test_account_attributes_required(self):
for attribute in self.attributes.keys():
with self.assertRaises(exceptions.HeroAttributeRequiredError):
logic.create_hero(account_id=self.account.id,
attributes={key: value for key, value in self.attributes.items() if key != attribute })
def test_account_attributes(self):
attributes = {'is_fast': random.choice((True, False)),
'is_bot': random.choice((True, False)),
'might': random.randint(1, 1000),
'active_state_end_at': datetime.datetime.fromtimestamp(1),
'premium_state_end_at': datetime.datetime.fromtimestamp(2),
'ban_state_end_at': datetime.datetime.fromtimestamp(3)}
logic.create_hero(account_id=self.account.id, attributes=attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.is_fast, attributes['is_fast'])
self.assertEqual(hero.is_bot, attributes['is_bot'])
self.assertEqual(hero.might, attributes['might'])
self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at'])
self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at'])
self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at'])
def test_attributes(self):
self.attributes.update({'race': game_relations.RACE.random(),
'gender': game_relations.GENDER.random(),
'name': game_names.generator().get_name(game_relations.RACE.random(),
game_relations.GENDER.random()),
'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER),
'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER),
'archetype': game_relations.ARCHETYPE.random(),
'upbringing': tt_beings_relations.UPBRINGING.random(),
'first_death': tt_beings_relations.FIRST_DEATH.random(),
'death_age': tt_beings_relations.AGE.random()})
logic.create_hero(account_id=self.account.id, attributes=self.attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.race, self.attributes['race'])
self.assertEqual(hero.gender, self.attributes['gender'])
self.assertEqual(hero.utg_name, self.attributes['name'])
self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness'])
self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor'])
self.assertEqual(hero.preferences.archetype, self.attributes['archetype'])
self.assertEqual(hero.upbringing, self.attributes['upbringing'])
self.assertEqual(hero.first_death, self.attributes['first_death'])
self.assertEqual(hero.death_age, self.attributes['death_age'])
class RegisterSpendingTests(utils_testcase.TestCase):
def setUp(self):
super().setUp()
self.places = game_logic.create_test_map()
account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
self.hero.premium_state_end_at
game_tt_services.debug_clear_service()
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_not_in_place(self):
self.hero.position.set_position(0, 0)
self.assertEqual(self.hero.position.place_id, None)
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(impacts, [])
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: False)
def test_can_not_change_place_power(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(impacts, [])
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_can_change_place_power(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(len(impacts), 1)
self.assertEqual(impacts[0].amount, 100)
self.assertTrue(impacts[0].target_type.is_PLACE)
self.assertEqual(impacts[0].target_id, self.places[0].id)
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_can_change_place_power__below_zero(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
logic.register_spending(self.hero, -50)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(len(impacts), 1)
self.assertEqual(impacts[0].amount, 150)
class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin,
utils_testcase.TestCase):
def setUp(self):
super().setUp()
self.places = game_logic.create_test_map()
account = self.accounts_factory.create_account(is_fast=True)
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
def place_0_cost(self):
return logic.get_places_path_modifiers(self.hero)[self.places[0].id]
def test_every_place_has_modifier(self):
modifiers = logic.get_places_path_modifiers(self.hero)
self.assertEqual(set(modifiers.keys()), {place.id for place in self.places})
def test_race_bonus(self):
self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,))
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA):
self.places[0].race = self.hero.race
def test_modifier_bonus(self):
self.assertFalse(self.places[0].is_modifier_active())
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA):
self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT)
self.create_effect(self.places[0].id,
value=100500,
attribute=places_relations.ATTRIBUTE.MODIFIER_FORT,
delta=0)
self.places[0].refresh_attributes()
self.assertTrue(self.places[0].is_modifier_active())
def test_home_place(self):
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0])
def test_friend(self):
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0])
def test_enemy(self):
with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0])
def test_tax(self):
self.places[0].attrs.size = 10
self.places[0].refresh_attributes()
self.assertEqual(self.places[0].attrs.tax, 0)
with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA):
self.create_effect(self.places[0].id,
value=100,
attribute=places_relations.ATTRIBUTE.TAX,
delta=0)
self.places[0].refresh_attributes()
HABITS_DELTAS = [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA),
(-1, 0, 0),
(-1, +1, +c.PATH_MODIFIER_MINOR_DELTA),
( 0, -1, 0),
( 0, 0, 0),
( 0, +1, 0),
(+1, -1, +c.PATH_MODIFIER_MINOR_DELTA),
(+1, 0, 0),
(+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)]
def test_habits__honor(self):
for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS:
self.places[0].habit_honor.set_habit(0)
self.hero.habit_honor.set_habit(0)
with self.check_almost_delta(self.place_0_cost, expected_delta):
self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER)
self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER)
def test_habits__peacefulness(self):
for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS:
self.places[0].habit_peacefulness.set_habit(0)
self.hero.habit_peacefulness.set_habit(0)
with self.check_almost_delta(self.place_0_cost, expected_delta):
self.places[0].habit_peacefulness.set_habit(place_direction * c.HABITS_BORDER)
self.hero.habit_peacefulness.set_habit(hero_direction * c.HABITS_BORDER)
|
import ray
import pytest
from ray.tests.conftest import * # noqa
import numpy as np
from ray import workflow
from ray.workflow.tests import utils
from ray.workflow import workflow_storage
@ray.remote
def checkpoint_dag(checkpoint):
@ray.remote
def large_input():
return np.arange(2 ** 24)
@ray.remote
def identity(x):
return x
@ray.remote
def average(x):
return np.mean(x)
x = utils.update_workflow_options(
large_input, name="large_input", checkpoint=checkpoint
).bind()
y = utils.update_workflow_options(
identity, name="identity", checkpoint=checkpoint
).bind(x)
return workflow.continuation(
utils.update_workflow_options(average, name="average").bind(y)
)
def _assert_step_checkpoints(wf_storage, step_id, mode):
result = wf_storage.inspect_step(step_id)
if mode == "all_skipped":
assert not result.output_object_valid
assert result.output_step_id is None
assert not result.args_valid
assert not result.func_body_valid
assert not result.step_options
elif mode == "output_skipped":
assert not result.output_object_valid
assert result.output_step_id is None
assert result.args_valid
assert result.func_body_valid
assert result.step_options is not None
elif mode == "checkpointed":
assert result.output_object_valid or result.output_step_id is not None
else:
raise ValueError("Unknown mode.")
def test_checkpoint_dag_skip_all(workflow_start_regular_shared):
outputs = utils.run_workflow_dag_with_options(
checkpoint_dag,
(False,),
workflow_id="checkpoint_skip",
name="checkpoint_dag",
checkpoint=False,
)
assert np.isclose(outputs, 8388607.5)
recovered = ray.get(workflow.resume("checkpoint_skip"))
assert np.isclose(recovered, 8388607.5)
wf_storage = workflow_storage.WorkflowStorage("checkpoint_skip")
_assert_step_checkpoints(wf_storage, "checkpoint_dag", mode="output_skipped")
_assert_step_checkpoints(wf_storage, "large_input", mode="all_skipped")
_assert_step_checkpoints(wf_storage, "identity", mode="all_skipped")
_assert_step_checkpoints(wf_storage, "average", mode="all_skipped")
def test_checkpoint_dag_skip_partial(workflow_start_regular_shared):
outputs = utils.run_workflow_dag_with_options(
checkpoint_dag,
(False,),
workflow_id="checkpoint_partial",
name="checkpoint_dag",
)
assert np.isclose(outputs, 8388607.5)
recovered = ray.get(workflow.resume("checkpoint_partial"))
assert np.isclose(recovered, 8388607.5)
wf_storage = workflow_storage.WorkflowStorage("checkpoint_partial")
_assert_step_checkpoints(wf_storage, "checkpoint_dag", mode="checkpointed")
_assert_step_checkpoints(wf_storage, "large_input", mode="output_skipped")
_assert_step_checkpoints(wf_storage, "identity", mode="output_skipped")
_assert_step_checkpoints(wf_storage, "average", mode="checkpointed")
def test_checkpoint_dag_full(workflow_start_regular_shared):
outputs = utils.run_workflow_dag_with_options(
checkpoint_dag, (True,), workflow_id="checkpoint_whole", name="checkpoint_dag"
)
assert np.isclose(outputs, 8388607.5)
recovered = ray.get(workflow.resume("checkpoint_whole"))
assert np.isclose(recovered, 8388607.5)
wf_storage = workflow_storage.WorkflowStorage("checkpoint_whole")
_assert_step_checkpoints(wf_storage, "checkpoint_dag", mode="checkpointed")
_assert_step_checkpoints(wf_storage, "large_input", mode="checkpointed")
_assert_step_checkpoints(wf_storage, "identity", mode="checkpointed")
_assert_step_checkpoints(wf_storage, "average", mode="checkpointed")
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
# -*- coding: UTF-8 -*-
import numpy as np
from scipy.optimize import minimize
from scipy.linalg import kron, circulant, inv
from scipy.sparse.linalg import cg
from scipy.sparse import csr_matrix, diags
from scipy.io import mmwrite, mmread
# ----------------
#MINIMIZE = True
MINIMIZE = False
COST = 0
#method = 'CG'
method = 'BFGS'
tol = 1.e-7
verbose = True
MAXITER = 100
FREQ = 10
list_iters = list(range(0,MAXITER,FREQ))[1:]
# ----------------
norm = lambda M: np.linalg.norm(M, 'fro')
# ...
M = mmread("figa/My.mtx")
M = M.tocsr()
# ...
# ...
#from scipy.linalg import toeplitz
#import numpy as np
#p = 3
#n = 128
#a=np.zeros(n) ; b=np.zeros(n)
#a[:p+1] = np.random.random(p+1)
#a[n-p:] = np.random.random(p)
#b[:p+1] = np.random.random(p+1)
#b[n-p:] = np.random.random(p)
#a[0] = 1.
#b[0] = a[0]
#M = toeplitz(a,b)
# ...
# ...
if MINIMIZE:
M = csr_matrix(M)
diag = M.diagonal()
shift = 0
D = diags(diag,shift)
Z = M-D
n,m = M.shape
# ...
# ...
def cost0(c):
C = circulant(c)
nr = norm(M-C)
return nr
# ...
# ...
def cost1(c):
C = circulant(c)
invC = inv(C)
I = np.eye(n)
nr = norm(I-invC*M)
return nr
# ...
# ...
def cost2(c):
C = circulant(c)
nr = norm(Z-C)
return nr
# ...
# ...
if COST == 0:
cost = cost0
if COST == 1:
cost = cost1
if COST == 2:
cost = cost2
# ...
# ...
x0 = np.zeros(n)
x0[0] = 1.
res = minimize( cost, x0 \
, method=method \
, options={'gtol': tol, 'disp': verbose})
c = res.x
# ...
else:
    # ...
    # Build the circulant approximation directly: c[k] is the average of M
    # along its k-th wrapped diagonal (for a symmetric M this coincides with
    # the Frobenius-norm-optimal circulant approximation).
    n,m = M.shape
    MD = M.todense()
    c = np.zeros(n)
    for k in range(0,n):
        c1 = 0.; c2 = 0.
        for i in range(0,n-k):
            c1 += MD[i,k+i]        # entries on/above the k-th diagonal
        for i in range(n-k,n):
            c2 += MD[i,k+i-n]      # wrapped-around entries below the diagonal
        c[k] = ( c1 + c2 ) / n
# ...
# ...
C = circulant(c)
invC = inv(C)
C = csr_matrix(C)
# ...
# ...
if COST in [0,1]:
P = invC
if COST in [2]:
_P = D + C
_P = _P.todense()
P = inv(_P)
# idiag = np.array([1./d for d in diag])
# shift = 0
# invD = diags(idiag,shift)
# P = invD + invC
# ...
# ...
b = np.ones(n)
print("----- stand-alone cg ----")
for maxiter in list_iters:
x,niter = cg(M, b, maxiter=maxiter)
print("Error after ", maxiter, " iterations : ", np.linalg.norm(M*x-b))
print("----- preconditionned cg ----")
for maxiter in list_iters:
x,niter = cg(M, b, M=P, maxiter=maxiter)
print("Error after ", maxiter, " iterations : ", np.linalg.norm(M*x-b))
# ...
mmwrite("/home/ratnani/M.mtx", M)
mmwrite("/home/ratnani/P.mtx", C)
|
"""
Number of Connected Components in an Undirected Graph
You have a graph of n nodes. You are given an integer n
and an array edges where edges[i] = [ai, bi] indicates that there is an edge between ai and bi in the graph.
Return the number of connected components in the graph.
Example 1:
Input: n = 5, edges = [[0,1],[1,2],[3,4]]
Output: 2
Example 2:
Input: n = 5, edges = [[0,1],[1,2],[2,3],[3,4]]
Output: 1
Constraints:
1 <= n <= 2000
1 <= edges.length <= 5000
edges[i].length == 2
0 <= ai <= bi < n
ai != bi
There are no repeated edges.
"""
class Solution:
def countComponents(self, n, edges):
count = 0
graph = [[] for _ in range(n)]
seen = [False for _ in range(n)]
for a, b in edges:
graph[a].append(b)
graph[b].append(a)
        def dfs(node):
            # Iterative DFS: with n up to 2000, a recursive DFS on a path
            # graph would exceed Python's default recursion limit.
            stack = [node]
            while stack:
                cur = stack.pop()
                for adj in graph[cur]:
                    if not seen[adj]:
                        seen[adj] = True
                        stack.append(adj)
for i in range(n):
if not seen[i]:
count += 1
seen[i] = True
dfs(i)
return count
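# ---------------------------------------------------------------------------
# A minimal union-find sketch (not part of the problem statement or the DFS
# solution above) showing an alternative way to count components: start with
# n components and merge one pair of disjoint sets per edge.
def count_components_union_find(n, edges):
    parent = list(range(n))

    def find(x):
        # Path halving: walk to the root, shortening the path as we go.
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    count = n
    for a, b in edges:
        ra, rb = find(a), find(b)
        if ra != rb:
            parent[ra] = rb
            count -= 1  # two components merged into one
    return count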
|
from .autogen import DocumentationGenerator
from .gathering_members import get_methods
from .gathering_members import get_classes
from .gathering_members import get_functions
|
import torch
import torch.nn as nn
import numpy as np
import model_search
from genotypes import PRIMITIVES
from genotypes import Genotype
import torch.nn.functional as F
from operations import *
class AutoDeeplab (nn.Module) :
def __init__(self, num_classes, num_layers, criterion, num_channel = 20, multiplier = 5, step = 5, cell=model_search.Cell, crop_size=320, lambda_latency=0.0004):
super(AutoDeeplab, self).__init__()
self.level_2 = []
self.level_4 = []
self.level_8 = []
self.level_16 = []
self.level_32 = []
self.cells = nn.ModuleList()
self._num_layers = num_layers
self._num_classes = num_classes
self._step = step
self._multiplier = multiplier
self._num_channel = num_channel
self._crop_size = crop_size
self._criterion = criterion
self._initialize_alphas ()
self.lambda_latency=lambda_latency
self.stem0 = nn.Sequential(
nn.Conv2d(3, 64, 3, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.ReLU ()
)
self.stem1 = nn.Sequential(
nn.Conv2d(64, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU ()
)
self.stem2 = nn.Sequential(
nn.Conv2d(64, 128, 3, stride=2, padding=1),
nn.BatchNorm2d(128),
nn.ReLU ()
)
C_prev_prev = 64
C_prev = 128
for i in range (self._num_layers) :
# def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, rate) : rate = 0 , 1, 2 reduce rate
if i == 0 :
cell1 = cell (self._step, self._multiplier, -1, C_prev, self._num_channel, 1)
cell2 = cell (self._step, self._multiplier, -1, C_prev, self._num_channel * 2, 2)
self.cells += [cell1]
self.cells += [cell2]
elif i == 1 :
cell1_1 = cell (self._step, self._multiplier, C_prev, self._num_channel, self._num_channel, 1)
cell1_2 = cell (self._step, self._multiplier, C_prev, self._num_channel * 2, self._num_channel, 0)
cell2_1 = cell (self._step, self._multiplier, -1, self._num_channel, self._num_channel * 2, 2)
cell2_2 = cell (self._step, self._multiplier, -1, self._num_channel * 2, self._num_channel * 2, 1)
cell3 = cell (self._step, self._multiplier, -1, self._num_channel * 2, self._num_channel * 4, 2)
self.cells += [cell1_1]
self.cells += [cell1_2]
self.cells += [cell2_1]
self.cells += [cell2_2]
self.cells += [cell3]
elif i == 2 :
cell1_1 = cell (self._step, self._multiplier, self._num_channel, self._num_channel, self._num_channel, 1)
cell1_2 = cell (self._step, self._multiplier, self._num_channel, self._num_channel * 2, self._num_channel, 0)
cell2_1 = cell (self._step, self._multiplier, self._num_channel * 2, self._num_channel, self._num_channel * 2, 2)
cell2_2 = cell (self._step, self._multiplier, self._num_channel * 2, self._num_channel * 2, self._num_channel * 2, 1)
cell2_3 = cell (self._step, self._multiplier, self._num_channel * 2, self._num_channel * 4, self._num_channel * 2, 0)
cell3_1 = cell (self._step, self._multiplier, -1, self._num_channel * 2, self._num_channel * 4, 2)
cell3_2 = cell (self._step, self._multiplier, -1, self._num_channel * 4, self._num_channel * 4, 1)
cell4 = cell (self._step, self._multiplier, -1, self._num_channel * 4, self._num_channel * 8, 2)
self.cells += [cell1_1]
self.cells += [cell1_2]
self.cells += [cell2_1]
self.cells += [cell2_2]
self.cells += [cell2_3]
self.cells += [cell3_1]
self.cells += [cell3_2]
self.cells += [cell4]
elif i == 3 :
cell1_1 = cell (self._step, self._multiplier, self._num_channel, self._num_channel, self._num_channel, 1)
cell1_2 = cell (self._step, self._multiplier, self._num_channel, self._num_channel * 2, self._num_channel, 0)
cell2_1 = cell (self._step, self._multiplier, self._num_channel * 2, self._num_channel, self._num_channel * 2, 2)
cell2_2 = cell (self._step, self._multiplier, self._num_channel * 2, self._num_channel * 2, self._num_channel * 2, 1)
cell2_3 = cell (self._step, self._multiplier, self._num_channel * 2, self._num_channel * 4, self._num_channel * 2, 0)
cell3_1 = cell (self._step, self._multiplier, self._num_channel * 4, self._num_channel * 2, self._num_channel * 4, 2)
cell3_2 = cell (self._step, self._multiplier, self._num_channel * 4, self._num_channel * 4, self._num_channel * 4, 1)
cell3_3 = cell (self._step, self._multiplier, self._num_channel * 4, self._num_channel * 8, self._num_channel * 4, 0)
cell4_1 = cell (self._step, self._multiplier, -1, self._num_channel * 4, self._num_channel * 8, 2)
cell4_2 = cell (self._step, self._multiplier, -1, self._num_channel * 8, self._num_channel * 8, 1)
self.cells += [cell1_1]
self.cells += [cell1_2]
self.cells += [cell2_1]
self.cells += [cell2_2]
self.cells += [cell2_3]
self.cells += [cell3_1]
self.cells += [cell3_2]
self.cells += [cell3_3]
self.cells += [cell4_1]
self.cells += [cell4_2]
else :
cell1_1 = cell (self._step, self._multiplier, self._num_channel, self._num_channel, self._num_channel, 1)
cell1_2 = cell (self._step, self._multiplier, self._num_channel, self._num_channel * 2, self._num_channel, 0)
cell2_1 = cell (self._step, self._multiplier, self._num_channel * 2, self._num_channel, self._num_channel * 2, 2)
cell2_2 = cell (self._step, self._multiplier, self._num_channel * 2, self._num_channel * 2, self._num_channel * 2, 1)
cell2_3 = cell (self._step, self._multiplier, self._num_channel * 2, self._num_channel * 4, self._num_channel * 2, 0)
cell3_1 = cell (self._step, self._multiplier, self._num_channel * 4, self._num_channel * 2, self._num_channel * 4, 2)
cell3_2 = cell (self._step, self._multiplier, self._num_channel * 4, self._num_channel * 4, self._num_channel * 4, 1)
cell3_3 = cell (self._step, self._multiplier, self._num_channel * 4, self._num_channel * 8, self._num_channel * 4, 0)
cell4_1 = cell (self._step, self._multiplier, self._num_channel * 8, self._num_channel * 4, self._num_channel * 8, 2)
cell4_2 = cell (self._step, self._multiplier, self._num_channel * 8, self._num_channel * 8, self._num_channel * 8, 1)
self.cells += [cell1_1]
self.cells += [cell1_2]
self.cells += [cell2_1]
self.cells += [cell2_2]
self.cells += [cell2_3]
self.cells += [cell3_1]
self.cells += [cell3_2]
self.cells += [cell3_3]
self.cells += [cell4_1]
self.cells += [cell4_2]
self.aspp_device=nn.ModuleList()
for i in range(7):
self.aspp_device.append(nn.ModuleList())
for j in range(4):
self.aspp_device[i].append(nn.Sequential (
ASPP (self._num_channel*(2**j), 96//(4*(2**j)), 96//(4*(2**j)), self._num_classes)
)
)
        self.device_output = [[] for _ in range(7)]  # independent lists ([[]]*7 would alias one list)
self.aspp_4 = nn.Sequential (
ASPP (self._num_channel, 24, 24, self._num_classes)
)
self.aspp_8 = nn.Sequential (
ASPP (self._num_channel * 2, 12, 12, self._num_classes)
)
self.aspp_16 = nn.Sequential (
ASPP (self._num_channel * 4, 6, 6, self._num_classes)
)
self.aspp_32 = nn.Sequential (
ASPP (self._num_channel * 8, 3, 3, self._num_classes)
)
def forward (self, x) :
self.level_2 = []
self.level_4 = []
self.level_8 = []
self.level_16 = []
self.level_32 = []
self.la_4=[]
self.la_8=[]
self.la_16=[]
self.la_32=[]
self.la_4.append(0)
self.latency = [[0]*4 for _ in range(7)]
# self._init_level_arr (x)
temp = self.stem0 (x)
self.level_2.append (self.stem1 (temp))
self.level_4.append (self.stem2 (self.level_2[-1]))
weight_cell=F.softmax(self.alphas_cell, dim=-1)
weight_network=F.softmax(self.alphas_network, dim=-1)
weight_part=F.softmax(self.alphas_part, dim=-1)
        device_output = [[] for _ in range(7)]  # one independent list per branch-exit layer ([[]]*7 would alias a single list)
count = 0
for layer in range (self._num_layers) :
if layer == 0 :
level4_new = self.cells[count] (None, self.level_4[-1], weight_cell)
la_4_new = self.cells[count].latency(None, self.la_4[-1], weight_cell)
count += 1
level8_new = self.cells[count] (None, self.level_4[-1], weight_cell)
la_8_new = self.cells[count].latency(None, self.la_4[-1], weight_cell)
count += 1
self.level_4.append (level4_new * weight_network[layer][0][0])
self.level_8.append (level8_new * weight_network[layer][1][0])
self.la_4.append (la_4_new * weight_network[layer][0][0])
self.la_8.append (la_8_new * weight_network[layer][1][0])
# print ((self.level_4[-2]).size (), (self.level_4[-1]).size())
elif layer == 1 :
level4_new_1 = self.cells[count] (self.level_4[-2], self.level_4[-1], weight_cell)
la_4_new_1 = self.cells[count].latency(self.la_4[-2],self.la_4[-1] , weight_cell)
count += 1
level4_new_2 = self.cells[count] (self.level_4[-2], self.level_8[-1], weight_cell)
la_4_new_2 = self.cells[count].latency(self.la_4[-2], self.la_8[-1], weight_cell)
count += 1
level4_new = weight_network[layer][0][0] * level4_new_1 + weight_network[layer][0][1] * level4_new_2
la_4_new = weight_network[layer][0][0] * la_4_new_1 + weight_network[layer][0][1] * la_4_new_2
level8_new_1 = self.cells[count] (None, self.level_4[-1], weight_cell)
la_8_new_1 = self.cells[count].latency(None, self.la_4[-1], weight_cell)
count += 1
level8_new_2 = self.cells[count] (None, self.level_8[-1], weight_cell)
la_8_new_2 = self.cells[count].latency(None, self.la_8[-1], weight_cell)
count += 1
level8_new = weight_network[layer][1][0] * level8_new_1 + weight_network[layer][1][1] * level8_new_2
la_8_new = weight_network[layer][1][0] * la_8_new_1 + weight_network[layer][1][1] * la_8_new_2
level16_new = self.cells[count] (None, self.level_8[-1], weight_cell)
la_16_new = self.cells[count].latency (None, self.la_8[-1], weight_cell)
level16_new = level16_new * weight_network[layer][2][0]
la_16_new = la_16_new * weight_network[layer][2][0]
count += 1
self.level_4.append (level4_new)
self.level_8.append (level8_new)
self.level_16.append (level16_new)
self.la_4.append (la_4_new)
self.la_8.append (la_8_new)
self.la_16.append (la_16_new)
elif layer == 2 :
level4_new_1 = self.cells[count] (self.level_4[-2], self.level_4[-1], weight_cell)
la_4_new_1 = self.cells[count].latency (self.la_4[-2], self.la_4[-1], weight_cell)
count += 1
level4_new_2 = self.cells[count] (self.level_4[-2], self.level_8[-1], weight_cell)
la_4_new_2 = self.cells[count].latency (self.la_4[-2], self.la_8[-1], weight_cell)
count += 1
level4_new = weight_network[layer][0][0] * level4_new_1 + weight_network[layer][0][1] * level4_new_2
la_4_new = weight_network[layer][0][0] * la_4_new_1 + weight_network[layer][0][1] * la_4_new_2
level8_new_1 = self.cells[count] (self.level_8[-2], self.level_4[-1], weight_cell)
la_8_new_1 = self.cells[count].latency (self.la_8[-2], self.la_4[-1], weight_cell)
count += 1
level8_new_2 = self.cells[count] (self.level_8[-2], self.level_8[-1], weight_cell)
la_8_new_2 = self.cells[count].latency (self.la_8[-2], self.la_8[-1], weight_cell)
count += 1
# print (self.level_8[-1].size(),self.level_16[-1].size())
level8_new_3 = self.cells[count] (self.level_8[-2], self.level_16[-1], weight_cell)
la_8_new_3 = self.cells[count].latency (self.la_8[-2], self.la_16[-1], weight_cell)
count += 1
level8_new = weight_network[layer][1][0] * level8_new_1 + weight_network[layer][1][1] * level8_new_2 + weight_network[layer][1][2] * level8_new_3
la_8_new = weight_network[layer][1][0] * la_8_new_1 + weight_network[layer][1][1] * la_8_new_2 +weight_network[layer][1][2] * la_8_new_3
level16_new_1 = self.cells[count] (None, self.level_8[-1], weight_cell)
la_16_new_1 = self.cells[count].latency (None, self.la_8[-1], weight_cell)
count += 1
level16_new_2 = self.cells[count] (None, self.level_16[-1], weight_cell)
la_16_new_2 = self.cells[count].latency (None, self.la_16[-1], weight_cell)
count += 1
la_16_new = weight_network[layer][2][0] * la_16_new_1 + weight_network[layer][2][1] * la_16_new_2
level16_new = weight_network[layer][2][0] * level16_new_1 + weight_network[layer][2][1] * level16_new_2
level32_new = self.cells[count] (None, self.level_16[-1], weight_cell)
la_32_new = self.cells[count].latency (None, self.la_16[-1], weight_cell)
level32_new = level32_new * weight_network[layer][3][0]
la_32_new = la_32_new * weight_network[layer][3][0]
count += 1
self.level_4.append (level4_new)
self.level_8.append (level8_new)
self.level_16.append (level16_new)
self.level_32.append (level32_new)
self.la_4.append (la_4_new)
self.la_8.append (la_8_new)
self.la_16.append (la_16_new)
self.la_32.append (la_32_new)
elif layer == 3 :
level4_new_1 = self.cells[count] (self.level_4[-2], self.level_4[-1], weight_cell)
la_4_new_1 = self.cells[count].latency (self.la_4[-2], self.la_4[-1], weight_cell)
count += 1
level4_new_2 = self.cells[count] (self.level_4[-2], self.level_8[-1], weight_cell)
la_4_new_2 = self.cells[count].latency (self.la_4[-2], self.la_8[-1], weight_cell)
count += 1
level4_new = weight_network[layer][0][0] * level4_new_1 + weight_network[layer][0][1] * level4_new_2
la_4_new = weight_network[layer][0][0] * la_4_new_1 + weight_network[layer][0][1] * la_4_new_2
level8_new_1 = self.cells[count] (self.level_8[-2], self.level_4[-1], weight_cell)
la_8_new_1 = self.cells[count].latency (self.la_8[-2], self.la_4[-1], weight_cell)
count += 1
level8_new_2 = self.cells[count] (self.level_8[-2], self.level_8[-1], weight_cell)
la_8_new_2 = self.cells[count].latency (self.la_8[-2], self.la_8[-1], weight_cell)
count += 1
level8_new_3 = self.cells[count] (self.level_8[-2], self.level_16[-1], weight_cell)
la_8_new_3 = self.cells[count].latency (self.la_8[-2], self.la_16[-1], weight_cell)
count += 1
level8_new = weight_network[layer][1][0] * level8_new_1 + weight_network[layer][1][1] * level8_new_2 + weight_network[layer][1][2] * level8_new_3
la_8_new = weight_network[layer][1][0] * la_8_new_1 + weight_network[layer][1][1] * la_8_new_2 + weight_network[layer][1][2] * la_8_new_3
level16_new_1 = self.cells[count] (self.level_16[-2], self.level_8[-1], weight_cell)
la_16_new_1 = self.cells[count].latency (self.la_16[-2], self.la_8[-1], weight_cell)
count += 1
level16_new_2 = self.cells[count] (self.level_16[-2], self.level_16[-1], weight_cell)
la_16_new_2 = self.cells[count].latency (self.la_16[-2], self.la_16[-1], weight_cell)
count += 1
level16_new_3 = self.cells[count] (self.level_16[-2], self.level_32[-1], weight_cell)
la_16_new_3 = self.cells[count].latency (self.la_16[-2], self.la_32[-1], weight_cell)
count += 1
level16_new = weight_network[layer][2][0] * level16_new_1 + weight_network[layer][2][1] * level16_new_2 + weight_network[layer][2][2] * level16_new_3
la_16_new = weight_network[layer][2][0] * la_16_new_1 + weight_network[layer][2][1] * la_16_new_2 + weight_network[layer][2][2] * la_16_new_3
level32_new_1 = self.cells[count] (None, self.level_16[-1], weight_cell)
la_32_new_1 = self.cells[count].latency (None, self.la_16[-1], weight_cell)
count += 1
level32_new_2 = self.cells[count] (None, self.level_32[-1], weight_cell)
la_32_new_2 = self.cells[count].latency (None, self.la_32[-1], weight_cell)
count += 1
level32_new = weight_network[layer][3][0] * level32_new_1 + weight_network[layer][3][1] * level32_new_2
la_32_new = weight_network[layer][3][0] * la_32_new_1 + weight_network[layer][3][1] * la_32_new_2
self.level_4.append (level4_new)
self.level_8.append (level8_new)
self.level_16.append (level16_new)
self.level_32.append (level32_new)
self.la_4.append (la_4_new)
self.la_8.append (la_8_new)
self.la_16.append (la_16_new)
self.la_32.append (la_32_new)
else :
level4_new_1 = self.cells[count] (self.level_4[-2], self.level_4[-1], weight_cell)
la_4_new_1 = self.cells[count].latency (self.la_4[-2], self.la_4[-1], weight_cell)
count += 1
level4_new_2 = self.cells[count] (self.level_4[-2], self.level_8[-1], weight_cell)
la_4_new_2 = self.cells[count].latency (self.la_4[-2], self.la_8[-1], weight_cell)
count += 1
level4_new = weight_network[layer][0][0] * level4_new_1 + weight_network[layer][0][1] * level4_new_2
la_4_new = weight_network[layer][0][0] * la_4_new_1 + weight_network[layer][0][1] * la_4_new_2
if layer<11:
device_output[layer-4].append(self.aspp_device[layer-4][0](level4_new))
self.latency[layer-4][0]=la_4_new
level8_new_1 = self.cells[count] (self.level_8[-2], self.level_4[-1], weight_cell)
la_8_new_1 = self.cells[count].latency (self.la_8[-2], self.la_4[-1], weight_cell)
count += 1
level8_new_2 = self.cells[count] (self.level_8[-2], self.level_8[-1], weight_cell)
la_8_new_2 = self.cells[count].latency (self.la_8[-2], self.la_8[-1], weight_cell)
count += 1
level8_new_3 = self.cells[count] (self.level_8[-2], self.level_16[-1], weight_cell)
la_8_new_3 = self.cells[count].latency (self.la_8[-2], self.la_16[-1], weight_cell)
count += 1
level8_new = weight_network[layer][1][0] * level8_new_1 + weight_network[layer][1][1] * level8_new_2 + weight_network[layer][1][2] * level8_new_3
la_8_new = weight_network[layer][1][0] * la_8_new_1 + weight_network[layer][1][1] * la_8_new_2 + weight_network[layer][1][2] * la_8_new_3
if layer<11:
device_output[layer-4].append(self.aspp_device[layer-4][1](level8_new))
self.latency[layer-4][1]=la_8_new
level16_new_1 = self.cells[count] (self.level_16[-2], self.level_8[-1], weight_cell)
la_16_new_1 = self.cells[count].latency (self.la_16[-2], self.la_8[-1], weight_cell)
count += 1
level16_new_2 = self.cells[count] (self.level_16[-2], self.level_16[-1], weight_cell)
la_16_new_2 = self.cells[count].latency (self.la_16[-2], self.la_16[-1], weight_cell)
count += 1
level16_new_3 = self.cells[count] (self.level_16[-2], self.level_32[-1], weight_cell)
la_16_new_3 = self.cells[count].latency (self.la_16[-2], self.la_32[-1], weight_cell)
count += 1
level16_new = weight_network[layer][2][0] * level16_new_1 + weight_network[layer][2][1] * level16_new_2 + weight_network[layer][2][2] * level16_new_3
la_16_new = weight_network[layer][2][0] * la_16_new_1 + weight_network[layer][2][1] * la_16_new_2 + weight_network[layer][2][2] * la_16_new_3
if layer<11:
device_output[layer-4].append(self.aspp_device[layer-4][2](level16_new))
self.latency[layer-4][2]=la_16_new
level32_new_1 = self.cells[count] (self.level_32[-2], self.level_16[-1], weight_cell)
la_32_new_1 = self.cells[count].latency (self.la_32[-2], self.la_16[-1], weight_cell)
count += 1
level32_new_2 = self.cells[count] (self.level_32[-2], self.level_32[-1], weight_cell)
la_32_new_2 = self.cells[count].latency (self.la_32[-2], self.la_32[-1], weight_cell)
count += 1
level32_new = weight_network[layer][3][0] * level32_new_1 + weight_network[layer][3][1] * level32_new_2
la_32_new = weight_network[layer][3][0] * la_32_new_1 + weight_network[layer][3][1] * la_32_new_2
if layer<11:
device_output[layer-4].append(self.aspp_device[layer-4][3](level32_new))
self.latency[layer-4][3]=la_32_new
self.level_4.append (level4_new)
self.level_8.append (level8_new)
self.level_16.append (level16_new)
self.level_32.append (level32_new)
self.la_4.append (la_4_new)
self.la_8.append (la_8_new)
self.la_16.append (la_16_new)
self.la_32.append (la_32_new)
# print (self.level_4[-1].size(),self.level_8[-1].size(),self.level_16[-1].size(),self.level_32[-1].size())
# concate_feature_map = torch.cat ([self.level_4[-1], self.level_8[-1],self.level_16[-1], self.level_32[-1]], 1)
aspp_result_4 = self.aspp_4 (self.level_4[-1])
aspp_result_8 = self.aspp_8 (self.level_8[-1])
aspp_result_16 = self.aspp_16 (self.level_16[-1])
aspp_result_32 = self.aspp_32 (self.level_32[-1])
upsample = nn.Upsample(size=x.size()[2:], mode='bilinear', align_corners=True)
aspp_result_4 = upsample (aspp_result_4)
aspp_result_8 = upsample (aspp_result_8)
aspp_result_16 = upsample (aspp_result_16)
aspp_result_32 = upsample (aspp_result_32)
sum_feature_map1 = torch.add (aspp_result_4, aspp_result_8)
sum_feature_map2 = torch.add (aspp_result_16, aspp_result_32)
sum_feature_map = torch.add (sum_feature_map1, sum_feature_map2)
device_out=[0]*7
for i in range(len(device_output)):
device_output[i][0] = upsample (device_output[i][0])
device_output[i][1] = upsample (device_output[i][1])
device_output[i][2] = upsample (device_output[i][2])
device_output[i][3] = upsample (device_output[i][3])
#device_out[i] = torch.add(device_output[i][0],device_output[i][1],device_output[i][2],device_output[i][3])
add1=torch.add(device_output[i][0],device_output[i][1])
add2=torch.add(device_output[i][2],device_output[i][3])
device_out[i]=torch.add(add1,add2)
device_out[i]=device_out[i]*weight_part[i]
device_logits=device_out[0]
for i in range(1,len(device_out)):
device_logits=torch.add(device_out[i], device_logits)
#device_out=torch.sum(device_out)
latency_loss=[0]*7
for i in range(7):
for j in range(4):
latency_loss[i]+=self.latency[i][j]
latency_loss[i]*=weight_part[i]
total_latency_loss=sum(latency_loss)
return sum_feature_map, device_logits, total_latency_loss
def _initialize_alphas(self):
k = sum(1 for i in range(self._step) for n in range(2+i))
num_ops = len(PRIMITIVES)
self.alphas_cell = torch.tensor (1e-3*torch.randn(k, num_ops).cuda(), requires_grad=True)
self.alphas_network = torch.tensor (1e-3*torch.randn(self._num_layers, 4, 3).cuda(), requires_grad=True)
self.alphas_part = torch.tensor (1e-3*torch.randn(7).cuda(), requires_grad=True)
# self.alphas_cell = self.alphas_cell.cuda ()
# self.alphas_network = self.alphas_network.cuda ()
self._arch_parameters = [
self.alphas_cell,
self.alphas_network,
self.alphas_part
]
def decode_network (self) :
best_result = []
max_prop = 0
def _parse (weight_network, layer, curr_value, curr_result, last) :
nonlocal best_result
nonlocal max_prop
if layer == self._num_layers :
if max_prop < curr_value :
# print (curr_result)
best_result = curr_result[:]
max_prop = curr_value
return
if layer == 0 :
print ('begin0')
num = 0
if last == num :
curr_value = curr_value * weight_network[layer][num][0]
curr_result.append ([num,0])
_parse (weight_network, layer + 1, curr_value, curr_result, 0)
curr_value = curr_value / weight_network[layer][num][0]
curr_result.pop ()
print ('end0-1')
curr_value = curr_value * weight_network[layer][num][1]
curr_result.append ([num,1])
_parse (weight_network, layer + 1, curr_value, curr_result, 1)
curr_value = curr_value / weight_network[layer][num][1]
curr_result.pop ()
elif layer == 1 :
print ('begin1')
num = 0
if last == num :
curr_value = curr_value * weight_network[layer][num][0]
curr_result.append ([num,0])
_parse (weight_network, layer + 1, curr_value, curr_result, 0)
curr_value = curr_value / weight_network[layer][num][0]
curr_result.pop ()
print ('end1-1')
curr_value = curr_value * weight_network[layer][num][1]
curr_result.append ([num,1])
_parse (weight_network, layer + 1, curr_value, curr_result, 1)
curr_value = curr_value / weight_network[layer][num][1]
curr_result.pop ()
num = 1
if last == num :
curr_value = curr_value * weight_network[layer][num][0]
curr_result.append ([num,0])
_parse (weight_network, layer + 1, curr_value, curr_result, 0)
curr_value = curr_value / weight_network[layer][num][0]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][1]
curr_result.append ([num,1])
_parse (weight_network, layer + 1, curr_value, curr_result, 1)
curr_value = curr_value / weight_network[layer][num][1]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][2]
curr_result.append ([num,2])
_parse (weight_network, layer + 1, curr_value, curr_result, 2)
curr_value = curr_value / weight_network[layer][num][2]
curr_result.pop ()
elif layer == 2 :
print ('begin2')
num = 0
if last == num :
curr_value = curr_value * weight_network[layer][num][0]
curr_result.append ([num,0])
_parse (weight_network, layer + 1, curr_value, curr_result, 0)
curr_value = curr_value / weight_network[layer][num][0]
curr_result.pop ()
print ('end2-1')
curr_value = curr_value * weight_network[layer][num][1]
curr_result.append ([num,1])
_parse (weight_network, layer + 1, curr_value, curr_result, 1)
curr_value = curr_value / weight_network[layer][num][1]
curr_result.pop ()
num = 1
if last == num :
curr_value = curr_value * weight_network[layer][num][0]
curr_result.append ([num,0])
_parse (weight_network, layer + 1, curr_value, curr_result, 0)
curr_value = curr_value / weight_network[layer][num][0]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][1]
curr_result.append ([num,1])
_parse (weight_network, layer + 1, curr_value, curr_result, 1)
curr_value = curr_value / weight_network[layer][num][1]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][2]
curr_result.append ([num,2])
_parse (weight_network, layer + 1, curr_value, curr_result, 2)
curr_value = curr_value / weight_network[layer][num][2]
curr_result.pop ()
num = 2
if last == num :
curr_value = curr_value * weight_network[layer][num][0]
curr_result.append ([num,0])
_parse (weight_network, layer + 1, curr_value, curr_result, 1)
curr_value = curr_value / weight_network[layer][num][0]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][1]
curr_result.append ([num,1])
_parse (weight_network, layer + 1, curr_value, curr_result, 2)
curr_value = curr_value / weight_network[layer][num][1]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][2]
curr_result.append ([num,2])
_parse (weight_network, layer + 1, curr_value, curr_result, 3)
curr_value = curr_value / weight_network[layer][num][2]
curr_result.pop ()
else :
num = 0
if last == num :
curr_value = curr_value * weight_network[layer][num][0]
curr_result.append ([num,0])
_parse (weight_network, layer + 1, curr_value, curr_result, 0)
curr_value = curr_value / weight_network[layer][num][0]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][1]
curr_result.append ([num,1])
_parse (weight_network, layer + 1, curr_value, curr_result, 1)
curr_value = curr_value / weight_network[layer][num][1]
curr_result.pop ()
num = 1
if last == num :
curr_value = curr_value * weight_network[layer][num][0]
curr_result.append ([num,0])
_parse (weight_network, layer + 1, curr_value, curr_result, 0)
curr_value = curr_value / weight_network[layer][num][0]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][1]
curr_result.append ([num,1])
_parse (weight_network, layer + 1, curr_value, curr_result, 1)
curr_value = curr_value / weight_network[layer][num][1]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][2]
curr_result.append ([num,2])
_parse (weight_network, layer + 1, curr_value, curr_result, 2)
curr_value = curr_value / weight_network[layer][num][2]
curr_result.pop ()
num = 2
if last == num :
curr_value = curr_value * weight_network[layer][num][0]
curr_result.append ([num,0])
_parse (weight_network, layer + 1, curr_value, curr_result, 1)
curr_value = curr_value / weight_network[layer][num][0]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][1]
curr_result.append ([num,1])
_parse (weight_network, layer + 1, curr_value, curr_result, 2)
curr_value = curr_value / weight_network[layer][num][1]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][2]
curr_result.append ([num,2])
_parse (weight_network, layer + 1, curr_value, curr_result, 3)
curr_value = curr_value / weight_network[layer][num][2]
curr_result.pop ()
num = 3
if last == num :
curr_value = curr_value * weight_network[layer][num][0]
curr_result.append ([num,0])
_parse (weight_network, layer + 1, curr_value, curr_result, 2)
curr_value = curr_value / weight_network[layer][num][0]
curr_result.pop ()
curr_value = curr_value * weight_network[layer][num][1]
curr_result.append ([num,1])
_parse (weight_network, layer + 1, curr_value, curr_result, 3)
curr_value = curr_value / weight_network[layer][num][1]
curr_result.pop ()
network_weight = F.softmax(self.alphas_network, dim=-1) * 5
network_weight = network_weight.data.cpu().numpy()
_parse (network_weight, 0, 1, [],0)
print (max_prop)
return best_result
def arch_parameters (self) :
return self._arch_parameters
def genotype(self):
def _parse(weights):
gene = []
n = 2
start = 0
for i in range(self._step):
end = start + n
W = weights[start:end].copy()
edges = sorted (range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[:2]
for j in edges:
k_best = None
for k in range(len(W[j])):
if k != PRIMITIVES.index('none'):
if k_best is None or W[j][k] > W[j][k_best]:
k_best = k
gene.append((PRIMITIVES[k_best], j))
start = end
n += 1
return gene
gene_cell = _parse(F.softmax(self.alphas_cell, dim=-1).data.cpu().numpy())
concat = range(2+self._step-self._multiplier, self._step+2)
genotype = Genotype(
cell=gene_cell, cell_concat=concat
)
return genotype
def softmax_network(self):
weight_network=F.softmax(self.alphas_network, dim = -1).clone().detach()
helper1=torch.ones(1).cuda()
helper2=torch.ones(2).cuda()
helper1=1
helper2[0]=1
for layer in range(12):
if layer==0:
weight_network[layer][0][0]=torch.ones(1, requires_grad=True)
weight_network[layer][1][0]=torch.ones(1, requires_grad=True)
if layer==1:
weight_network[layer][0][:2]=F.softmax(weight_network[layer][0][:2], dim = -1)
weight_network[layer][2][0]=torch.ones(1, requires_grad=True)
if layer==2:
weight_network[layer][0][:2]=F.softmax(weight_network[layer][0][:2], dim = -1)
weight_network[layer][2][:2]=F.softmax(weight_network[layer][2][:2], dim = -1)
weight_network[layer][3][0]=torch.ones(1, requires_grad=True)
else:
weight_network[layer][0][:2]=F.softmax(weight_network[layer][0][:2], dim = -1)
weight_network[layer][3][:2]=F.softmax(weight_network[layer][3][:2], dim = -1)
return weight_network
def _loss (self, input, target) :
logits, device_logits, latency_loss= self (input)
lambda_latency=self.lambda_latency
stem_latency = 7.8052422817230225
latency_loss = latency_loss + stem_latency
#weight_part=F.softmax(self.alphas_part, dim = -1)
# for i in range(len(device_logits)):
# device_loss.append(self._criterion(device_logits[i], target)* self.alphas_part[i])
# device_loss=sum(device_loss)
loss = self._criterion (logits, target)
device_loss=self._criterion(device_logits, target)
return logits, device_logits, device_loss + loss + lambda_latency*latency_loss, lambda_latency*latency_loss, loss, device_loss
def main () :
model = AutoDeeplab (5, 12, None)
    x = torch.ones (4, 3, 224, 224)
result = model.decode_network ()
print (result)
print (model.genotype())
# x = x.cuda()
# y = model (x)
# print (model.arch_parameters ())
# print (y.size())
if __name__ == '__main__' :
main ()
|
from .individual_history_resource import *
|
"""
This file includes the reimplementations of GSLIB functionality in Python. While
this code will not be as well-tested and robust as the original GSLIB, it does
provide the opportunity to build 2D spatial modeling projects in Python without
the need to rely on compiled Fortran code from GSLIB. If you want to use the
GSLIB compiled code called from Python workflows use the functions available
with geostatspy.GSLIB.
This file includes the (1) GSLIB subroutines (converted to Python), followed by
the (2) functions: declus, gam, gamv, nscore, kb2d (more added all the time)
Note: some GSLIB subroutines are not included as they were replaced by available
NumPy and SciPy functionality or they were not required as we don't have to deal
with graphics and files in the same manner as GSLIB.
The original GSLIB code is from GSLIB: Geostatistical Library by Deutsch and
Journel, 1998. The reimplementation is by Michael Pyrcz, Associate Professor,
the University of Texas at Austin.
"""
import math # for trig functions etc.
from copy import copy # to not change objects passed by ref
from bisect import bisect # for maintaining array elements sorted
import numpy as np # for ndarrays
import numpy.linalg as linalg # for linear algebra
import scipy.spatial as sp # for fast nearest neighbor search
import numba as nb # for numerical speed up
from numba import jit # for numerical speed up
from numba.typed import Dict # typing of dictionaries
from statsmodels.stats.weightstats import DescrStatsW
JITKW = dict(nopython=True, cache=True, fastmath=True)
JITPL = dict(parallel=False)
def backtr(df, vcol, vr, vrg, zmin, zmax, ltail, ltpar, utail, utpar):
"""Back transform an entire DataFrame column with a provided transformation table and tail extrapolation.
:param df: the source DataFrame
:param vcol: the column with the variable to transfrom
:param vr: the transformation table, 1D ndarray with the original values
:param vrg: the transformation table, 1D ndarray with the trasnformed variable
:param zmin: lower trimming limits
:param zmax: upper trimming limits
:param ltail: lower tail value
:param ltpar: lower tail extrapolation parameter
:param utail: upper tail value
:param utpar: upper tail extrapolation parameter
    :return: 1D ndarray with the back transformed values
"""
EPSLON = 1.0e-20
nd = len(df)
nt = len(vr) # number of data to transform and number of data in table
backtr = np.zeros(nd)
vrgs = df[vcol].values
# Value in the lower tail? 1=linear, 2=power, (3 and 4 are invalid):
for id in range(0, nd):
if vrgs[id] <= vrg[0]:
backtr[id] = vr[0]
cdflo = gcum(vrg[0])
cdfbt = gcum(vrgs[id])
if ltail == 1:
backtr[id] = powint(0.0, cdflo, zmin, vr[0], cdfbt, 1.0)
elif ltail == 2:
cpow = 1.0 / ltpar
backtr[id] = powint(0.0, cdflo, zmin, vr[0], cdfbt, cpow)
# Value in the upper tail? 1=linear, 2=power, 4=hyperbolic:
elif vrgs[id] >= vrg[nt-1]:
backtr[id] = vr[nt-1]
cdfhi = gcum(vrg[nt-1])
cdfbt = gcum(vrgs[id])
if utail == 1:
backtr[id] = powint(cdfhi, 1.0, vr[nt-1], zmax, cdfbt, 1.0)
elif utail == 2:
cpow = 1.0 / utpar
backtr[id] = powint(cdfhi, 1.0, vr[nt-1], zmax, cdfbt, cpow)
elif utail == 4:
plambda = (vr[nt-1]**utpar)*(1.0-gcum(vrg[nt-1]))
                backtr[id] = (plambda/(1.0-gcum(vrgs[id])))**(1.0/utpar)
else:
# Value within the transformation table:
j = locate(vrg, 1, nt, vrgs[id])
j = max(min((nt-2), j), 1)
backtr[id] = powint(vrg[j], vrg[j+1], vr[j],
vr[j+1], vrgs[id], 1.0)
return backtr
def backtr_value(vrgs, vr, vrg, zmin, zmax, ltail, ltpar, utail, utpar):
"""Back transform a single value with a provided transformation table and tail extrapolation.
:param vrgs: value to transform
:param vr: the transformation table, 1D ndarray with the original values
    :param vrg: the transformation table, 1D ndarray with the transformed variable
:param zmin: lower trimming limits
:param zmax: upper trimming limits
:param ltail: lower tail value
:param ltpar: lower tail extrapolation parameter
:param utail: upper tail value
:param utpar: upper tail extrapolation parameter
    :return: the back transformed value
"""
EPSLON = 1.0e-20
nt = len(vr) # number of data to transform
# Value in the lower tail? 1=linear, 2=power, (3 and 4 are invalid):
if vrgs <= vrg[0]:
backtr = vr[0]
cdflo = gcum(vrg[0])
cdfbt = gcum(vrgs)
if ltail == 1:
backtr = dpowint(0.0, cdflo, zmin, vr[0], cdfbt, 1.0)
elif ltail == 2:
cpow = 1.0 / ltpar
backtr = dpowint(0.0, cdflo, zmin, vr[0], cdfbt, cpow)
# Value in the upper tail? 1=linear, 2=power, 4=hyperbolic:
elif vrgs >= vrg[nt-1]:
backtr = vr[nt-1]
cdfhi = gcum(vrg[nt-1])
cdfbt = gcum(vrgs)
if utail == 1:
backtr = dpowint(cdfhi, 1.0, vr[nt-1], zmax, cdfbt, 1.0)
elif utail == 2:
cpow = 1.0 / utpar
backtr = dpowint(cdfhi, 1.0, vr[nt-1], zmax, cdfbt, cpow)
elif utail == 4:
plambda = (vr[nt-1]**utpar)*(1.0-gcum(vrg[nt-1]))
backtr = (plambda/(1.0-gcum(vrgs)))**(1.0/utpar)
else:
# Value within the transformation table:
j = dlocate(vrg, 1, nt, vrgs)
j = max(min((nt-2), j), 1)
backtr = dpowint(vrg[j], vrg[j+1], vr[j], vr[j+1], vrgs, 1.0)
return backtr
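# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the original GSLIB port): given a
# transformation table of paired original values `vr` and normal scores `vrg`
# (both sorted ascending), back transform a column of normal scores with
# linear tail extrapolation. The DataFrame and column names below are
# hypothetical; pandas is imported locally since this module does not
# otherwise require it.
def _example_backtr():
    import pandas as pd
    vr = np.array([0.1, 0.5, 1.0, 2.0, 5.0])      # original data values
    vrg = np.array([-1.5, -0.5, 0.0, 0.5, 1.5])   # matching normal scores
    df = pd.DataFrame({"NS_Porosity": [-2.0, -0.25, 0.3, 1.7]})
    z = backtr(df, "NS_Porosity", vr, vrg, zmin=0.0, zmax=10.0,
               ltail=1, ltpar=1.0, utail=1, utpar=1.0)
    return z  # 1D ndarray of back transformed values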
def gcum(x):
"""Calculate the cumulative probability of the standard normal distribution.
:param x: the value from the standard normal distribution
    :return: the cumulative probability (area under the standard normal curve to the left of x)
"""
z = x
if z < 0:
z = -z
t = 1./(1. + 0.2316419*z)
gcum = t*(0.31938153 + t*(-0.356563782 + t *
(1.781477937 + t*(-1.821255978 + t*1.330274429))))
e2 = 0.
# 6 standard deviations out gets treated as infinity:
if z <= 6.:
e2 = np.exp(-z*z/2.)*0.3989422803
gcum = 1.0 - e2 * gcum
if x >= 0:
return gcum
gcum = 1.0 - gcum
return gcum
def locate(xx, iis, iie, x):
"""Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where
`xx` is an array of length `n`, and `x` is a given value. `xx` must be
monotonic, either increasing or decreasing (GSLIB version).
:param xx: array
:param iis: start point
:param iie: end point
:param x: given value
    :return: the index j such that x lies between xx[j] and xx[j+1]
"""
n = len(xx)
    # Initialize lower and upper limits:
if iis <= 0:
iis = 0
if iie >= n:
iie = n-1
jl = iis-1
ju = iie
if xx[n-1] <= x:
j = iie
return j
# If we are not done then compute a midpoint:
while (ju-jl) > 1:
jm = int((ju+jl)/2)
# Replace the lower or upper limit with the midpoint:
if (xx[iie] > xx[iis]) == (x > xx[jm]):
jl = jm
else:
ju = jm
# Return with the array index:
j = jl
return j
def dlocate(xx, iis, iie, x):
"""Return value `j` such that `x` is between `xx[j]` and `xx[j+1]`, where
`xx` is an array of length `n`, and `x` is a given value. `xx` must be
monotonic, either increasing or decreasing (updated with Python bisect)
:param xx: array
:param iis: start point
:param iie: end point
:param x: given value
    :return: index `j` such that `x` lies between `xx[j]` and `xx[j+1]`
"""
n = len(xx)
if iie <= iis:
iis = 0
iie = n - 1
    array = xx[iis: iie - 1]  # accounting for the switch to 0,...,n-1 indexing
j = bisect(array, x)
return j
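# Example (illustrative sketch, not part of the original GSLIB translation):
#   dlocate(np.array([1., 2., 3., 4., 5.]), 1, 5, 3.5)
# also returns 2, matching locate() above for interior values.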
def powint(xlow, xhigh, ylow, yhigh, xval, power):
"""Power-based interpolator
:param xlow: x lower interval
:param xhigh: x upper interval
:param ylow: y lower interval
:param yhigh: y upper interval
:param xval: value on x
:param power: power for interpolation
    :return: interpolated y value at `xval`
"""
EPSLON = 1.0e-20
if (xhigh-xlow) < EPSLON:
powint = (yhigh+ylow)/2.0
else:
powint = ylow + (yhigh-ylow)*(((xval-xlow)/(xhigh-xlow))**power)
return powint
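# Example (illustrative sketch, not part of the original GSLIB translation):
#   powint(0.0, 10.0, 0.0, 100.0, 5.0, 1.0)  ->  50.0  (linear interpolation)
#   powint(0.0, 10.0, 0.0, 100.0, 5.0, 2.0)  ->  25.0  (power-of-2 interpolation)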
def dsortem(ib, ie, a, iperm, b=0, c=0, d=0, e=0, f=0, g=0, h=0):
"""Sort array in ascending order.
:param ib: start index
:param ie: end index
:param a: array
:param iperm: 0 no other array is permuted.
1 array b is permuted according to array a.
2 arrays b, c are permuted.
3 arrays b, c, d are permuted.
4 arrays b, c, d, e are permuted.
5 arrays b, c, d, e, f are permuted.
6 arrays b, c, d, e, f, g are permuted.
7 arrays b, c, d, e, f, g, h are permuted.
>7 no other array is permuted.
:param b: array to be permuted according to array a.
:param c: array to be permuted according to array a.
:param d: array to be permuted according to array a.
:param e: array to be permuted according to array a.
:param f: array to be permuted according to array a.
:param g: array to be permuted according to array a.
:param h: array to be permuted according to array a.
:return: a: the array, a portion of which has been sorted.
b, c, d, e, f, g, h: arrays permuted according to array a (see
iperm)
"""
a = a[ib:ie]
inds = a.argsort()
a = np.copy(a[inds]) # deepcopy forces pass to outside scope
if iperm == 1:
return a
b_slice = b[ib:ie]
b = b_slice[inds]
if iperm == 2:
return a, b
c_slice = c[ib:ie]
c = c_slice[inds]
if iperm == 3:
return a, b, c
d_slice = d[ib:ie]
d = d_slice[inds]
if iperm == 4:
return a, b, c, d
e_slice = e[ib:ie]
e = e_slice[inds]
if iperm == 5:
return a, b, c, d, e
f_slice = f[ib:ie]
f = f_slice[inds]
if iperm == 6:
return a, b, c, d, e, f
g_slice = g[ib:ie]
g = g_slice[inds]
if iperm == 7:
return a, b, c, d, e, f, g # TODO: changed from 'a, b, c, d, e, f, h'
h_slice = h[ib:ie]
h = h_slice[inds]
return a, b, c, d, e, f, g, h # TODO: changed from 'a, b, c, d, e, f, h'
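# Example (illustrative sketch, not part of the original GSLIB translation):
# sort an array and carry one companion array along (iperm=2, the call
# pattern used by ctable and nscore below):
#   a = np.array([3., 1., 2.]); b = np.array([30., 10., 20.])
#   dsortem(0, 3, a, 2, b=b)  ->  (array([1., 2., 3.]), array([10., 20., 30.]))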
def gauinv(p):
"""Compute the inverse of the standard normal cumulative distribution
function.
:param p: cumulative probability value
    :return: standard normal deviate (quantile) corresponding to `p`
"""
lim = 1.0e-10
p0 = -0.322_232_431_088
p1 = -1.0
p2 = -0.342_242_088_547
p3 = -0.020_423_121_024_5
p4 = -0.000_045_364_221_014_8
q0 = 0.099_348_462_606_0
q1 = 0.588_581_570_495
q2 = 0.531_103_462_366
q3 = 0.103_537_752_850
q4 = 0.003_856_070_063_4
# Check for an error situation
if p < lim:
xp = -1.0e10
return xp
if p > (1.0 - lim):
xp = 1.0e10
return xp
    # Work with the lower half of the distribution (use symmetry)
pp = p
if p > 0.5:
pp = 1 - pp
xp = 0.0
if p == 0.5:
return xp
# Approximate the function
y = np.sqrt(np.log(1.0 / (pp * pp)))
xp = float(
y
+ ((((y * p4 + p3) * y + p2) * y + p1) * y + p0)
/ ((((y * q4 + q3) * y + q2) * y + q1) * y + q0)
)
if float(p) == float(pp):
xp = -xp
return xp
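# Example (illustrative sketch, not part of the original GSLIB translation):
#   gauinv(0.5)    ->  0.0
#   gauinv(0.975)  ->  approximately 1.96 (rational approximation of the
#                      standard normal quantile function)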
def gcum(x):
"""Evaluate the standard normal cdf given a normal deviate `x`. `gcum` is
the area under a unit normal curve to the left of `x`. The results are
accurate only to about 5 decimal places.
    :param x: normal deviate (value from a standard normal distribution)
    :return: cumulative probability (standard normal cdf evaluated at `x`)
"""
z = x
if z < 0:
z = -z
t = 1.0 / (1.0 + 0.231_641_9 * z)
gcum_ = t * (
0.319_381_53
+ t
* (
-0.356_563_782
+ t * (1.781_477_937 + t * (-1.821_255_978 + t * 1.330_274_429))
)
)
e2 = 0.0
    # 6 standard deviations out gets treated as infinity
if z <= 6:
e2 = np.exp(-z * z / 2.0) * 0.398_942_280_3
gcum_ = 1.0 - e2 * gcum_
if x >= 0.0:
return gcum_
gcum_ = 1.0 - gcum_
return gcum_
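# Example (illustrative sketch, not part of the original GSLIB translation):
#   gcum(0.0)    ->  approximately 0.5
#   gcum(1.645)  ->  approximately 0.95
# gcum and gauinv are approximate inverses of one another.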
def dpowint(xlow, xhigh, ylow, yhigh, xval, pwr):
"""Power interpolate the value of `y` between (`xlow`, `ylow`) and
(`xhigh`, `yhigh`) for a value of `x` and a power `pwr`.
    :param xlow: x lower interval
    :param xhigh: x upper interval
    :param ylow: y lower interval
    :param yhigh: y upper interval
    :param xval: value on x at which to interpolate
    :param pwr: power for interpolation
    :return: interpolated y value at `xval`
"""
EPSLON = 1.0e-20
if (xhigh - xlow) < EPSLON:
dpowint_ = (yhigh + ylow) / 2.0
else:
dpowint_ = ylow + (yhigh - ylow) * (
((xval - xlow) / (xhigh - xlow)) ** pwr
)
return dpowint_
# @jit(**JITKW) # all NumPy array operations included in this function for precompile with NumBa
def setup_rotmat2(c0, nst, it, cc, ang):
DTOR = 3.14159265/180.0
EPSLON = 0.000000
PI = 3.141593
# The first time around, re-initialize the cosine matrix for the
# variogram structures:
rotmat = np.zeros((4, nst))
maxcov = c0
for js in range(0, nst):
azmuth = (90.0-ang[js])*DTOR
rotmat[0, js] = math.cos(azmuth)
rotmat[1, js] = math.sin(azmuth)
rotmat[2, js] = -1*math.sin(azmuth)
rotmat[3, js] = math.cos(azmuth)
if it[js] == 4:
maxcov = maxcov + 9999.9
else:
maxcov = maxcov + cc[js]
return rotmat, maxcov
@jit(**JITKW)
def setup_rotmat(c0, nst, it, cc, ang, pmx):
"""Setup rotation matrix.
:param c0: nugget constant (isotropic)
:param nst: number of nested structures (max. 4)
    :param it: type of each nested structure (1=spherical, 2=exponential, 3=Gaussian, 4=power)
    :param cc: multiplicative factor (sill contribution) of each nested structure
    :param ang: azimuth angle of each nested structure (degrees)
    :param pmx: covariance contribution assigned to power-model structures
    :return: rotation matrix array [4, nst] and the maximum covariance `maxcov`
"""
PI = 3.141_592_65
DTOR = PI / 180.0
# The first time around, re-initialize the cosine matrix for the variogram
# structures
rotmat = np.zeros((4, nst))
maxcov = c0
for js in range(0, nst):
azmuth = (90.0 - ang[js]) * DTOR
rotmat[0, js] = math.cos(azmuth)
rotmat[1, js] = math.sin(azmuth)
rotmat[2, js] = -1 * math.sin(azmuth)
rotmat[3, js] = math.cos(azmuth)
if it[js] == 4:
maxcov = maxcov + pmx
else:
maxcov = maxcov + cc[js]
return rotmat, maxcov
@jit(**JITKW)
def cova2(x1, y1, x2, y2, nst, c0, pmx, cc, aa, it, ang, anis, rotmat, maxcov):
"""Calculate the covariance associated with a variogram model specified by a
nugget effect and nested variogram structures.
:param x1: x coordinate of first point
:param y1: y coordinate of first point
:param x2: x coordinate of second point
:param y2: y coordinate of second point
:param nst: number of nested structures (maximum of 4)
:param c0: isotropic nugget constant (TODO: not used)
:param pmx: TODO
:param cc: multiplicative factor of each nested structure
:param aa: parameter `a` of each nested structure
    :param it: type of each nested structure (1=spherical, 2=exponential, 3=Gaussian, 4=power)
    :param ang: azimuth angles (not used; rotation is passed in via `rotmat`)
    :param anis: anisotropy ratio (minor/major range) of each nested structure
    :param rotmat: rotation matrices
    :param maxcov: maximum covariance (nugget plus structure contributions)
    :return: covariance between the two points
"""
EPSLON = 0.000001
# Check for very small distance
dx = x2 - x1
dy = y2 - y1
if (dx * dx + dy * dy) < EPSLON:
cova2_ = maxcov
return cova2_
# Non-zero distance, loop over all the structures
cova2_ = 0.0
for js in range(0, nst):
# Compute the appropriate structural distance
dx1 = dx * rotmat[0, js] + dy * rotmat[1, js]
dy1 = (dx * rotmat[2, js] + dy * rotmat[3, js]) / anis[js]
h = math.sqrt(max((dx1 * dx1 + dy1 * dy1), 0.0))
if it[js] == 1:
# Spherical model
hr = h / aa[js]
if hr < 1.0:
cova2_ = cova2_ + cc[js] * (1.0 - hr * (1.5 - 0.5 * hr * hr))
elif it[js] == 2:
# Exponential model
cova2_ = cova2_ + cc[js] * np.exp(-3.0 * h / aa[js])
elif it[js] == 3:
# Gaussian model
hh = -3.0 * (h * h) / (aa[js] * aa[js])
cova2_ = cova2_ + cc[js] * np.exp(hh)
elif it[js] == 4:
# Power model
cov1 = pmx - cc[js] * (h ** aa[js])
cova2_ = cova2_ + cov1
return cova2_
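# Example (illustrative sketch, not part of the original GSLIB translation):
# a single isotropic spherical structure with no nugget, unit sill and a
# range of 10, evaluated at a separation distance of 5 (half the range):
#   it = np.array([1]); cc = np.array([1.0]); aa = np.array([10.0])
#   ang = np.array([0.0]); anis = np.array([1.0])
#   rotmat, maxcov = setup_rotmat(0.0, 1, it, cc, ang, 9999.9)
#   cova2(0.0, 0.0, 5.0, 0.0, 1, 0.0, 9999.9, cc, aa, it, ang, anis,
#         rotmat, maxcov)  ->  0.3125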
def sqdist(x1, y1, z1, x2, y2, z2, ind, rotmat):
# Compute component distance vectors and the squared distance:
dx = x1 - x2
dy = y1 - y2
dz = 0.0
sqdist = 0.0
for i in range(0, 2):
cont = rotmat[ind, i, 1] * dx + rotmat[ind, i, 2] * dy
sqdist = sqdist + cont * cont
return sqdist
def sqdist2(x1, y1, x2, y2, ist, rotmat, anis):
"""Calculate the 2D square distance based on geometric ani
:param x1: x coordinate of first point
:param y1: y coordinate of first point
:param x2: x coordinate of second point
:param y2: y coordinate of second point
:param ist: structure index
:param rotmat: 2d rotation matrix
:param anis: 2D anisotropy ratio
    :return: anisotropic squared distance between the two points
"""
# Compute component distance vectors and the squared distance:
dx = x1 - x2
dy = y1 - y2
dx1 = (dx*rotmat[0, ist] + dy*rotmat[1, ist])
dy1 = (dx*rotmat[2, ist] + dy*rotmat[3, ist])/anis[ist]
sqdist_ = (dx1*dx1+dy1*dy1)
return sqdist_
def setrot(ang1, ang2, sang1, anis1, anis2, sanis1, nst, MAXROT):
"""GSLIB's SETROT subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
"""
DEG2RAD = 3.141592654/180.0
EPSLON = 1.e-20
rotmat = np.zeros((MAXROT+1, 3, 3))
if ang1 >= 0.0 and ang1 < 270.0:
alpha = (90.0 - ang1) * DEG2RAD
else:
alpha = (450.0 - ang1) * DEG2RAD
# Get the required sines and cosines:
sina = math.sin(alpha)
cosa = math.cos(alpha)
# Construct the rotation matrix in the required memory:
afac1 = 1.0 / (max(anis1, EPSLON))
rotmat[0, 1, 1] = cosa
rotmat[0, 1, 2] = sina
rotmat[0, 2, 1] = afac1*(-sina)
rotmat[0, 2, 2] = afac1*(cosa)
# 2nd structure if present
if nst > 1:
if ang2 >= 0.0 and ang2 < 270.0:
alpha = (90.0 - ang2) * DEG2RAD
else:
alpha = (450.0 - ang2) * DEG2RAD
# Get the required sines and cosines:
sina = math.sin(alpha)
cosa = math.cos(alpha)
# Construct the rotation matrix in the required memory:
afac2 = 1.0 / (max(anis2, EPSLON))
rotmat[1, 1, 1] = cosa
rotmat[1, 1, 2] = sina
        rotmat[1, 2, 1] = afac2*(-sina)
        rotmat[1, 2, 2] = afac2*(cosa)
# search rotation
if sang1 >= 0.0 and sang1 < 270.0:
alpha = (90.0 - sang1) * DEG2RAD
else:
alpha = (450.0 - sang1) * DEG2RAD
# Get the required sines and cosines:
sina = math.sin(alpha)
cosa = math.cos(alpha)
# Construct the rotation matrix in the required memory:
afac1 = 1.0 / (max(sanis1, EPSLON))
rotmat[MAXROT, 1, 1] = cosa
rotmat[MAXROT, 1, 2] = sina
rotmat[MAXROT, 2, 1] = afac1*(-sina)
rotmat[MAXROT, 2, 2] = afac1*(cosa)
# Return to calling program:
return rotmat
@jit(**JITKW)
def ksol_numpy(neq, a, r):
"""Find solution of a system of linear equations.
:param neq: number of equations
    :param a: left hand side matrix stored as a flat array (length neq*neq, row by row)
:param r: right hand side matrix
:return: solution array, same dimension as `r`
"""
a = a[0: neq * neq] # trim the array
a = np.reshape(a, (neq, neq)) # reshape to 2D
ainv = linalg.inv(a).copy() # invert matrix
r = r[0: neq] # trim the array
# s = np.matmul(ainv, r) # matrix multiplication
s = ainv @ r # matrix multiplication
return s
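# Example (illustrative sketch, not part of the original GSLIB translation):
# solve a 2 x 2 kriging-style system stored as a flat left-hand-side array:
#   a = np.array([4.0, 1.0, 1.0, 3.0])   # [[4, 1], [1, 3]] stored row by row
#   r = np.array([1.0, 2.0])
#   ksol_numpy(2, a, r)  ->  approximately [0.09, 0.64]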
def ctable(MAXNOD, MAXCXY, MAXCTX, MAXCTY, MAXXYZ, xsiz, ysiz, isrot, nx, ny, nst, c0, cc, aa, it, ang, anis, global_rotmat, radsqd):
"""GSLIB's CTABLE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only, WARNING: only spiral search setup works currently.
"""
# Declare constants
TINY = 1.0e-10
PMX = 9999.9
MAXROT = 2
# Size of the look-up table:
tmp = np.zeros(MAXXYZ)
MAXORD = MAXXYZ
if (nx*ny) < MAXCXY:
MAXORD = MAXCXY
order = np.zeros(MAXORD)
nctx = int(min(((MAXCTX-1)/2), (nx-1)))
ncty = int(min(((MAXCTY-1)/2), (ny-1)))
# print('CTable check')
# print('nctx ' + str(nctx) + ', ncty ' + str(ncty))
ixnode = np.zeros(MAXXYZ)
iynode = np.zeros(MAXXYZ)
covtab = np.zeros((MAXCTX, MAXCTY))
# Initialize the covariance subroutine and cbb at the same time:
rotmat, maxcov = setup_rotmat2(c0, nst, it, cc, ang)
cbb = cova2(0.0, 0.0, 0.0, 0.0, nst, c0, PMX, cc,
aa, it, ang, anis, rotmat, maxcov)
# Now, set up the table and keep track of the node offsets that are
# within the search radius:
nlooku = -1 # adjusted for 0 origin
for i in range(-nctx, nctx+1): # cover entire range
xx = i * xsiz
ic = nctx + i
for j in range(-ncty, ncty+1): # cover entire range
yy = j * ysiz
jc = ncty + j
covtab[ic, jc] = cova2(
0.0, 0.0, xx, yy, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
# print('cov table offset'); print(xx,yy); print(covtab[ic,jc])
hsqd = sqdist(0.0, 0.0, 0.0, xx, yy, 0.0, MAXROT, global_rotmat)
if hsqd <= radsqd:
nlooku = nlooku + 1
                # We want to search by closest variogram distance (and use the
                # anisotropic Euclidean distance to break ties):
tmp[nlooku] = - (covtab[ic, jc] - TINY*hsqd)
order[nlooku] = (jc)*MAXCTX+ic
# print('populated presort'); print(tmp,order)
# Finished setting up the look-up table, now order the nodes such
# that the closest ones, according to variogram distance, are searched
# first. Note: the "loc" array is used because I didn't want to make
# special allowance for 2 byte integers in the sorting subroutine:
nlooku = nlooku + 1
# print('nlooku' + str(nlooku)); print('MAXCTX' + str(MAXCTX))
tmp, order = dsortem(0, nlooku, tmp, 2, b=order)
# print('populated postsort'); print(tmp,order)
for il in range(0, nlooku):
loc = int(order[il])
iy = int((loc-0)/MAXCTX)
ix = loc - (iy-0)*MAXCTX
iynode[il] = int(iy)
ixnode[il] = int(ix)
# print('populated ix, iy node list'); print(ixnode, iynode)
return covtab, tmp, order, ixnode, iynode, nlooku, nctx, ncty
def srchnd(ix, iy, nx, ny, xmn, ymn, xsiz, ysiz, sim, noct, nodmax, ixnode, iynode, nlooku, nctx, ncty, UNEST):
"""GSLIB's SRCHND subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
"""
# Consider all the nearby nodes until enough have been found:
ncnode = 0
icnode = np.zeros(nodmax, dtype=int)
icnode.fill(-1)
cnodev = np.zeros(nodmax)
cnodex = np.zeros(nodmax)
cnodey = np.zeros(nodmax)
# print('Node search at '); print(ix,iy)
# print('nlooku'); print(nlooku)
if noct > 0:
ninoct = np.zeros(8)
for il in range(0, nlooku):
if ncnode == nodmax:
return ncnode, icnode, cnodev, cnodex, cnodey
i = ix + (int(ixnode[il])-nctx)
j = iy + (int(iynode[il])-ncty)
# print('i,j'); print(i,j)
if i < 0 or j < 0:
continue
if i >= nx or j >= ny:
continue
ind = i + (j)*nx
if sim[ind] > UNEST:
icnode[ncnode] = il
cnodex[ncnode] = xmn + (i)*xsiz # adjust for 0 origin
cnodey[ncnode] = ymn + (j)*ysiz
cnodev[ncnode] = sim[ind]
# print('srchnd found at index - ' +str(ind) + ' at x and y ' + str(cnodex[ncnode]) + ',' + str(cnodey[ncnode]))
# print(' ix = ' + str(i) + ' and iy = ' + str(j))
# print(' value = ' + str(sim[ind]))
ncnode = ncnode + 1 # moved to account for origin 0
return ncnode, icnode, cnodev, cnodex, cnodey
def beyond(ivtype, nccut, ccut, ccdf, ncut, cut, cdf, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval):
"""GSLIB's BEYOND subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
"""
EPSLON = 1.0e-20
UNEST = -1.0
# Check for both "zval" and "cdfval" defined or undefined:
ierr = 1
    if zval > UNEST and cdfval > UNEST:
        return -1
    if zval <= UNEST and cdfval <= UNEST:
        return -1
# Handle the case of a categorical variable:
if ivtype == 0:
cum = 0
for i in range(0, nccut):
cum = cum + ccdf[i]
if cdfval <= cum:
zval = ccut[i]
return zval
return zval
# Figure out what part of distribution: ipart = 0 - lower tail
# ipart = 1 - middle
# ipart = 2 - upper tail
ierr = 0
ipart = 1
    if zval > UNEST:
if zval <= ccut[0]:
ipart = 0
if zval >= ccut[nccut-1]:
ipart = 2
else:
if cdfval <= ccdf[0]:
ipart = 0
if cdfval >= ccdf[nccut-1]:
ipart = 2
# ARE WE IN THE LOWER TAIL?
if ipart == 0:
if ltail == 1:
# Straight Linear Interpolation:
powr = 1.0
if zval > UNEST:
cdfval = powint(zmin, ccut[0], 0.0, ccdf[0], zval, powr)
else:
zval = powint(0.0, ccdf[0], zmin, ccut[0], cdfval, powr)
elif ltail == 2:
# Power Model interpolation to lower limit "zmin"?
if zval > UNEST:
cdfval = powint(zmin, ccut[0], 0.0, ccdf[0], zval, ltpar)
else:
powr = 1.0 / ltpar
zval = powint(0.0, ccdf[0], zmin, ccut[0], cdfval, powr)
# Linear interpolation between the rescaled global cdf?
elif ltail == 3:
if zval > UNEST:
# Computing the cdf value. Locate the point and the class bound:
idat = locate(cut, 1, ncut, zval)
                iupp = locate(cut, 1, ncut, ccut[0])
# Straight linear interpolation if no data; otherwise, linear:
                if idat <= -1 or idat >= ncut - 1 or iupp <= -1 or iupp >= ncut-1:  # modified for 0 index
cdfval = powint(zmin, cut[0], 0.0, cdf[0], zval, 1.)
else:
temp = powint(cut[idat], cut[idat+1],
cdf[idat], cdf[idat+1], zval, 1.)
cdfval = temp*ccdf[0]/cdf[iupp]
else:
# Computing Z value: Are there any data out in the tail?
                iupp = locate(cut, 1, ncut, ccut[0])
# Straight linear interpolation if no data; otherwise, local linear
# interpolation:
if iupp <= 0 or iupp >= ncut:
zval = powint(0.0, cdf[0], zmin, cut[0], cdfval, 1.)
else:
                    temp = cdfval*cdf[iupp]/ccdf[0]
                    idat = locate(cdf, 1, ncut, temp)
if idat <= -1 or idat >= ncut-1: # adjusted for 0 origin
zval = powint(0.0, cdf[0], zmin, cut[0], cdfval, 1.)
else:
                        zval = powint(cdf[idat], cdf[idat+1],
                                      cut[idat], cut[idat+1], temp, 1.)
else:
# Error situation - unacceptable option:
ierr = 2
return -1
# FINISHED THE LOWER TAIL, ARE WE IN THE MIDDLE?
if ipart == 1:
# Establish the lower and upper limits:
if zval > UNEST:
cclow = locate(ccut, 1, nccut, zval)
else:
cclow = locate(ccdf, 1, nccut, cdfval)
cchigh = cclow + 1
if middle == 1:
# Straight Linear Interpolation:
powr = 1.0
if zval > UNEST:
cdfval = powint(ccut[cclow], ccut[cchigh],
ccdf[cclow], ccdf[cchigh], zval, powr)
else:
zval = powint(ccdf[cclow], ccdf[cchigh],
ccut[cclow], ccut[cchigh], cdfval, powr)
# Power interpolation between class bounds?
elif middle == 2:
if zval > UNEST:
cdfval = powint(ccut[cclow], ccut[cchigh],
ccdf[cclow], ccdf[cchigh], zval, mpar)
else:
powr = 1.0 / mpar
zval = powint(ccdf[cclow], ccdf[cchigh],
ccut[cclow], ccut[cchigh], cdfval, powr)
# Linear interpolation between the rescaled global cdf?
elif middle == 3:
            ilow = locate(cut, 1, ncut, ccut[cclow])
            iupp = locate(cut, 1, ncut, ccut[cchigh])
if cut[ilow] < ccut[cclow]:
ilow = ilow + 1
if cut[iupp] > ccut[cchigh]:
iupp = iupp - 1
if zval > UNEST:
idat = locate(cut, 1, ncut, zval)
# Straight linear interpolation if no data; otherwise, local linear
# interpolation:
if idat <= -1 or idat >= ncut-1 or ilow <= -1 or ilow >= ncut-1 or iupp <= -1 or iupp >= ncut-1 or iupp <= ilow:
cdfval = powint(ccut[cclow], ccut[cchigh],
ccdf[cclow], ccdf[cchigh], zval, 1.)
else:
temp = powint(cut[idat], cut[idat+1],
cdf[idat], cdf[idat+1], zval, 1.)
cdfval = powint(cdf[ilow], cdf[iupp],
ccdf[cclow], ccdf[cchigh], temp, 1.)
else:
# Straight linear interpolation if no data; otherwise, local linear
# interpolation:
                if ilow <= -1 or ilow >= ncut-1 or iupp <= -1 or iupp >= ncut-1 or iupp < ilow:
zval = powint(ccdf[cclow], ccdf[cchigh],
ccut[cclow], ccut[cchigh], cdfval, 1.)
else:
temp = powint(ccdf[cclow], ccdf[cchigh],
cdf[ilow], cdf[iupp], cdfval, 1.)
idat = locate(cdf, 1, ncut, temp)
if cut[idat] < ccut[cclow]:
idat = idat+1
if idat <= -1 or idat >= ncut-1 or cut[idat+1] > ccut[cchigh]:
zval = powint(ccdf[cclow], ccdf[cchigh],
ccut[cclow], ccut[cchigh], cdfval, 1.)
else:
zval = powint(cdf[idat], cdf[idat+1],
cut[idat], cut[idat+1], temp, 1.)
else:
# Error situation - unacceptable option:
ierr = 2
return -1
# FINISHED THE MIDDLE, ARE WE IN THE UPPER TAIL?
if ipart == 2:
if utail == 1:
powr = 1.0
if zval > UNEST:
                cdfval = powint(ccut[nccut-1], zmax,
                                ccdf[nccut-1], 1.0, zval, powr)
            else:
                zval = powint(ccdf[nccut-1], 1.0,
                              ccut[nccut-1], zmax, cdfval, powr)
        elif utail == 2:
            # Power interpolation to upper limit "utpar"?
            if zval > UNEST:
                cdfval = powint(ccut[nccut-1], zmax,
                                ccdf[nccut-1], 1.0, zval, utpar)
            else:
                powr = 1.0 / utpar
                zval = powint(ccdf[nccut-1], 1.0,
                              ccut[nccut-1], zmax, cdfval, powr)
# Linear interpolation between the rescaled global cdf?
elif utail == 3:
if zval > UNEST:
# Approximately Locate the point and the class bound:
                idat = locate(cut, 1, ncut, zval)
                ilow = locate(cut, 1, ncut, ccut[nccut-1])
                if cut[idat] < zval:
                    idat = idat + 1
                if cut[ilow] < ccut[nccut-1]:
                    ilow = ilow + 1
                # Straight linear interpolation if no data; otherwise, local linear
                # interpolation:
                if idat <= -1 or idat >= ncut-1 or ilow <= -1 or ilow >= ncut-1:
                    cdfval = powint(ccut[nccut-1], zmax,
                                    ccdf[nccut-1], 1.0, zval, 1.)
                else:
                    temp = powint(cut[idat], cut[idat+1],
                                  cdf[idat], cdf[idat+1], zval, 1.)
                    cdfval = powint(cdf[ilow], 1.0, ccdf[nccut-1], 1.0, temp, 1.)
else:
# Computing Z value: Are there any data out in the tail?
                ilow = locate(cut, 1, ncut, ccut[nccut-1])
if cut[ilow] < ccut[nccut-1]:
ilow = ilow + 1
# Straight linear interpolation if no data; otherwise, local linear
# interpolation:
if ilow <= -1 or ilow >= ncut-1:
                    zval = powint(ccdf[nccut-1], 1.0,
                                  ccut[nccut-1], zmax, cdfval, 1.)
                else:
                    temp = powint(ccdf[nccut-1], 1.0, cdf[ilow], 1.0, cdfval, 1.)
                    idat = locate(cdf, 1, ncut, temp)
if cut[idat] < ccut[nccut-1]:
idat = idat+1
if idat >= ncut-1:
zval = powint(ccdf[nccut-1], 1.0,
ccut[nccut-1], zmax, cdfval, 1.)
else:
zval = powint(cdf[idat], cdf[idat+1],
cut[idat], cut[idat+1], temp, 1.)
# Fit a Hyperbolic Distribution?
elif utail == 4:
# Figure out "lambda" and required info:
            lambd = math.pow(ccut[nccut-1], utpar)*(1.0-ccdf[nccut-1])
if zval > UNEST:
cdfval = 1.0 - (lambd/(math.pow(zval, utpar)))
else:
                zval = math.pow(lambd/(1.0-cdfval), 1.0/utpar)
else:
# Error situation - unacceptable option:
ierr = 2
return -1
if zval < zmin:
zval = zmin
if zval > zmax:
zval = zmax
# All finished - return:
return zval
def krige(ix, iy, nx, ny, xx, yy, lktype, x, y, vr, sec, colocorr, lvm, close, covtab, nctx, ncty, icnode, ixnode, iynode, cnodev, cnodex, cnodey, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov, MAXCTX, MAXCTY, MAXKR1, MAXKR2):
"""GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only.
"""
EPSLON = 1.0e-20
cur_index = ix + (iy)*nx
# print('krige at grid '); print(ix,iy)
# print('krige at node '); print(xx,yy)
# print('grid index = '); print(cur_index)
# print('Check ixnode '); print(ixnode); print(iynode)
nclose = len(close)
ncnode = (icnode >= 0).sum()
# print('In kriging, maxcov = ' + str(maxcov))
# print('kriging')
# print('nclose ' + str(nclose) + ', ncnode ' + str(ncnode))
# print('MAXKR1'); print(MAXKR1)
vra = np.zeros(MAXKR1)
vrea = np.zeros(MAXKR1)
r = np.zeros(MAXKR1)
rr = np.zeros(MAXKR1)
s = np.zeros(MAXKR1)
a = np.zeros(MAXKR2)
cbb = cova2(0, 0, 0, 0, nst, c0, 9999.9, cc,
aa, it, ang, anis, rotmat, maxcov)
# print(r.shape)
# Local mean
if lktype == 2:
gmean = lvm[cur_index]
else:
gmean = 0.0
# Size of the kriging system:
first = False
na = nclose + ncnode
# print('lktype' + str(lktype))
if lktype == 0:
neq = na
if lktype == 1:
# print('ordinary kriging')
neq = na + 1
if lktype == 2:
neq = na
if lktype == 3:
neq = na + 2
if lktype == 4:
neq = na + 1
# print('prior matrix build neq'); print(neq)
# print('na'); print(na)
# Set up kriging matrices:
    iin = -1  # accounting for 0 origin
# print('krige na' + str(na))
for j in range(0, na):
# Sort out the actual location of point "j"
if j < nclose: # adjusted for 0 index origin
index = int(close[j])
x1 = x[index]
y1 = y[index]
vra[j] = vr[index]
# print('data: index = ' + str(index) + ', x,y ' + str(x1) + ',' + str(y1) + ', value = ' + str(vra[j]))
if sec.shape[0] > 1:
vrea[j] = sec[index]
else:
vrea[j] = 0.0 # added this - no effect
if lktype == 2:
vra[j] = vra[j] - vrea[j]
else:
# It is a previously simulated node (keep index for table look-up):
# print(j)
index = j-(nclose) # adjust for 0 index
x1 = cnodex[index]
y1 = cnodey[index]
vra[j] = cnodev[index]
ind = icnode[index]
# print('prev node: index = ' + str(index) + ', x,y ' + str(x1) + ',' + str(y1) + ', value = ' + str(vra[j]))
ix1 = ix + (int(ixnode[ind])-nctx-1)
iy1 = iy + (int(iynode[ind])-ncty-1)
# print('ix1, iy1 = '); print(ix1,iy1)
index = ix1 + (iy1-1)*nx
if lktype == 2:
vrea[j] = lvm[index]
vra[j] = vra[j] - vrea[j]
for i in range(0, na): # we need the full matrix
# print('kriging indice populated' + str(j) + ',' + str(i))
# Sort out the actual location of point "i"
if i < nclose:
index = int(close[i]) # adjust for 0 index
x2 = x[index]
y2 = y[index]
else:
# It is a previously simulated node (keep index for table look-up):
#print('i = ' + str(i) + ',nclose = ' + str(nclose) + ', na = ' + str(na))
index = i-(nclose)
x2 = cnodex[index]
y2 = cnodey[index]
ind = icnode[index]
# print('previous node index' + str(ind))
ix2 = ix + (int(ixnode[ind])-nctx-1)
iy2 = iy + (int(iynode[ind])-ncty-1)
# Now, get the covariance value:
iin = iin + 1
# print('kriging data location = '); print(x2,y2)
# Decide whether or not to use the covariance look-up table:
if j <= nclose or i <= nclose:
cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc,
aa, it, ang, anis, rotmat, maxcov)
a[iin] = cov
else:
# Try to use the covariance look-up (if the distance is in range):
# ii = nctx + 1 + (ix1 - ix2)
# jj = ncty + 1 + (iy1 - iy2)
cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc,
aa, it, ang, anis, rotmat, maxcov)
# if ii < 0 or ii >= MAXCTX or jj < 0 or jj >= MAXCTY:
# cov = cova2(x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# else:
# cov = covtab[ii,jj]
# print(x1,y1,x2,y2,cov)
a[iin] = cov
# Get the RHS value (possibly with covariance look-up table):
if j <= nclose:
# print(cc,aa,it,ang,anis,rotmat,maxcov)
cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc,
aa, it, ang, anis, rotmat, maxcov)
# if cov >= 1.0:
# print('cov of 1.0 RHS for data ')
# print('ix,iy ='); print(xx,xx)
# print('ix1,iy1'); print(x1,y1)
r[j] = cov
else:
# Try to use the covariance look-up (if the distance is in range):
# ii = nctx + 1 + (ix - ix1)
# jj = ncty + 1 + (iy - iy1)
# print('RHS ctable coord' + str(ii) + ',' + str(jj))
# print('ix,iy ='); print(ix,iy)
# print('ix1,iy1'); print(ix1,iy1)
# if ii < 0 or ii >= MAXCTX or jj < 0 or jj >= MAXCTY: # adjusted for origin 0
# print('Not using covariance table')
# cov = cova2(xx,yy,x1,y1,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# else:
# cov = covtab[ii,jj]
cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc,
aa, it, ang, anis, rotmat, maxcov)
# if cov >= 1.0:
# print('cov of 1.0 RHS for node ' + str(j))
# print('ix,iy ='); print(xx,xx)
# print('ix1,iy1'); print(x1,y1)
r[j] = cov
# print('kriging, writing RHS '+ str(j) + ',' + str(cov) + 'loc_est' + str(xx) + ',' + str(yy) + 'data' + str(x1) + ',' + str(y1))
rr[j] = r[j]
if lktype == 1: # we need the full array
iin = iin + 1
a[iin] = 1.0
if lktype == 4: # we need the full array
iin = iin + 1
a[iin] = colocorr*r[j]
# Addition of OK constraint:
if lktype == 1 or lktype == 3:
for i in range(0, na):
iin = iin + 1
a[iin] = 1.0
iin = iin + 1
a[iin] = 0.0
r[na] = 1.0
rr[na] = 1.0
# Addition of the External Drift Constraint:
if lktype == 3:
edmin = 999999.
edmax = -999999.
for i in range(0, na):
iin = iin + 1
            a[iin] = vrea[i]
if a[iin] < edmin:
edmin = a[iin]
if a[iin] > edmax:
edmax = a[iin]
iin = iin + 1
a[iin] = 0.0
iin = iin + 1
a[iin] = 0.0
ind = ix + (iy-1)*nx
r[na+1] = lvm[ind]
rr[na+1] = r[na+1]
if (edmax-edmin) < EPSLON:
neq = neq - 1
# Addition of Collocated Cosimulation Constraint:
if lktype == 4:
colc = True
sfmin = 1.0e21
sfmax = -1.0e21
for i in range(0, na):
iin = iin + 1
a[iin] = colocorr*r[i]
if a[iin] < sfmin:
sfmin = a[iin]
if a[iin] > sfmax:
sfmax = a[iin]
iin = iin + 1
a[iin] = 1.0
ii = na
r[ii] = colocorr
rr[ii] = r[ii]
# if (sfmax-sfmin) < EPSLON:
# neq = neq - 1
# colc = False
# Solve the Kriging System:
# print('neq = ' + str(neq));
# print('a'); print(a)
# print('r'); print(r)
# print('data'); print(vra)
if neq == 1 and lktype != 3:
# print('neq = 1 '); print(a,r)
s[0] = r[0] / a[0]
else:
# print('neq prior ksol' + str(neq))
s = ksol_numpy(neq, a, r)
# print('neq post ksol' + str(neq))
# if s.shape[0]< neq:
# print('s shape'); print(s.shape)
# print('a'); print(a)
# print('r'); print(r)
ising = 0 # need to figure this out
# print('s'); print(s)
# Compute the estimate and kriging variance. Recall that kriging type
# 0 = Simple Kriging:
# 1 = Ordinary Kriging:
# 2 = Locally Varying Mean:
# 3 = External Drift:
# 4 = Collocated Cosimulation:
# print('kriging weights'); print(s)
cmean = 0.0
# print('cbb = ' + str(cbb))
cstdev = cbb
sumwts = 0.0
for i in range(0, na):
cmean = cmean + s[i]*vra[i]
cstdev = cstdev - s[i]*rr[i]
sumwts = sumwts + s[i]
if lktype == 1:
cstdev = cstdev - s[na]
# print('Ordinary Weight' + str(s[na]))
if lktype == 2:
cmean = cmean + gmean
if lktype == 4 and colc == True: # we may drop colocated if low covariance dispersion
ind = ix + (iy-1)*nx
# print(ind)
# print('neq'); print(neq)
# print('s'); print(s.shape)
# print('lvm'); print(lvm.shape)
# print('colc wt = ' + str(s[na]) + ' for ' + str(lvm[cur_index]) + ' at index ' + str(cur_index))
cmean = cmean + s[na]*lvm[cur_index]
cstdev = cstdev - s[na] * rr[na]
# Error message if negative variance:
if cstdev < 0.0:
# print('ERROR: Negative Variance: ' + str(cstdev))
cstdev = 0.0
cstdev = math.sqrt(max(cstdev, 0.0))
# print('kriging estimate and variance' + str(cmean) + ', ' + str(cstdev))
return cmean, cstdev
def ikrige(ix, iy, nx, ny, xx, yy, lktype, x, y, vr, sec, colocorr, gmean, lvm, close, covtab, nctx, ncty, icnode, ixnode, iynode, cnodev, cnodex, cnodey, nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov, MAXCTX, MAXCTY, MAXKR1, MAXKR2):
"""GSLIB's KRIGE subroutine (Deutsch and Journel, 1998) converted from the
original Fortran to Python and modified for indicator kriging by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
Note this was simplified to 2D only. WARNING: tested only for ktype 0,1,2 (2 is local proportion model / local mean provided, not residual approach)
"""
EPSLON = 1.0e-20
cur_index = ix + (iy)*nx
# print('krige at grid '); print(ix,iy)
# print('krige at node '); print(xx,yy)
# print('grid index = '); print(cur_index)
# print('Check ixnode '); print(ixnode); print(iynode)
nclose = len(close)
ncnode = (icnode >= 0).sum()
# print('In kriging, maxcov = ' + str(maxcov))
# print('kriging')
# print('nclose ' + str(nclose) + ', ncnode ' + str(ncnode))
# print('MAXKR1'); print(MAXKR1)
vra = np.zeros(MAXKR1)
vrea = np.zeros(MAXKR1)
r = np.zeros(MAXKR1)
rr = np.zeros(MAXKR1)
s = np.zeros(MAXKR1)
a = np.zeros(MAXKR2)
cbb = cova2(0, 0, 0, 0, nst, c0, 9999.9, cc,
aa, it, ang, anis, rotmat, maxcov)
# print(r.shape)
# Local mean # just pass the local probability as gmean
# if lktype == 2:
# gmean = lvm[cur_index]
# keep input gmean otherwise
# Size of the kriging system:
first = False
na = nclose + ncnode
# print('lktype' + str(lktype))
if lktype == 0:
neq = na
if lktype == 1:
# print('ordinary kriging')
neq = na + 1
if lktype == 2:
neq = na
if lktype == 3:
neq = na + 2
if lktype == 4:
neq = na + 1
# print('prior matrix build neq'); print(neq)
# print('na'); print(na)
# print('kriging data close'); print(close)
# print('kriging node close'); print(icnode)
# Set up kriging matrices:
    iin = -1  # accounting for 0 origin
# print('krige na' + str(na))
for j in range(0, na):
# Sort out the actual location of point "j"
if j < nclose: # adjusted for 0 index origin
index = int(close[j])
x1 = x[index]
y1 = y[index]
vra[j] = vr[index]
# print('data: index = ' + str(index) + ', x,y ' + str(x1) + ',' + str(y1) + ', value = ' + str(vra[j]))
# if lvm.shape[0] > 1:
# vrea[j]= sec[index];
# else:
vrea[j] = 0.0 # added this - no effect
# if lktype == 2: vra[j] = vra[j] - vrea[j] # just using local variable mean not full residual approach
else:
# It is a previously simulated node (keep index for table look-up):
# print(j)
index = j-(nclose) # adjust for 0 index
x1 = cnodex[index]
y1 = cnodey[index]
vra[j] = cnodev[index]
ind = icnode[index]
# print('prev node: index = ' + str(index) + ', x,y ' + str(x1) + ',' + str(y1) + ', value = ' + str(vra[j]))
ix1 = ix + (int(ixnode[ind])-nctx-1)
iy1 = iy + (int(iynode[ind])-ncty-1)
# print('ix1, iy1 = '); print(ix1,iy1)
index = ix1 + (iy1-1)*nx
# if lktype == 2:
# vrea[j]= lvm[index]
# vra[j] = vra[j] - vrea[j]
for i in range(0, na): # we need the full matrix
# print('kriging indice populated' + str(j) + ',' + str(i))
# Sort out the actual location of point "i"
if i < nclose:
index = int(close[i]) # adjust for 0 index
x2 = x[index]
y2 = y[index]
else:
# It is a previously simulated node (keep index for table look-up):
#print('i = ' + str(i) + ',nclose = ' + str(nclose) + ', na = ' + str(na))
index = i-(nclose)
x2 = cnodex[index]
y2 = cnodey[index]
ind = icnode[index]
# print('previous node index' + str(ind))
ix2 = ix + (int(ixnode[ind])-nctx-1)
iy2 = iy + (int(iynode[ind])-ncty-1)
# Now, get the covariance value:
iin = iin + 1
# print('kriging data location = '); print(x2,y2)
# Decide whether or not to use the covariance look-up table:
if j <= nclose or i <= nclose:
# print('x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov')
# print(x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc,
aa, it, ang, anis, rotmat, maxcov)
# print('cov'); print(cov)
a[iin] = cov
else:
# Try to use the covariance look-up (if the distance is in range):
# ii = nctx + 1 + (ix1 - ix2)
# jj = ncty + 1 + (iy1 - iy2)
cov = cova2(x1, y1, x2, y2, nst, c0, 9999.9, cc,
aa, it, ang, anis, rotmat, maxcov)
# if ii < 0 or ii >= MAXCTX or jj < 0 or jj >= MAXCTY:
# cov = cova2(x1,y1,x2,y2,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# else:
# cov = covtab[ii,jj]
# print(x1,y1,x2,y2,cov)
a[iin] = cov
# Get the RHS value (possibly with covariance look-up table):
if j <= nclose:
# print(cc,aa,it,ang,anis,rotmat,maxcov)
cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc,
aa, it, ang, anis, rotmat, maxcov)
# if cov >= 1.0:
# print('cov of 1.0 RHS for data ')
# print('ix,iy ='); print(xx,xx)
# print('ix1,iy1'); print(x1,y1)
r[j] = cov
else:
# Try to use the covariance look-up (if the distance is in range):
# ii = nctx + 1 + (ix - ix1)
# jj = ncty + 1 + (iy - iy1)
# print('RHS ctable coord' + str(ii) + ',' + str(jj))
# print('ix,iy ='); print(ix,iy)
# print('ix1,iy1'); print(ix1,iy1)
# if ii < 0 or ii >= MAXCTX or jj < 0 or jj >= MAXCTY: # adjusted for origin 0
# print('Not using covariance table')
# cov = cova2(xx,yy,x1,y1,nst,c0,9999.9,cc,aa,it,ang,anis,rotmat,maxcov)
# else:
# cov = covtab[ii,jj]
cov = cova2(xx, yy, x1, y1, nst, c0, 9999.9, cc,
aa, it, ang, anis, rotmat, maxcov)
# if cov >= 1.0:
# print('cov of 1.0 RHS for node ' + str(j))
# print('ix,iy ='); print(xx,xx)
# print('ix1,iy1'); print(x1,y1)
r[j] = cov
# print('kriging, writing RHS '+ str(j) + ',' + str(cov) + 'loc_est' + str(xx) + ',' + str(yy) + 'data' + str(x1) + ',' + str(y1))
rr[j] = r[j]
if lktype == 1: # we need the full array
iin = iin + 1
a[iin] = 1.0
if lktype == 4: # we need the full array
iin = iin + 1
a[iin] = colocorr*r[j]
# Addition of OK constraint:
if lktype == 1 or lktype == 3:
for i in range(0, na):
iin = iin + 1
a[iin] = 1.0
iin = iin + 1
a[iin] = 0.0
r[na] = 1.0
rr[na] = 1.0
# Addition of the External Drift Constraint:
if lktype == 3:
edmin = 999999.
edmax = -999999.
for i in range(0, na):
iin = iin + 1
            a[iin] = vrea[i]
if a[iin] < edmin:
edmin = a[iin]
if a[iin] > edmax:
edmax = a[iin]
iin = iin + 1
a[iin] = 0.0
iin = iin + 1
a[iin] = 0.0
ind = ix + (iy-1)*nx
r[na+1] = lvm[ind]
rr[na+1] = r[na+1]
if (edmax-edmin) < EPSLON:
neq = neq - 1
# Addition of Collocated Cosimulation Constraint:
if lktype == 4:
colc = True
sfmin = 1.0e21
sfmax = -1.0e21
for i in range(0, na):
iin = iin + 1
a[iin] = colocorr*r[i]
if a[iin] < sfmin:
sfmin = a[iin]
if a[iin] > sfmax:
sfmax = a[iin]
iin = iin + 1
a[iin] = 1.0
ii = na
r[ii] = colocorr
rr[ii] = r[ii]
# if (sfmax-sfmin) < EPSLON:
# neq = neq - 1
# colc = False
# Solve the Kriging System:
# print('Kriging equations neq = ' + str(neq));
# print('a'); print(a)
# print('r'); print(r)
# print('data'); print(vra)
if neq == 1 and lktype != 3:
# print('neq = 1 '); print(a,r)
s[0] = r[0] / a[0]
else:
# print('neq prior ksol' + str(neq))
s = ksol_numpy(neq, a, r)
# print('neq post ksol' + str(neq))
# if s.shape[0]< neq:
# print('s shape'); print(s.shape)
# print('a'); print(a)
# print('r'); print(r)
ising = 0 # need to figure this out
# print('s'); print(s)
# Compute the estimate and kriging variance. Recall that kriging type
# 0 = Simple Kriging:
# 1 = Ordinary Kriging:
# 2 = Locally Varying Mean:
# 3 = External Drift:
# 4 = Collocated Cosimulation:
# print('kriging weights'); print(s)
cmean = 0.0
# print('cbb = ' + str(cbb))
cstdev = cbb
sumwts = 0.0
for i in range(0, na):
cmean = cmean + s[i]*vra[i]
cstdev = cstdev - s[i]*rr[i]
sumwts = sumwts + s[i]
if lktype == 1:
cstdev = cstdev - s[na]
# print('Ordinary Weight' + str(s[na]))
# if lktype == 2: cmean = cmean + gmean
if lktype == 4 and colc == True: # we may drop colocated if low covariance dispersion
ind = ix + (iy-1)*nx
# print(ind)
# print('neq'); print(neq)
# print('s'); print(s.shape)
# print('lvm'); print(lvm.shape)
# print('colc wt = ' + str(s[na]) + ' for ' + str(lvm[cur_index]) + ' at index ' + str(cur_index))
cmean = cmean + s[na]*lvm[cur_index]
cstdev = cstdev - s[na] * rr[na]
if lktype == 0 or lktype == 2:
cmean = cmean + (1.0-sumwts)*gmean
# print('cmean'); print(cmean)
# Error message if negative variance:
if cstdev < 0.0:
# print('ERROR: Negative Variance: ' + str(cstdev))
cstdev = 0.0
cstdev = math.sqrt(max(cstdev, 0.0))
# print('kriging estimate and variance' + str(cmean) + ', ' + str(cstdev))
return cmean, cstdev
def getindex(nc, cmn, csiz, loc):
ic = min(int((loc - cmn) / csiz), nc - 1)
return ic
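# Example (illustrative sketch, not part of the original GSLIB translation):
# map a coordinate onto a grid with nc cells, grid origin cmn and cell size
# csiz:
#   getindex(10, 5.0, 10.0, 27.0)  ->  2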
def correct_trend(trend):
"""Correct a indicator based trend model for closure (probabilities sum to 1.0).
:param trend: ndarray [ny,nx,ncut]
:return: nadarray [ny,nx,ncut] corrected for closure
"""
ny = trend.shape[0]
nx = trend.shape[1]
ncut = trend.shape[2]
for iy in range(0, ny):
for ix in range(0, nx):
            prob_sum = 0.0
            for ic in range(0, ncut):
                prob_sum = prob_sum + trend[iy, ix, ic]
            if prob_sum > 0.0:
                for ic in range(0, ncut):
                    trend[iy, ix, ic] = trend[iy, ix, ic] / prob_sum
return trend
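# Usage note (illustrative, not part of the original GSLIB translation):
# trend is an [ny, nx, ncut] array of indicator probabilities; after
# correct_trend(trend), each trend[iy, ix, :] vector sums to 1.0 (closure),
# provided the original probabilities at that cell were not all zero.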
def ordrel(ivtype, ncut, ccdf):
"""Correct a indicator based CDF for order relations.
:param ivtype: variable type, 0 - categorical and 1 - continuous
:param ncut: number of categories or thresholds
:param ccdf: input cumulative distribution function
:return: cumulative distribution function correct for order relations
"""
# print('input ordering relations'); print(ccdf)
ccdfo = np.zeros(ncut)
ccdf1 = np.zeros(ncut)
ccdf2 = np.zeros(ncut) # do we need MAXCUT = 100 for these 2?
# Make sure conditional cdf is within [0,1]:
for i in range(0, ncut):
if ccdf[i] < 0.0:
ccdf1[i] = 0.0
ccdf2[i] = 0.0
elif ccdf[i] > 1.0:
ccdf1[i] = 1.0
ccdf2[i] = 1.0
else:
ccdf1[i] = ccdf[i]
ccdf2[i] = ccdf[i]
# print('ordering relations'); print(ccdf1,ccdf2)
# Correct sequentially up, then down, and then average:
if ivtype == 0:
sumcdf = 0.0
for i in range(0, ncut):
sumcdf = sumcdf + ccdf1[i]
if sumcdf <= 0.0:
sumcdf = 1.0
for i in range(0, ncut):
ccdfo[i] = ccdf1[i] / sumcdf
else:
for i in range(1, ncut):
if ccdf1[i] < ccdf1[i-1]:
ccdf1[i] = ccdf1[i-1]
        for i in range(ncut-2, -1, -1):
if ccdf2[i] > ccdf2[i+1]:
ccdf2[i] = ccdf2[i+1]
for i in range(0, ncut):
ccdfo[i] = 0.5*(ccdf1[i]+ccdf2[i])
# Return with corrected CDF:
return ccdfo
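# Example (illustrative sketch, not part of the original GSLIB translation):
# correct a continuous (ivtype=1) conditional cdf that is not monotonic:
#   ordrel(1, 3, np.array([0.1, 0.3, 0.2]))
# averages an upward and a downward correction pass and returns a
# non-decreasing cdf, approximately [0.1, 0.25, 0.25].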
def declus(df, xcol, ycol, vcol, iminmax, noff, ncell, cmin, cmax):
"""GSLIB's DECLUS program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
Note this was simplified to 2D only.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param iminmax: 1 / True: for use cell size with max decluster mean
0 / False: for declustered mean minimizing cell size
:param noff: number of offsets
:param ncell: number of cell sizes
:param cmin: min cell size
:param cmax: max cell size
    :return: declustering weights, tested cell sizes and declustered means for each cell size
"""
# Load data and set up arrays
nd = len(df)
x = df[xcol].values
y = df[ycol].values
v = df[vcol].values
wt = np.zeros(nd)
wtopt = np.ones(nd)
index = np.zeros(nd, np.int32)
xcs_mat = np.zeros(ncell + 2) # we use 1,...,n for this array
vrcr_mat = np.zeros(ncell + 2) # we use 1,...,n for this array
anisy = 1.0 # hard code the cells to 2D isotropic
roff = float(noff)
# Calculate extents
xmin = np.min(x)
xmax = np.max(x)
ymin = np.min(y)
ymax = np.max(y)
# Calculate summary statistics
vmean = np.mean(v)
vstdev = np.std(v)
vmin = np.min(v)
vmax = np.max(v)
xcs_mat[0] = 0.0
vrcr_mat[0] = vmean
vrop = vmean # include the naive case
print(f"There are {nd} data with:")
print(f" mean of {vmean} ")
print(f" min and max {vmin} and {vmax}")
print(f" standard dev {vstdev} ")
# Define a "lower" origin to use for the cell sizes
xo1 = xmin - 0.01
yo1 = ymin - 0.01
# Define the increment for the cell size
xinc = (cmax - cmin) / ncell
yinc = xinc
# Loop over "ncell+1" cell sizes in the grid network
ncellx = int((xmax - (xo1 - cmin)) / cmin) + 1
ncelly = int((ymax - (yo1 - cmin * anisy)) / cmin) + 1
ncellt = ncellx * ncelly
cellwt = np.zeros(ncellt)
xcs = cmin - xinc
ycs = (cmin * anisy) - yinc
# Main loop over cell sizes
# 0 index is the 0.0 cell, note n + 1 in Fortran
for lp in range(1, ncell + 2):
xcs = xcs + xinc
ycs = ycs + yinc
# Initialize the weights to zero
wt.fill(0.0)
# Determine the maximum number of grid cells in the network
ncellx = int((xmax - (xo1 - xcs)) / xcs) + 1
ncelly = int((ymax - (yo1 - ycs)) / ycs) + 1
ncellt = float(ncellx * ncelly) # TODO: not used
# Loop over all the origin offsets selected
xfac = min((xcs / roff), (0.5 * (xmax - xmin)))
yfac = min((ycs / roff), (0.5 * (ymax - ymin)))
for kp in range(1, noff + 1):
xo = xo1 - (float(kp) - 1.0) * xfac
yo = yo1 - (float(kp) - 1.0) * yfac
# Initialize the cumulative weight indicators
cellwt.fill(0.0)
# Determine which cell each datum is in
for i in range(0, nd):
icellx = int((x[i] - xo) / xcs) + 1
icelly = int((y[i] - yo) / ycs) + 1
icell = icellx + (icelly - 1) * ncellx
index[i] = icell
cellwt[icell] = cellwt[icell] + 1.0
# The weight assigned to each datum is inversely proportional to the
# number of data in the cell. We first need to get the sum of
# weights so that we can normalize the weights to sum to one
sumw = 0.0
for i in range(0, nd):
ipoint = index[i]
sumw = sumw + (1.0 / cellwt[ipoint])
sumw = 1.0 / sumw
# Accumulate the array of weights (that now sum to one)
for i in range(0, nd):
ipoint = index[i]
wt[i] = wt[i] + (1.0 / cellwt[ipoint]) * sumw
# End loop over all offsets
# Compute the weighted average for this cell size
sumw = 0.0
sumwg = 0.0
for i in range(0, nd):
sumw = sumw + wt[i]
sumwg = sumwg + wt[i] * v[i]
vrcr = sumwg / sumw
vrcr_mat[lp] = vrcr
xcs_mat[lp] = xcs
# See if this weighting is optimal
        if (iminmax and vrcr < vrop) or (not iminmax and vrcr > vrop) or ncell == 1:
best = xcs # TODO: not used
vrop = vrcr
wtopt = wt.copy() # deep copy
# End main loop over all cell sizes
# Get the optimal weights
sumw = 0.0
for i in range(0, nd):
sumw = sumw + wtopt[i]
wtmin = np.min(wtopt) # TODO: not used
wtmax = np.max(wtopt) # TODO: not used
facto = float(nd) / sumw
wtopt = wtopt * facto
return wtopt, xcs_mat, vrcr_mat
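# Example (illustrative sketch, not part of the original GSLIB translation);
# the DataFrame and column names below are hypothetical:
#   wts, cell_sizes, decl_means = declus(df, 'X', 'Y', 'Porosity',
#                                        iminmax=1, noff=5, ncell=100,
#                                        cmin=10, cmax=2000)
# wts are cell-declustering weights (averaging 1.0 over the data), while
# cell_sizes and decl_means trace the declustered mean versus cell size.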
def gam(array, tmin, tmax, xsiz, ysiz, ixd, iyd, nlag, isill):
"""GSLIB's GAM program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param array: 2D gridded data / model
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xsiz: grid cell extents in x direction
:param ysiz: grid cell extents in y direction
:param ixd: lag offset in grid cells
:param iyd: lag offset in grid cells
:param nlag: number of lags to calculate
:param isill: 1 for standardize sill
    :return: lag distances, semivariogram values and number of pairs for each lag
"""
if array.ndim == 2:
ny, nx = array.shape
elif array.ndim == 1:
ny, nx = 1, len(array)
nvarg = 1 # for multiple variograms repeat the program
nxy = nx * ny # TODO: not used
mxdlv = nlag
# Allocate the needed memory
lag = np.zeros(mxdlv)
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv) # TODO: not used
npp = np.zeros(mxdlv)
ivtail = np.zeros(nvarg + 2)
ivhead = np.zeros(nvarg + 2)
ivtype = np.zeros(nvarg + 2)
ivtail[0] = 0
ivhead[0] = 0
ivtype[0] = 0
# Summary statistics for the data after trimming
inside = (array > tmin) & (array < tmax)
avg = array[(array > tmin) & (array < tmax)].mean() # TODO: not used
stdev = array[(array > tmin) & (array < tmax)].std()
var = stdev ** 2.0
vrmin = array[(array > tmin) & (array < tmax)].min() # TODO: not used
vrmax = array[(array > tmin) & (array < tmax)].max() # TODO: not used
num = ((array > tmin) & (array < tmax)).sum() # TODO: not used
# For the fixed seed point, loop through all directions
for iy in range(0, ny):
for ix in range(0, nx):
if inside[iy, ix]:
vrt = array[iy, ix]
ixinc = ixd
iyinc = iyd
ix1 = ix
iy1 = iy
for il in range(0, nlag):
ix1 = ix1 + ixinc
if 0 <= ix1 < nx:
iy1 = iy1 + iyinc
                        if 0 <= iy1 < ny:
if inside[iy1, ix1]:
vrh = array[iy1, ix1]
npp[il] = npp[il] + 1
tm[il] = tm[il] + vrt
hm[il] = hm[il] + vrh
vario[il] = vario[il] + ((vrh - vrt) ** 2.0)
# Get average values for gam, hm, tm, hv, and tv, then compute the correct
# "variogram" measure
for il in range(0, nlag):
if npp[il] > 0:
rnum = npp[il]
lag[il] = np.sqrt((ixd * xsiz * il) ** 2 + (iyd * ysiz * il) ** 2)
vario[il] = vario[il] / float(rnum)
hm[il] = hm[il] / float(rnum)
tm[il] = tm[il] / float(rnum)
# Standardize by the sill
if isill == 1:
vario[il] = vario[il] / var
# Semivariogram
vario[il] = 0.5 * vario[il]
return lag, vario, npp
def gamv(df, xcol, ycol, vcol, tmin, tmax, xlag, xltol, nlag, azm, atol, bandwh, isill):
"""GSLIB's GAMV program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
Note simplified for 2D, semivariogram only and one direction at a time.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param atol: azimuth tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
:param isill: 1 for standardize sill
    :return: lag distances, semivariogram values and number of pairs for each lag
"""
# Load the data
# Trim values outside tmin and tmax
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
    nd = len(df_extract)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
return _gamv(nd, x, y, vr, xcol, ycol, vcol, tmin, tmax, xlag, xltol, nlag, azm, atol, bandwh,
isill)
@jit(**JITKW, **JITPL)
def _gamv(nd, x, y, vr, xcol, ycol, vcol, tmin, tmax, xlag, xltol, nlag, azm, atol, bandwh, isill):
# Summary statistics for the data after trimming
avg = vr.mean() # TODO: not used
stdev = vr.std()
sills = stdev ** 2.0
ssq = sills # TODO: not used
vrmin = vr.min() # TODO: not used
vrmax = vr.max() # TODO: not used
# Define the distance tolerance if it isn't already
if xltol < 0.0:
xltol = 0.5 * xlag
# Loop over combinatorial of data pairs to calculate the variogram
dis, vario, npp = variogram_loop(
x, y, vr, xlag, xltol, nlag, azm, atol, bandwh
)
# Standardize sill to one by dividing all variogram values by the variance
for il in nb.prange(0, nlag + 2):
if isill == 1:
vario[il] = vario[il] / sills
# Apply 1/2 factor to go from variogram to semivariogram
vario[il] = 0.5 * vario[il]
return dis, vario, npp
@jit(**JITKW, **JITPL)
def variogram_loop(x, y, vr, xlag, xltol, nlag, azm, atol, bandwh):
"""Calculate the variogram by looping over combinatorial of data pairs.
:param x: x values
:param y: y values
:param vr: property values
:param xlag: lag distance
:param xltol: lag distance tolerance
:param nlag: number of lags to calculate
:param azm: azimuth
:param atol: azimuth tolerance
:param bandwh: horizontal bandwidth / maximum distance offset orthogonal to
azimuth
    :return: average lag distances, variogram values (before the 1/2 factor and sill standardization) and number of pairs per lag
"""
# Allocate the needed memory
nvarg = 1
mxdlv = nlag + 2 # in gamv the npp etc. arrays go to nlag + 2
dis = np.zeros(mxdlv)
lag = np.zeros(mxdlv) # TODO: not used
vario = np.zeros(mxdlv)
hm = np.zeros(mxdlv)
tm = np.zeros(mxdlv)
hv = np.zeros(mxdlv) # TODO: not used
npp = np.zeros(mxdlv)
ivtail = np.zeros(nvarg + 2)
ivhead = np.zeros(nvarg + 2)
ivtype = np.ones(nvarg + 2)
ivtail[0] = 0
ivhead[0] = 0
ivtype[0] = 0
EPSLON = 1.0e-20
nd = len(x)
# The mathematical azimuth is measured counterclockwise from EW and
# not clockwise from NS as the conventional azimuth is
azmuth = (90.0 - azm) * math.pi / 180.0
uvxazm = math.cos(azmuth)
uvyazm = math.sin(azmuth)
if atol <= 0.0:
csatol = math.cos(45.0 * math.pi / 180.0)
else:
csatol = math.cos(atol * math.pi / 180.0)
# Initialize the arrays for each direction, variogram, and lag
nsiz = nlag + 2 # TODO: not used
dismxs = ((float(nlag) + 0.5 - EPSLON) * xlag) ** 2
# Main loop over all pairs
for i in nb.prange(0, nd):
for j in nb.prange(0, nd):
# Definition of the lag corresponding to the current pair
dx = x[j] - x[i]
dy = y[j] - y[i]
dxs = dx * dx
dys = dy * dy
hs = dxs + dys
if hs <= dismxs:
if hs < 0.0:
hs = 0.0
h = np.sqrt(hs)
# Determine which lag this is and skip if outside the defined
# distance tolerance
if h <= EPSLON:
lagbeg = 0
lagend = 0
else:
lagbeg = -1
lagend = -1
for ilag in range(1, nlag + 1):
# reduced to -1
if (
(xlag * float(ilag - 1) - xltol)
<= h
<= (xlag * float(ilag - 1) + xltol)
):
if lagbeg < 0:
lagbeg = ilag
lagend = ilag
if lagend >= 0:
# Definition of the direction corresponding to the current
# pair. All directions are considered (overlapping of
# direction tolerance cones is allowed)
# Check for an acceptable azimuth angle
dxy = np.sqrt(max((dxs + dys), 0.0))
if dxy < EPSLON:
dcazm = 1.0
else:
dcazm = (dx * uvxazm + dy * uvyazm) / dxy
# Check the horizontal bandwidth criteria (maximum deviation
# perpendicular to the specified direction azimuth)
band = uvxazm * dy - uvyazm * dx
# Apply all the previous checks at once to avoid a lot of
# nested if statements
if (abs(dcazm) >= csatol) and (abs(band) <= bandwh):
# Check whether or not an omni-directional variogram is
# being computed
omni = False
if atol >= 90.0:
omni = True
# For this variogram, sort out which is the tail and
# the head value
iv = 0 # hardcoded just one variogram
it = ivtype[iv] # TODO: not used
if dcazm >= 0.0:
vrh = vr[i]
vrt = vr[j]
if omni:
vrtpr = vr[i]
vrhpr = vr[j]
else:
vrh = vr[j]
vrt = vr[i]
if omni:
vrtpr = vr[j]
vrhpr = vr[i]
# Reject this pair on the basis of missing values
# Data was trimmed at the beginning
# The Semivariogram (all other types of measures are
# removed for now)
for il in range(lagbeg, lagend + 1):
npp[il] = npp[il] + 1
dis[il] = dis[il] + h
tm[il] = tm[il] + vrt
hm[il] = hm[il] + vrh
vario[il] = vario[il] + ((vrh - vrt) * (vrh - vrt))
if omni:
npp[il] = npp[il] + 1.0
dis[il] = dis[il] + h
tm[il] = tm[il] + vrtpr
hm[il] = hm[il] + vrhpr
vario[il] = vario[il] + (
(vrhpr - vrtpr) * (vrhpr - vrtpr)
)
# Get average values for gam, hm, tm, hv, and tv, then compute the correct
# "variogram" measure
for il in range(0, nlag + 2):
i = il
if npp[i] > 0:
rnum = npp[i]
dis[i] = dis[i] / rnum
vario[i] = vario[i] / rnum
hm[i] = hm[i] / rnum
tm[i] = tm[i] / rnum
return dis, vario, npp
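# Example (illustrative sketch, not part of the original GSLIB translation);
# the DataFrame and column names below are hypothetical:
#   lags, gamma, npairs = gamv(df, 'X', 'Y', 'Porosity', tmin=-998, tmax=998,
#                              xlag=100, xltol=50, nlag=20, azm=45, atol=22.5,
#                              bandwh=9999, isill=1)
# lags holds the average separation distance per lag, gamma the experimental
# semivariogram (standardized to a sill of 1.0) and npairs the number of
# data pairs per lag.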
def varmapv(df, xcol, ycol, vcol, tmin, tmax, nxlag, nylag, dxlag, dylag, minnp, isill):
"""Calculate the variogram map from irregularly spaced data.
:param df: DataFrame with the spatial data, xcol, ycol, vcol coordinates and property columns
:param xcol: DataFrame column with x coordinate
:param ycol: DataFrame column with y coordinate
:param vcol: DataFrame column with value of interest
:param tmin: lower trimming limit
:param tmax: upper trimming limit
:param nxlag: number of lags in the x direction
    :param nylag: number of lags in the y direction
:param dxlag: size of the lags in the x direction
:param dylag: size of the lags in the y direction
:param minnp: minimum number of pairs to calculate a variogram value
:param isill: standardize sill to be 1.0
    :return: variogram map and the corresponding map of the number of pairs
"""
# Load the data
# trim values outside tmin and tmax
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
nd = len(df_extract)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
return _varmapv(nd, x, y, vr, xcol, ycol, vcol, tmin, tmax, nxlag, nylag, dxlag, dylag, minnp,
isill)
@jit(**JITKW, **JITPL) # runs faster not in parallel
def _varmapv(nd, x, y, vr, xcol, ycol, vcol, tmin, tmax, nxlag, nylag, dxlag, dylag, minnp, isill):
"""Calculate the variogram map from irregularly spaced data.
:param df: DataFrame with the spatial data, xcol, ycol, vcol coordinates and property columns
:param xcol: DataFrame column with x coordinate
:param ycol: DataFrame column with y coordinate
:param vcol: DataFrame column with value of interest
:param tmin: lower trimming limit
:param tmax: upper trimming limit
:param nxlag: number of lags in the x direction
    :param nylag: number of lags in the y direction
:param dxlag: size of the lags in the x direction
:param dylag: size of the lags in the y direction
:param minnp: minimum number of pairs to calculate a variogram value
:param isill: standardize sill to be 1.0
    :return: variogram map and the corresponding map of the number of pairs
"""
# Summary statistics for the data after trimming
avg = vr.mean()
stdev = vr.std()
sills = stdev**2.0
ssq = sills
vrmin = vr.min()
vrmax = vr.max()
# Initialize the summation arrays
npp = np.zeros((nylag*2+1, nxlag*2+1))
gam = np.zeros((nylag*2+1, nxlag*2+1))
nppf = np.zeros((nylag*2+1, nxlag*2+1))
gamf = np.zeros((nylag*2+1, nxlag*2+1))
hm = np.zeros((nylag*2+1, nxlag*2+1))
tm = np.zeros((nylag*2+1, nxlag*2+1))
hv = np.zeros((nylag*2+1, nxlag*2+1))
tv = np.zeros((nylag*2+1, nxlag*2+1))
# First fix the location of a seed point:
for i in nb.prange(0, nd):
# Second loop over the data:
for j in nb.prange(0, nd):
# The lag:
ydis = y[j] - y[i]
iyl = nylag + int(ydis/dylag)
            if iyl < 0 or iyl > nylag*2:  # accounting for 0,...,n-1 array indexing
continue
xdis = x[j] - x[i]
ixl = nxlag + int(xdis/dxlag)
            if ixl < 0 or ixl > nxlag*2:  # accounting for 0,...,n-1 array indexing
continue
# We have an acceptable pair, therefore accumulate all the statistics
# that are required for the variogram:
# our ndarrays read from the base to top, so we flip
npp[iyl, ixl] = npp[iyl, ixl] + 1
tm[iyl, ixl] = tm[iyl, ixl] + vr[i]
hm[iyl, ixl] = hm[iyl, ixl] + vr[j]
            tv[iyl, ixl] = tv[iyl, ixl] + vr[i]*vr[i]
            hv[iyl, ixl] = hv[iyl, ixl] + vr[j]*vr[j]
gam[iyl, ixl] = gam[iyl, ixl] + ((vr[i]-vr[j])*(vr[i]-vr[j]))
# Get average values for gam, hm, tm, hv, and tv, then compute
# the correct "variogram" measure:
for iy in nb.prange(0, nylag*2+1):
for ix in nb.prange(0, nxlag*2+1):
if npp[iy, ix] <= minnp:
gam[iy, ix] = -999.
hm[iy, ix] = -999.
tm[iy, ix] = -999.
hv[iy, ix] = -999.
tv[iy, ix] = -999.
else:
rnum = npp[iy, ix]
gam[iy, ix] = gam[iy, ix] / (2*rnum) # semivariogram
hm[iy, ix] = hm[iy, ix] / rnum
tm[iy, ix] = tm[iy, ix] / rnum
hv[iy, ix] = hv[iy, ix] / rnum - hm[iy, ix]*hm[iy, ix]
tv[iy, ix] = tv[iy, ix] / rnum - tm[iy, ix]*tm[iy, ix]
                # Attempt to standardize:
                if isill > 0:
                    gam[iy, ix] = gam[iy, ix] / sills
for iy in nb.prange(0, nylag*2+1):
for ix in nb.prange(0, nxlag*2+1):
gamf[iy, ix] = gam[nylag*2-iy, ix]
nppf[iy, ix] = npp[nylag*2-iy, ix]
return gamf, nppf
def vmodel(
nlag,
xlag,
azm,
vario
):
"""GSLIB's VMODEL program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Mar, 2019).
:param nlag: number of variogram lags
:param xlag: size of the lags
    :param azm: direction by 2D azimuth, 000 is y positive, 090 is x positive
    :param vario: dictionary with the variogram parameters
    :return: lag index, lag distance, model semivariogram, covariance and correlogram arrays
"""
# Parameters
MAXNST = 4
DEG2RAD = 3.14159265/180.0
MAXROT = MAXNST+1
EPSLON = 1.0e-20
VERSION = 1.01
# Declare arrays
index = np.zeros(nlag+1)
h = np.zeros(nlag+1)
gam = np.zeros(nlag+1)
cov = np.zeros(nlag+1)
ro = np.zeros(nlag+1)
# Load the variogram
nst = vario["nst"]
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang = np.zeros(nst)
anis = np.zeros(nst)
c0 = vario["nug"]
cc[0] = vario["cc1"]
it[0] = vario["it1"]
ang[0] = vario["azi1"]
aa[0] = vario["hmaj1"]
anis[0] = vario["hmin1"] / vario["hmaj1"]
if nst == 2:
cc[1] = vario["cc2"]
it[1] = vario["it2"]
ang[1] = vario["azi2"]
aa[1] = vario["hmaj2"]
anis[1] = vario["hmin2"] / vario["hmaj2"]
xoff = math.sin(DEG2RAD*azm)*xlag
yoff = math.cos(DEG2RAD*azm)*xlag
    print(' x,y offsets = ' + str(xoff) + ',' + str(yoff))
rotmat, maxcov = setup_rotmat(c0, nst, it, cc, ang, 99999.9)
xx = 0.0
yy = 0.0
for il in range(0, nlag+1):
index[il] = il
cov[il] = cova2(0.0, 0.0, xx, yy, nst, c0, 9999.9,
cc, aa, it, ang, anis, rotmat, maxcov)
gam[il] = maxcov - cov[il]
ro[il] = cov[il]/maxcov
h[il] = math.sqrt(max((xx*xx+yy*yy), 0.0))
xx = xx + xoff
yy = yy + yoff
# finished
return index, h, gam, cov, ro
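# Example (illustrative sketch, not part of the original GSLIB translation):
# evaluate a single-structure spherical variogram model along azimuth 045:
#   vario = {'nst': 1, 'nug': 0.0, 'cc1': 1.0, 'it1': 1, 'azi1': 45.0,
#            'hmaj1': 500.0, 'hmin1': 250.0}
#   idx, h, gam_mod, cov, ro = vmodel(nlag=50, xlag=10, azm=45.0, vario=vario)
# h holds the lag distances, gam_mod the model semivariogram, cov the
# covariance and ro the correlogram along the requested direction.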
def nscore(
df, vcol, wcol=None, ismooth=False, dfsmooth=None, smcol=0, smwcol=0
):
"""GSLIB's NSCORE program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param df: pandas DataFrame with the spatial data
:param vcol: name of the variable column
:param wcol: name of the weight column, if None assumes equal weighting
:param ismooth: if True then use a reference distribution
:param dfsmooth: pandas DataFrame required if reference distribution is used
:param smcol: reference distribution property (required if reference
distribution is used)
:param smwcol: reference distribution weight (required if reference
distribution is used)
    :return: normal score transformed values and the transformation table (original values, normal scores)
"""
# Set constants
np.random.seed(73073)
pwr = 1.0 # interpolation power, hard coded to 1.0 in GSLIB
EPSILON = 1.0e-20
# Decide which file to use for establishing the transformation table
if ismooth:
nd = len(dfsmooth)
vr = dfsmooth[smcol].values
wt_ns = np.ones(nd)
if smwcol != 0:
wt_ns = dfsmooth[smwcol].values
else:
nd = len(df)
vr = df[vcol].values
wt_ns = np.ones(nd)
if wcol is not None:
wt_ns = df[wcol].values
twt = np.sum(wt_ns)
# Sort data by value
istart = 0
iend = nd
vr, wt_ns = dsortem(istart, iend, vr, 2, wt_ns)
# Compute the cumulative probabilities and write transformation table
wtfac = 1.0 / twt
oldcp = 0.0
cp = 0.0
for j in range(istart, iend):
w = wtfac * wt_ns[j]
cp = cp + w
wt_ns[j] = (cp + oldcp) / 2.0
vrrg = gauinv(wt_ns[j])
vrg = float(vrrg)
oldcp = cp
# Now, reset the weight to the normal scores value
wt_ns[j] = vrg
# Normal scores transform
nd_trans = len(df)
ns = np.zeros(nd_trans)
val = df[vcol].values
for i in range(0, nd_trans):
vrr = val[i] + np.random.rand() * EPSILON
# Now, get the normal scores value for "vrr"
j = dlocate(vr, 1, nd, vrr)
        j = min(max(1, j), (nd - 2))
ns[i] = dpowint(vr[j], vr[j + 1], wt_ns[j], wt_ns[j + 1], vrr, pwr)
return ns, vr, wt_ns
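# Example use of nscore (a minimal sketch): the column name "porosity" and the values are
# hypothetical; any numeric column works, with an optional declustering weight column.
def _example_nscore():
    import pandas as pd  # pandas assumed available, since DataFrames are the expected input
    df = pd.DataFrame({"porosity": [0.08, 0.11, 0.13, 0.09, 0.15, 0.12]})
    ns, vr, wt_ns = nscore(df, "porosity")  # equal weighting since wcol is None
    df["nporosity"] = ns                    # normal scores transform of the variable
    return df, vr, wt_ns                    # vr and wt_ns form the transformation table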
def kb2d(
df,
xcol,
ycol,
vcol,
tmin,
tmax,
nx,
xmn,
xsiz,
ny,
ymn,
ysiz,
nxdis,
nydis,
ndmin,
ndmax,
radius,
ktype,
skmean,
vario,
):
"""GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param nxdis: number of discretization points for a block
:param nydis: number of discretization points for a block
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
    :param skmean: simple kriging mean (used if ktype = 0)
    :param vario: dictionary with the variogram parameters
    :return: kriged estimate and kriging variance maps (2D ndarrays)
"""
# Constants
UNEST = -999.
EPSLON = 1.0e-10
VERSION = 2.907
first = True
PMX = 9999.0
MAXSAM = ndmax + 1
MAXDIS = nxdis * nydis
MAXKD = MAXSAM + 1
MAXKRG = MAXKD * MAXKD
# load the variogram
nst = vario['nst']
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang = np.zeros(nst)
anis = np.zeros(nst)
c0 = vario['nug']
cc[0] = vario['cc1']
it[0] = vario['it1']
ang[0] = vario['azi1']
aa[0] = vario['hmaj1']
anis[0] = vario['hmin1']/vario['hmaj1']
if nst == 2:
cc[1] = vario['cc2']
it[1] = vario['it2']
ang[1] = vario['azi2']
aa[1] = vario['hmaj2']
anis[1] = vario['hmin2']/vario['hmaj2']
# Allocate the needed memory:
xdb = np.zeros(MAXDIS)
ydb = np.zeros(MAXDIS)
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXKD)
rr = np.zeros(MAXKD)
s = np.zeros(MAXKD)
a = np.zeros(MAXKRG)
kmap = np.zeros((nx, ny))
vmap = np.zeros((nx, ny))
# Load the data
# trim values outside tmin and tmax
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
nd = len(df_extract)
ndmax = min(ndmax, nd)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
# Make a KDTree for fast search of nearest neighbours
dp = list((y[i], x[i]) for i in range(0, nd))
data_locs = np.column_stack((y, x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True,
copy_data=False, balanced_tree=True)
# Summary statistics for the data after trimming
avg = vr.mean()
stdev = vr.std()
ss = stdev**2.0
vrmin = vr.min()
vrmax = vr.max()
# Set up the discretization points per block. Figure out how many
# are needed, the spacing, and fill the xdb and ydb arrays with the
# offsets relative to the block center (this only gets done once):
ndb = nxdis * nydis
if ndb > MAXDIS:
print('ERROR KB2D: Too many discretization points ')
print(' Increase MAXDIS or lower n[xy]dis')
return kmap
xdis = xsiz / max(float(nxdis), 1.0)
ydis = ysiz / max(float(nydis), 1.0)
xloc = -0.5*(xsiz+xdis)
i = -1 # accounting for 0 as lowest index
for ix in range(0, nxdis):
xloc = xloc + xdis
yloc = -0.5*(ysiz+ydis)
for iy in range(0, nydis):
yloc = yloc + ydis
i = i+1
xdb[i] = xloc
ydb[i] = yloc
# Initialize accumulators:
cbb = 0.0
rad2 = radius*radius
# Calculate Block Covariance. Check for point kriging.
rotmat, maxcov = setup_rotmat(c0, nst, it, cc, ang, PMX)
cov = cova2(xdb[0], ydb[0], xdb[0], ydb[0], nst, c0,
PMX, cc, aa, it, ang, anis, rotmat, maxcov)
# Keep this value to use for the unbiasedness constraint:
unbias = cov
first = False
if ndb <= 1:
cbb = cov
else:
for i in range(0, ndb):
for j in range(0, ndb):
cov = cova2(xdb[i], ydb[i], xdb[j], ydb[j], nst,
c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
if i == j:
cov = cov - c0
cbb = cbb + cov
        cbb = cbb/float(ndb*ndb)
# MAIN LOOP OVER ALL THE BLOCKS IN THE GRID:
nk = 0
ak = 0.0
vk = 0.0
for iy in range(0, ny):
yloc = ymn + (iy-0)*ysiz
for ix in range(0, nx):
xloc = xmn + (ix-0)*xsiz
current_node = (yloc, xloc)
# Find the nearest samples within each octant: First initialize
# the counter arrays:
na = -1 # accounting for 0 as first index
dist.fill(1.0e+20)
nums.fill(-1)
# use kd tree for fast nearest data search
dist, nums = tree.query(
current_node, ndmax, distance_upper_bound=radius)
# remove any data outside search radius
na = len(dist)
nums = nums[dist < radius]
dist = dist[dist < radius]
na = len(dist)
# Is there enough samples?
if na + 1 < ndmin: # accounting for min index of 0
est = UNEST
estv = UNEST
print('UNEST at ' + str(ix) + ',' + str(iy))
else:
# Put coordinates and values of neighborhood samples into xa,ya,vra:
for ia in range(0, na):
jj = int(nums[ia])
xa[ia] = x[jj]
ya[ia] = y[jj]
vra[ia] = vr[jj]
# Handle the situation of only one sample:
if na == 0: # accounting for min index of 0 - one sample case na = 0
cb1 = cova2(xa[0], ya[0], xa[0], ya[0], nst, c0,
PMX, cc, aa, it, ang, anis, rotmat, maxcov)
xx = xa[0] - xloc
yy = ya[0] - yloc
# Establish Right Hand Side Covariance:
if ndb <= 1:
cb = cova2(
xx, yy, xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
else:
cb = 0.0
for i in range(0, ndb):
cb = cb + \
cova2(
xx, yy, xdb[i], ydb[i], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
dx = xx - xdb[i]
dy = yy - ydb[i]
if (dx*dx+dy*dy) < EPSLON:
cb = cb - c0
                        cb = cb / float(ndb)
if ktype == 0:
s[0] = cb/cbb
est = s[0]*vra[0] + (1.0-s[0])*skmean
estv = cbb - s[0] * cb
else:
est = vra[0]
estv = cbb - 2.0*cb + cb1
else:
# Solve the Kriging System with more than one sample:
neq = na + ktype # accounting for first index of 0
# print('NEQ' + str(neq))
nn = (neq + 1)*neq/2
# Set up kriging matrices:
iin = -1 # accounting for first index of 0
for j in range(0, na):
# Establish Left Hand Side Covariance Matrix:
for i in range(0, na): # was j - want full matrix
iin = iin + 1
a[iin] = cova2(
xa[i], ya[i], xa[j], ya[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
if ktype == 1:
iin = iin + 1
a[iin] = unbias
xx = xa[j] - xloc
yy = ya[j] - yloc
# Establish Right Hand Side Covariance:
if ndb <= 1:
cb = cova2(
xx, yy, xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
else:
cb = 0.0
for j1 in range(0, ndb):
cb = cb + \
cova2(
xx, yy, xdb[j1], ydb[j1], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
dx = xx - xdb[j1]
dy = yy - ydb[j1]
if (dx*dx+dy*dy) < EPSLON:
cb = cb - c0
                            cb = cb / float(ndb)
r[j] = cb
rr[j] = r[j]
# Set the unbiasedness constraint:
if ktype == 1:
for i in range(0, na):
iin = iin + 1
a[iin] = unbias
iin = iin + 1
a[iin] = 0.0
r[neq-1] = unbias
                        rr[neq-1] = r[neq-1]
# Solve the Kriging System:
# print('NDB' + str(ndb))
# print('NEQ' + str(neq) + ' Left' + str(a) + ' Right' + str(r))
# stop
s = ksol_numpy(neq, a, r)
ising = 0 # need to figure this out
# print('weights' + str(s))
# stop
# Write a warning if the matrix is singular:
if ising != 0:
print('WARNING KB2D: singular matrix')
print(' for block' +
str(ix) + ',' + str(iy) + ' ')
est = UNEST
estv = UNEST
else:
# Compute the estimate and the kriging variance:
est = 0.0
estv = cbb
sumw = 0.0
if ktype == 1:
estv = estv - (s[na])*unbias
for i in range(0, na):
sumw = sumw + s[i]
est = est + s[i]*vra[i]
estv = estv - s[i]*rr[i]
if ktype == 0:
est = est + (1.0-sumw)*skmean
kmap[ny-iy-1, ix] = est
vmap[ny-iy-1, ix] = estv
if est > UNEST:
nk = nk + 1
ak = ak + est
vk = vk + est*est
# END OF MAIN LOOP OVER ALL THE BLOCKS:
if nk >= 1:
ak = ak / float(nk)
vk = vk/float(nk) - ak*ak
print(' Estimated ' + str(nk) + ' blocks ')
print(' average ' + str(ak) + ' variance ' + str(vk))
return kmap, vmap
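# Example use of kb2d (a minimal sketch): the coordinates, grid definition, search
# parameters and variogram below are all hypothetical, chosen only to show the calling
# convention for ordinary kriging on a 20 x 20 grid.
def _example_kb2d():
    import pandas as pd  # pandas assumed available
    df = pd.DataFrame({"X": [100.0, 400.0, 700.0, 250.0],
                       "Y": [150.0, 300.0, 650.0, 800.0],
                       "por": [0.10, 0.14, 0.08, 0.12]})
    vario = {"nst": 1, "nug": 0.0, "cc1": 1.0, "it1": 1,
             "azi1": 0.0, "hmaj1": 400.0, "hmin1": 400.0}
    kmap, vmap = kb2d(df, "X", "Y", "por", tmin=-998.0, tmax=998.0,
                      nx=20, xmn=25.0, xsiz=50.0, ny=20, ymn=25.0, ysiz=50.0,
                      nxdis=1, nydis=1, ndmin=0, ndmax=4, radius=1500.0,
                      ktype=1, skmean=0.11, vario=vario)  # ktype=1 -> ordinary kriging
    return kmap, vmap  # kriged estimate and kriging variance maps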
def kb2d_jit(
df,
xcol,
ycol,
vcol,
tmin,
tmax,
nx,
xmn,
xsiz,
ny,
ymn,
ysiz,
nxdis,
nydis,
ndmin,
ndmax,
radius,
ktype,
skmean,
vario,
):
"""GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param nxdis: number of discretization points for a block
:param nydis: number of discretization points for a block
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
    :param skmean: simple kriging mean (used if ktype = 0)
    :param vario: dictionary with the variogram parameters
    :return: kriged estimate and kriging variance maps (2D ndarrays)
"""
# Load the data
# trim values outside tmin and tmax
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
nd = len(df_extract)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
# Make a KDTree for fast search of nearest neighbours
# dp = list((y[i], x[i]) for i in range(0,nd))
data_locs = np.column_stack((y, x))
import scipy.spatial as sp
_tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True,
copy_data=False, balanced_tree=True)
vario_int = Dict.empty(
key_type=nb.types.unicode_type,
value_type=nb.types.i8
)
vario_float = Dict.empty(
key_type=nb.types.unicode_type,
value_type=nb.f8
)
vario = copy(vario)
vario_float['nug'] = vario.pop('nug')
vario_float['cc1'] = vario.pop('cc1')
vario_float['cc2'] = vario.pop('cc2')
for key in vario.keys():
vario_int[key] = vario[key]
tree = Dict.empty(
key_type=nb.types.Tuple((nb.f8, nb.f8)),
value_type=nb.types.Tuple((nb.f8[:], nb.i4[:]))
)
for iy in range(0, ny):
yloc = ymn + (iy-0)*ysiz
for ix in range(0, nx):
xloc = xmn + (ix-0)*xsiz
current_node = (yloc, xloc)
tree[current_node] = _tree.query(
current_node, ndmax, distance_upper_bound=radius)
kmap, vmap = _kb2d_jit(
tree, nd, x, y, vr,
xcol, ycol, vcol, tmin, tmax, nx, xmn, xsiz, ny, ymn, ysiz, nxdis, nydis, ndmin,
ndmax, radius, ktype, skmean, vario_int, vario_float)
return kmap, vmap
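# kb2d_jit precomputes the cKDTree query for every grid node and hands the results to the
# jitted kernel through a numba typed Dict, since scipy objects cannot be used inside
# nopython-compiled code; likewise the plain Python variogram dict is split into two typed
# Dicts with homogeneous value types. A minimal sketch of that split (keys and values below
# are hypothetical):
def _example_typed_dict_split():
    from numba import types        # numba assumed available, as it is used above
    from numba.typed import Dict
    d_int = Dict.empty(key_type=types.unicode_type, value_type=types.i8)
    d_float = Dict.empty(key_type=types.unicode_type, value_type=types.f8)
    d_int["nst"] = 1       # integer-valued entries go to the int64 dict
    d_float["nug"] = 0.0   # float-valued entries go to the float64 dict
    return d_int, d_float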
@jit(**JITKW) # numba crashed on parallel=True
def _kb2d_jit(
tree, nd, x, y, vr,
xcol, ycol, vcol, tmin, tmax, nx, xmn, xsiz, ny, ymn, ysiz, nxdis, nydis, ndmin,
ndmax, radius, ktype, skmean, vario_int, vario_float):
# Constants
UNEST = -999.
EPSLON = 1.0e-10
VERSION = 2.907
first = True
PMX = 9999.0
MAXSAM = ndmax + 1
MAXDIS = nxdis * nydis
MAXKD = MAXSAM + 1
MAXKRG = MAXKD * MAXKD
ndmax = min(ndmax, nd)
# load the variogram
nst = vario_int['nst']
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang = np.zeros(nst)
anis = np.zeros(nst)
# Allocate the needed memory:
xdb = np.zeros(MAXDIS)
ydb = np.zeros(MAXDIS)
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
# dist = np.zeros(MAXSAM)
# nums = np.zeros(MAXSAM)
r = np.zeros(MAXKD)
rr = np.zeros(MAXKD)
s = np.zeros(MAXKD)
a = np.zeros(MAXKRG)
kmap = np.zeros((nx, ny))
vmap = np.zeros((nx, ny))
c0 = vario_float['nug']
cc[0] = vario_float['cc1']
it[0] = vario_int['it1']
ang[0] = vario_int['azi1']
aa[0] = vario_int['hmaj1']
anis[0] = vario_int['hmin1']/vario_int['hmaj1']
if nst == 2:
cc[1] = vario_float['cc2']
it[1] = vario_int['it2']
ang[1] = vario_int['azi2']
aa[1] = vario_int['hmaj2']
anis[1] = vario_int['hmin2']/vario_int['hmaj2']
# Summary statistics for the data after trimming
avg = np.mean(vr)
stdev = np.std(vr)
ss = stdev ** 2.0
vrmin = np.min(vr)
vrmax = np.max(vr)
# Set up the discretization points per block. Figure out how many
# are needed, the spacing, and fill the xdb and ydb arrays with the
# offsets relative to the block center (this only gets done once):
ndb = nxdis * nydis
if ndb > MAXDIS:
print('ERROR KB2D: Too many discretization points ')
print(' Increase MAXDIS or lower n[xy]dis')
# return kmap
xdis = xsiz / max(float(nxdis), 1.0)
ydis = ysiz / max(float(nydis), 1.0)
xloc = -0.5 * (xsiz + xdis)
i = -1 # accounting for 0 as lowest index
for ix in range(0, nxdis):
xloc = xloc + xdis
yloc = -0.5 * (ysiz+ydis)
for iy in range(0, nydis):
yloc = yloc + ydis
i += 1
xdb[i] = xloc
ydb[i] = yloc
# Initialize accumulators:
cbb = 0.0
rad2 = radius * radius
# Calculate Block Covariance. Check for point kriging.
rotmat, maxcov = setup_rotmat(c0, nst, it, cc, ang, PMX)
cov = cova2(xdb[0], ydb[0], xdb[0], ydb[0], nst, c0,
PMX, cc, aa, it, ang, anis, rotmat, maxcov)
# Keep this value to use for the unbiasedness constraint:
unbias = cov
first = False
if ndb <= 1:
cbb = cov
else:
for i in nb.prange(0, ndb):
for j in nb.prange(0, ndb):
cov = cova2(xdb[i], ydb[i], xdb[j], ydb[j], nst,
c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
if i == j:
cov = cov - c0
cbb += cov
        cbb = cbb/float(ndb*ndb)
# MAIN LOOP OVER ALL THE BLOCKS IN THE GRID:
nk = 0
ak = 0.0
vk = 0.0
for iy in nb.prange(0, ny):
yloc = ymn + (iy-0)*ysiz
for ix in nb.prange(0, nx):
xloc = xmn + (ix-0)*xsiz
current_node = (yloc, xloc)
# Find the nearest samples within each octant: First initialize
# the counter arrays:
na = -1 # accounting for 0 as first index
# dist.fill(1.0e+20)
# nums.fill(-1)
# dist, nums = tree.query(current_node,ndmax, distance_upper_bound=radius) # use kd tree for fast nearest data search
dist, nums = tree[current_node]
# remove any data outside search radius
nums = nums[dist < radius]
dist = dist[dist < radius]
na = len(dist)
# Is there enough samples?
if na + 1 < ndmin: # accounting for min index of 0
est = UNEST
estv = UNEST
# print('UNEST at ' + str(ix) + ',' + str(iy))
else:
# Put coordinates and values of neighborhood samples into xa,ya,vra:
for ia in range(0, na):
jj = int(nums[ia])
xa[ia] = x[jj]
ya[ia] = y[jj]
vra[ia] = vr[jj]
# Handle the situation of only one sample:
if na == 0: # accounting for min index of 0 - one sample case na = 0
cb1 = cova2(xa[0], ya[0], xa[0], ya[0], nst, c0,
PMX, cc, aa, it, ang, anis, rotmat, maxcov)
xx = xa[0] - xloc
yy = ya[0] - yloc
# Establish Right Hand Side Covariance:
if ndb <= 1:
cb = cova2(
xx, yy, xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
else:
cb = 0.0
for i in range(0, ndb):
cb = cb + \
cova2(
xx, yy, xdb[i], ydb[i], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
dx = xx - xdb[i]
dy = yy - ydb[i]
if (dx*dx+dy*dy) < EPSLON:
cb = cb - c0
                        cb = cb / float(ndb)
if ktype == 0:
s[0] = cb/cbb
est = s[0]*vra[0] + (1.0-s[0])*skmean
estv = cbb - s[0] * cb
else:
est = vra[0]
estv = cbb - 2.0*cb + cb1
else:
# Solve the Kriging System with more than one sample:
neq = na + ktype # accounting for first index of 0
# print('NEQ' + str(neq))
nn = (neq + 1)*neq/2
# Set up kriging matrices:
iin = -1 # accounting for first index of 0
for j in range(0, na):
# Establish Left Hand Side Covariance Matrix:
for i in range(0, na): # was j - want full matrix
iin = iin + 1
a[iin] = cova2(
xa[i], ya[i], xa[j], ya[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
if ktype == 1:
iin = iin + 1
a[iin] = unbias
xx = xa[j] - xloc
yy = ya[j] - yloc
# Establish Right Hand Side Covariance:
if ndb <= 1:
cb = cova2(
xx, yy, xdb[0], ydb[0], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
else:
cb = 0.0
for j1 in range(0, ndb):
cb = cb + \
cova2(
xx, yy, xdb[j1], ydb[j1], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
dx = xx - xdb[j1]
dy = yy - ydb[j1]
if (dx*dx+dy*dy) < EPSLON:
cb = cb - c0
                            cb = cb / float(ndb)
r[j] = cb
rr[j] = r[j]
# Set the unbiasedness constraint:
if ktype == 1:
for i in range(0, na):
iin = iin + 1
a[iin] = unbias
iin = iin + 1
a[iin] = 0.0
r[neq-1] = unbias
                        rr[neq-1] = r[neq-1]
# Solve the Kriging System:
# print('NDB' + str(ndb))
# print('NEQ' + str(neq) + ' Left' + str(a) + ' Right' + str(r))
# stop
s = ksol_numpy(neq, a, r)
ising = 0 # need to figure this out
# print('weights' + str(s))
# stop
# Write a warning if the matrix is singular:
if ising != 0:
# print('WARNING KB2D: singular matrix')
# print(' for block' + str(ix) + ',' + str(iy)+ ' ')
est = UNEST
estv = UNEST
else:
# Compute the estimate and the kriging variance:
est = 0.0
estv = cbb
sumw = 0.0
if ktype == 1:
estv = estv - (s[na])*unbias
for i in range(0, na):
sumw = sumw + s[i]
est = est + s[i]*vra[i]
estv = estv - s[i]*rr[i]
if ktype == 0:
est = est + (1.0-sumw)*skmean
kmap[ny-iy-1, ix] = est
vmap[ny-iy-1, ix] = estv
if est > UNEST:
nk = nk + 1
ak = ak + est
vk = vk + est*est
if nk >= 1:
ak = ak / float(nk)
vk = vk/float(nk) - ak*ak
# print(' Estimated ' + str(nk) + ' blocks ')
# print(' average ' + str(ak) + ' variance ' + str(vk))
return kmap, vmap
def ik2d(df, xcol, ycol, vcol, ivtype, koption, ncut, thresh, gcdf, trend, tmin, tmax, nx, xmn, xsiz, ny, ymn, ysiz, ndmin, ndmax, radius, ktype, vario):
"""A 2D version of GSLIB's IK3D Indicator Kriging program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (March, 2019).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
    :param vcol: name of the property column (categorical or continuous - note continuous is untested)
:param ivtype: variable type, 0 - categorical, 1 - continuous
:param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)
:param ncut: number of categories or continuous thresholds
:param thresh: an ndarray with the category labels or continuous thresholds
:param gcdf: global CDF, not used if trend is present
    :param trend: an ndarray [ny, nx, ncut] with the local trend proportions or cumulative CDF values
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
:param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters
:return:
"""
    # Find the needed parameters:
    UNEST = -999.  # flag for unestimated nodes; value assumed, consistent with kb2d above
    EPSLON = 1.0e-20  # small tolerance; value assumed, needed by the cross validation check below
    PMX = 9999.9
    MAXSAM = ndmax + 1
    MAXEQ = MAXSAM + 1
mik = 0 # full indicator kriging
use_trend = False
    if trend.shape[0] == ny and trend.shape[1] == nx and trend.shape[2] == ncut:
use_trend = True
# load the variogram
MAXNST = 2
nst = np.zeros(ncut, dtype=int)
c0 = np.zeros(ncut)
cc = np.zeros((MAXNST, ncut))
aa = np.zeros((MAXNST, ncut), dtype=int)
it = np.zeros((MAXNST, ncut), dtype=int)
ang = np.zeros((MAXNST, ncut))
anis = np.zeros((MAXNST, ncut))
for icut in range(0, ncut):
nst[icut] = int(vario[icut]['nst'])
c0[icut] = vario[icut]['nug']
cc[0, icut] = vario[icut]['cc1']
it[0, icut] = vario[icut]['it1']
ang[0, icut] = vario[icut]['azi1']
aa[0, icut] = vario[icut]['hmaj1']
anis[0, icut] = vario[icut]['hmin1']/vario[icut]['hmaj1']
if nst[icut] == 2:
cc[1, icut] = vario[icut]['cc2']
it[1, icut] = vario[icut]['it2']
ang[1, icut] = vario[icut]['azi2']
aa[1, icut] = vario[icut]['hmaj2']
anis[1, icut] = vario[icut]['hmin2']/vario[icut]['hmaj2']
# Load the data
# trim values outside tmin and tmax
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
MAXDAT = len(df_extract)
MAXCUT = ncut
MAXNST = 2
MAXROT = MAXNST*MAXCUT + 1
ikout = np.zeros((nx, ny, ncut))
maxcov = np.zeros(ncut)
# Allocate the needed memory:
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXEQ)
rr = np.zeros(MAXEQ)
s = np.zeros(MAXEQ)
a = np.zeros(MAXEQ*MAXEQ)
ikmap = np.zeros((nx, ny, ncut))
vr = np.zeros((MAXDAT, MAXCUT+1))
nviol = np.zeros(MAXCUT)
aviol = np.zeros(MAXCUT)
xviol = np.zeros(MAXCUT)
ccdf = np.zeros(ncut)
ccdfo = np.zeros(ncut)
ikout = np.zeros((nx, ny, ncut))
x = df_extract[xcol].values
y = df_extract[ycol].values
v = df_extract[vcol].values
# The indicator data are constructed knowing the thresholds and the
# data value.
if ivtype == 0:
for icut in range(0, ncut):
vr[:, icut] = np.where(
(v <= thresh[icut] + 0.5) & (v > thresh[icut] - 0.5), '1', '0')
else:
for icut in range(0, ncut):
vr[:, icut] = np.where(v <= thresh[icut], '1', '0')
vr[:, ncut] = v
# Make a KDTree for fast search of nearest neighbours
dp = list((y[i], x[i]) for i in range(0, MAXDAT))
data_locs = np.column_stack((y, x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True,
copy_data=False, balanced_tree=True)
# Summary statistics of the input data
avg = vr[:, ncut].mean()
stdev = vr[:, ncut].std()
ss = stdev**2.0
vrmin = vr[:, ncut].min()
vrmax = vr[:, ncut].max()
print('Data for IK3D: Variable column ' + str(vcol))
print(' Number = ' + str(MAXDAT))
ndh = MAXDAT
actloc = np.zeros(MAXDAT, dtype=int)
for i in range(1, MAXDAT):
actloc[i] = i
# Set up the rotation/anisotropy matrices that are needed for the
# variogram and search:
print('Setting up rotation matrices for variogram and search')
radsqd = radius * radius
rotmat = []
for ic in range(0, ncut):
rotmat_temp, maxcov[ic] = setup_rotmat(c0[ic], int(
nst[ic]), it[:, ic], cc[:, ic], ang[:, ic], 9999.9)
rotmat.append(rotmat_temp)
# Initialize accumulators: # not setup yet
nk = 0
xk = 0.0
vk = 0.0
for icut in range(0, ncut):
nviol[icut] = 0
aviol[icut] = 0.0
xviol[icut] = -1.0
nxy = nx*ny
print('Working on the kriging')
# Report on progress from time to time:
if koption == 0:
nxy = nx*ny
nloop = nxy
irepo = max(1, min((nxy/10), 10000))
else:
nloop = 10000000
        irepo = max(1, min((MAXDAT/10), 10000))
ddh = 0.0
# MAIN LOOP OVER ALL THE BLOCKS IN THE GRID:
for index in range(0, nloop):
if (int(index/irepo)*irepo) == index:
print(' currently on estimate ' + str(index))
if koption == 0:
iy = int((index)/nx)
ix = index - (iy)*nx
xloc = xmn + (ix)*xsiz
yloc = ymn + (iy)*ysiz
else:
ddh = 0.0
# TODO: pass the cross validation value
# Find the nearest samples within each octant: First initialize the counter arrays:
na = -1 # accounting for 0 as first index
dist.fill(1.0e+20)
nums.fill(-1)
current_node = (yloc, xloc)
# use kd tree for fast nearest data search
dist, close = tree.query(current_node, ndmax)
# remove any data outside search radius
close = close[dist < radius]
dist = dist[dist < radius]
nclose = len(dist)
# Is there enough samples?
if nclose < ndmin: # accounting for min index of 0
for i in range(0, ncut):
ccdfo[i] = UNEST
print('UNEST at ' + str(ix) + ',' + str(iy))
else:
# Loop over all the thresholds/categories:
for ic in range(0, ncut):
krig = True
if mik == 1 and ic >= 1:
krig = False
# Identify the close data (there may be a different number of data at
# each threshold because of constraint intervals); however, if
# there are no constraint intervals then this step can be avoided.
nca = -1
for ia in range(0, nclose):
j = int(close[ia]+0.5)
ii = actloc[j]
accept = True
                    if koption != 0 and (abs(x[j]-xloc) + abs(y[j]-yloc)) < EPSLON:
accept = False
if accept:
nca = nca + 1
vra[nca] = vr[ii, ic]
xa[nca] = x[j]
ya[nca] = y[j]
# If there are no samples at this threshold then use the global cdf:
if nca == -1:
if use_trend:
ccdf[ic] = trend[ny-iy-1, ix, ic]
else:
ccdf[ic] = gcdf[ic]
else:
# Now, only load the variogram, build the matrix,... if kriging:
neq = nclose + ktype
na = nclose
# Set up kriging matrices:
iin = -1 # accounting for first index of 0
for j in range(0, na):
# Establish Left Hand Side Covariance Matrix:
for i in range(0, na): # was j - want full matrix
iin = iin + 1
a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst[ic], c0[ic], PMX, cc[:, ic],
aa[:, ic], it[:, ic], ang[:, ic], anis[:, ic], rotmat[ic], maxcov[ic])
if ktype == 1:
iin = iin + 1
a[iin] = maxcov[ic]
r[j] = cova2(xloc, yloc, xa[j], ya[j], nst[ic], c0[ic], PMX, cc[:, ic],
aa[:, ic], it[:, ic], ang[:, ic], anis[:, ic], rotmat[ic], maxcov[ic])
# Set the unbiasedness constraint:
if ktype == 1:
for i in range(0, na):
iin = iin + 1
a[iin] = maxcov[ic]
iin = iin + 1
a[iin] = 0.0
r[neq-1] = maxcov[ic]
                        rr[neq-1] = r[neq-1]
# Solve the system:
if neq == 1:
ising = 0.0
s[0] = r[0] / a[0]
else:
s = ksol_numpy(neq, a, r)
# Finished kriging (if it was necessary):
# Compute Kriged estimate of cumulative probability:
sumwts = 0.0
ccdf[ic] = 0.0
for i in range(0, nclose):
ccdf[ic] = ccdf[ic] + vra[i]*s[i]
sumwts = sumwts + s[i]
if ktype == 0:
if use_trend == True:
ccdf[ic] = ccdf[ic] + \
(1.0-sumwts)*trend[ny-iy-1, ix, ic]
else:
ccdf[ic] = ccdf[ic] + (1.0-sumwts)*gcdf[ic]
# Keep looping until all the thresholds are estimated:
# Correct and write the distribution to the output file:
nk = nk + 1
ccdfo = ordrel(ivtype, ncut, ccdf)
# Write the IK CCDF for this grid node:
if koption == 0:
ikout[ny-iy-1, ix, :] = ccdfo
else:
print('TBD')
return ikout
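# Example use of ik2d for a categorical variable (a minimal sketch): the facies labels,
# global proportions, grid definition and per-category indicator variograms below are
# hypothetical, chosen only to show the calling convention.
def _example_ik2d():
    import pandas as pd  # pandas assumed available
    df = pd.DataFrame({"X": [100.0, 400.0, 700.0], "Y": [150.0, 300.0, 650.0],
                       "facies": [0, 1, 1]})
    vario_cat = {"nst": 1, "nug": 0.0, "cc1": 1.0, "it1": 1,
                 "azi1": 0.0, "hmaj1": 300.0, "hmin1": 300.0}
    varios = [vario_cat, vario_cat]  # one indicator variogram (sill 1.0) per category
    trend = np.zeros((1, 1, 1))      # dummy trend, so the global proportions gcdf are used
    ik = ik2d(df, "X", "Y", "facies", ivtype=0, koption=0, ncut=2, thresh=[0, 1],
              gcdf=[0.4, 0.6], trend=trend, tmin=-998.0, tmax=998.0,
              nx=20, xmn=25.0, xsiz=50.0, ny=20, ymn=25.0, ysiz=50.0,
              ndmin=0, ndmax=3, radius=1500.0, ktype=0, vario=varios)
    return ik  # 3D array of kriged category probabilities, one 2D map per category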
def sgsim(df, xcol, ycol, vcol, wcol, scol, tmin, tmax, itrans, ismooth, dftrans, tcol, twtcol, zmin, zmax, ltail, ltpar, utail, utpar, nsim,
nx, xmn, xsiz, ny, ymn, ysiz, seed, ndmin, ndmax, nodmax, mults, nmult, noct, radius, radius1, sang1,
mxctx, mxcty, ktype, colocorr, sec_map, vario):
# Parameters from sgsim.inc
MAXNST = 2
MAXROT = 2
UNEST = -99.0
EPSLON = 1.0e-20
VERSION = 2.907
KORDEI = 12
MAXOP1 = KORDEI+1
MAXINT = 2**30
# Set other parameters
np.random.seed(seed)
nxy = nx*ny
sstrat = 0 # search data and nodes by default, turned off if unconditional
radsqd = radius * radius
sanis1 = radius1/radius
if ktype == 4:
varred = 1.0
# load the variogram
nst = int(vario['nst'])
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst, dtype=int)
ang = np.zeros(nst)
anis = np.zeros(nst)
c0 = vario['nug']
cc[0] = vario['cc1']
it[0] = vario['it1']
ang[0] = vario['azi1']
aa[0] = vario['hmaj1']
anis[0] = vario['hmin1']/vario['hmaj1']
if nst == 2:
cc[1] = vario['cc2']
it[1] = vario['it2']
ang[1] = vario['azi2']
aa[1] = vario['hmaj2']
anis[1] = vario['hmin2']/vario['hmaj2']
# Set the constants
MAXCTX = mxctx
MAXCTY = mxcty
MAXCXY = MAXCTX * MAXCTY
MAXX = nx
MAXY = ny
MAXZ = 1 # assuming 2D for now
MXY = MAXX * MAXY
if MXY < 100:
MXY = 100
MAXNOD = nodmax
MAXSAM = ndmax
MAXKR1 = MAXNOD + MAXSAM + 1
# print('MAXKR1'); print(MAXKR1)
MAXKR2 = MAXKR1 * MAXKR1
MAXSBX = 1
if nx > 1:
MAXSBX = int(nx/2)
if MAXSBX > 50:
MAXSBX = 50
MAXSBY = 1
if ny > 1:
MAXSBY = int(ny/2)
if MAXSBY > 50:
MAXSBY = 50
MAXSBZ = 1
MAXSB = MAXSBX*MAXSBY*MAXSBZ
# Declare arrays
dist = np.zeros(ndmax)
nums = np.zeros(ndmax, dtype=int)
# Perform some quick checks
if nx > MAXX or ny > MAXY:
print('ERROR: available grid size: ' + str(MAXX) +
',' + str(MAXY) + ',' + str(MAXZ) + '.')
        print('       you have asked for : ' + str(nx) +
              ',' + str(ny) + '.')
return sim
if ltail != 1 and ltail != 2:
print('ERROR invalid lower tail option ' + str(ltail))
print(' only allow 1 or 2 - see GSLIB manual ')
return sim
if utail != 1 and utail != 2 and utail != 4:
print('ERROR invalid upper tail option ' + str(ltail))
print(' only allow 1,2 or 4 - see GSLIB manual ')
return sim
if utail == 4 and utpar < 1.0:
print('ERROR invalid power for hyperbolic tail' + str(utpar))
print(' must be greater than 1.0!')
return sim
if ltail == 2 and ltpar < 0.0:
print('ERROR invalid power for power model' + str(ltpar))
print(' must be greater than 0.0!')
return sim
if utail == 2 and utpar < 0.0:
print('ERROR invalid power for power model' + str(utpar))
print(' must be greater than 0.0!')
return sim
# Load the data
# trim values outside tmin and tmax
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
nd = len(df_extract)
ndmax = min(ndmax, nd)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
vr_orig = np.copy(vr)
# print('size of data extract'); print(len(vr))
wt = []
wt = np.array(wt)
if wcol > -1:
wt = df_extract[wcol].values
else:
wt = np.ones(nd)
sec = []
sec = np.array(sec)
if scol > -1:
sec = df_extract[scol].values
if itrans == 1:
if ismooth == 1:
dftrans_extract = dftrans.loc[(
dftrans[tcol] >= tmin) & (dftrans[tcol] <= tmax)]
ntr = len(dftrans_extract)
            vrtr = dftrans_extract[tcol].values
            if twtcol > -1:
                vrgtr = dftrans_extract[twtcol].values
else:
vrgtr = np.ones(ntr)
else:
vrtr = df_extract[vcol].values
ntr = len(df_extract)
vrgtr = np.copy(wt)
twt = np.sum(vrgtr)
# sort
vrtr, vrgtr = dsortem(0, ntr, vrtr, 2, b=vrgtr)
# Compute the cumulative probabilities and write transformation table
twt = max(twt, EPSLON)
oldcp = 0.0
cp = 0.0
# print('ntr'); print(ntr)
for j in range(0, ntr):
cp = cp + vrgtr[j]/twt
w = (cp + oldcp)*0.5
vrg = gauinv(w)
oldcp = cp
# Now, reset the weight to the normal scores value:
vrgtr[j] = vrg
twt = np.sum(wt)
# Normal scores transform the data
for id in range(0, nd):
if itrans == 1:
vrr = vr[id]
            j = dlocate(vrtr, 1, ntr, vrr)
            j = min(max(0, j), (ntr-2))
            vrg = dpowint(vrtr[j], vrtr[j+1], vrgtr[j],
                          vrgtr[j+1], vrr, 1.0)
            if vrg < vrgtr[0]:
                vrg = vrgtr[0]
            if vrg > vrgtr[ntr-1]:
                vrg = vrgtr[ntr-1]
vr[id] = vrg
weighted_stats_orig = DescrStatsW(vr_orig, weights=wt)
orig_av = weighted_stats_orig.mean
orig_ss = weighted_stats_orig.var
weighted_stats = DescrStatsW(vr, weights=wt)
av = weighted_stats.mean
ss = weighted_stats.var
print('\n Data for SGSIM: Number of acceptable data = ' + str(nd))
print(' Number trimmed = ' + str(len(df) - nd))
print(' Weighted Average = ' +
str(round(orig_av, 4)))
print(' Weighted Variance = ' +
str(round(orig_ss, 4)))
print(' Weighted Transformed Average = ' + str(round(av, 4)))
print(' Weighted Transformed Variance = ' + str(round(ss, 4)))
# Read in secondary data
sim = np.random.rand(nx*ny)
index = 0
for ixy in range(0, nxy):
sim[index] = index
lvm = []
lvm = np.array(lvm)
if ktype >= 2:
#lvm = np.copy(sec_map.flatten())
ind = 0
lvm = np.zeros(nxy)
for iy in range(0, ny):
for ix in range(0, nx):
lvm[ind] = sec_map[ny-iy-1, ix]
ind = ind + 1
if ktype == 2 and itrans == 1:
for ixy in range(0, nxy):
                # Do we need to transform the secondary variable for a local mean?
vrr = lvm[ixy]
j = dlocate(vrtr, 1, ntr, vrr)
j = min(max(0, j), (ntr-2))
vrg = dpowint(vrtr[j], vrtr[j+1], vrgtr[j],
vrgtr[j+1], vrr, 1.0)
if vrg < vrgtr[0]:
vrg = vrgtr[0]
                if vrg > vrgtr[ntr-1]:
                    vrg = vrgtr[ntr-1]
lvm[ixy] = vrg
av = np.average(lvm)
ss = np.var(lvm)
print(' Secondary Data: Number of data = ' + str(nx*ny))
print(' Equal Weighted Average = ' + str(round(av, 4)))
print(' Equal Weighted Variance = ' + str(round(ss, 4)))
# Do we need to work with data residuals? (Locally Varying Mean)
if ktype == 2:
sec = np.zeros(nd)
for idd in range(0, nd):
ix = getindex(nx, xmn, xsiz, x[idd])
iy = getindex(ny, ymn, ysiz, y[idd])
index = ix + (iy-1)*nx
sec[idd] = lvm[index]
# Calculation of residual moved to krige subroutine: vr(i)=vr(i)-sec(i)
# Do we need to get an external drift attribute for the data?
if ktype == 3:
for idd in range(0, nd):
            if sec[idd] != UNEST:
                ix = getindex(nx, xmn, xsiz, x[idd])
                iy = getindex(ny, ymn, ysiz, y[idd])
                ind = ix + (iy)*nx
                sec[idd] = lvm[ind]
# Transform the secondary attribute to normal scores?
if ktype == 4:
order_sec = np.zeros(nxy)
ind = 0
for ixy in range(0, nxy):
order_sec[ixy] = ind
ind = ind + 1
print(' Transforming Secondary Data with')
print(' variance reduction of ' + str(varred))
lvm, order_sec = dsortem(0, nxy, lvm, 2, b=order_sec)
oldcp = 0.0
cp = 0.0
for i in range(0, nxy):
cp = cp + (1.0/(nxy))
w = (cp + oldcp)/2.0
lvm[i] = gauinv(w)
lvm[i] = lvm[i] * varred
oldcp = cp
order_sec, lvm = dsortem(0, nxy, order_sec, 2, b=lvm)
# return np.reshape(lvm,(ny,nx)) # check the transform
# Set up the rotation/anisotropy matrices that are needed for the
# variogram and search.
print('Setting up rotation matrices for variogram and search')
if nst == 1:
rotmat = setrot(ang[0], ang[0], sang1, anis[0],
anis[0], sanis1, nst, MAXROT=2)
else:
rotmat = setrot(ang[0], ang[1], sang1, anis[0],
anis[1], sanis1, nst, MAXROT=2)
isrot = 2 # search rotation is appended as 3rd
rotmat_2d, maxcov = setup_rotmat2(
c0, nst, it, cc, ang) # will use one in the future
# print('MaxCov = ' + str(maxcov))
# Make a KDTree for fast search of nearest neighbours
dp = list((y[i], x[i]) for i in range(0, nd))
data_locs = np.column_stack((y, x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True,
copy_data=False, balanced_tree=True)
# Set up the covariance table and the spiral search:
cov_table, tmp, order, ixnode, iynode, nlooku, nctx, ncty = ctable(MAXNOD, MAXCXY, MAXCTX, MAXCTY, MXY,
xsiz, ysiz, isrot, nx, ny, nst, c0, cc, aa, it, ang, anis, rotmat, radsqd)
# print('Covariance Table'); print(cov_table)
    # MAIN LOOP OVER ALL THE SIMULATIONS:
for isim in range(0, nsim):
# Work out a random path for this realization:
sim = np.random.rand(nx*ny)
order = np.zeros(nxy)
ind = 0
for ixy in range(0, nxy):
order[ixy] = ind
ind = ind + 1
# The multiple grid search works with multiples of 4 (yes, that is
# somewhat arbitrary):
if mults == 1:
for imult in range(0, nmult):
nny = int(max(1, ny/((imult+1)*4)))
nnx = int(max(1, nx/((imult+1)*4)))
# print('multi grid - nnx, nny'); print(nnx,nny)
jy = 1
jx = 1
for iy in range(0, nny):
if nny > 0:
jy = iy*(imult+1)*4
for ix in range(0, nnx):
if nnx > 0:
jx = ix*(imult+1)*4
index = jx + (jy-1)*nx
sim[index] = sim[index] - (imult+1)
# Initialize the simulation:
sim, order = dsortem(0, nxy, sim, 2, b=order)
sim.fill(UNEST)
print('Working on realization number ' + str(isim))
# Assign the data to the closest grid node:
TINY = 0.0001
for idd in range(0, nd):
# print('data'); print(x[idd],y[idd])
ix = getindex(nx, xmn, xsiz, x[idd])
iy = getindex(ny, ymn, ysiz, y[idd])
ind = ix + (iy-1)*nx
xx = xmn + (ix)*xsiz
yy = ymn + (iy)*ysiz
# print('xx, yy' + str(xx) + ',' + str(yy))
test = abs(xx-x[idd]) + abs(yy-y[idd])
# Assign this data to the node (unless there is a closer data):
if sstrat == 1:
if sim[ind] > 0.0:
id2 = int(sim[ind]+0.5)
                    test2 = abs(xx-x[id2]) + abs(yy-y[id2])
if test <= test2:
sim[ind] = idd
else:
sim[ind] = id2
# Assign a flag so that this node does not get simulated:
if sstrat == 0 and test <= TINY:
sim[ind] = 10.0*UNEST
# Now, enter data values into the simulated grid:
for ind in range(0, nxy):
idd = int(sim[ind]+0.5)
if idd > 0:
                sim[ind] = vr[idd]
irepo = max(1, min((nxy/10), 10000))
# MAIN LOOP OVER ALL THE NODES:
for ind in range(0, nxy):
if (int(ind/irepo)*irepo) == ind:
print(' currently on node ' + str(ind))
# Figure out the location of this point and make sure it has
# not been assigned a value already:
index = int(order[ind]+0.5)
if (sim[index] > (UNEST+EPSLON)) or (sim[index] < (UNEST*2.0)):
continue
iy = int((index)/nx)
ix = index - (iy)*nx
xx = xmn + (ix)*xsiz
yy = ymn + (iy)*ysiz
current_node = (yy, xx)
# print('Current_node'); print(current_node)
# Now, we'll simulate the point ix,iy,iz. First, get the close data
# and make sure that there are enough to actually simulate a value,
# we'll only keep the closest "ndmax" data, and look for previously
# simulated grid nodes:
if sstrat == 0:
# print('searching for nearest data')
na = -1 # accounting for 0 as first index
if ndmax == 1:
dist = np.zeros(1)
nums = np.zeros(1)
# use kd tree for fast nearest data search
dist[0], nums[0] = tree.query(current_node, ndmax)
else:
dist, nums = tree.query(current_node, ndmax)
# remove any data outside search radius
# print('nums'); print(nums)
# print('dist'); print(dist)
na = len(dist)
nums = nums[dist < radius]
dist = dist[dist < radius]
na = len(dist)
if na < ndmin:
continue # bail if not enough data
# print('Found ' + str(na) + 'neighbouring data')
# print('node search inputs')
# print('nodmax ' + str(nodmax))
# print('ixnode'); print(ixnode)
ncnode, icnode, cnodev, cnodex, cnodey = srchnd(
ix, iy, nx, ny, xmn, ymn, xsiz, ysiz, sim, noct, nodmax, ixnode, iynode, nlooku, nctx, ncty, UNEST)
# print('srchnd'); print(ncnode,icnode,cnodev,cnodex,cnodey)
# print('Result of srchnd, cnodex = '); print(cnodex)
nclose = na
# print('srch node, nclose ' + str(nclose) + ', ncnode ' + str(ncnode))
# print('nums'); print(nums)
# Calculate the conditional mean and standard deviation. This will be
# done with kriging if there are data, otherwise, the global mean and
# standard deviation will be used:
if ktype == 2:
gmean = lvm[index]
else:
gmean = 0.0
if nclose+ncnode < 1:
cmean = gmean
cstdev = 1.0
# Perform the kriging. Note that if there are fewer than four data
# then simple kriging is prefered so that the variance of the
# realization does not become artificially inflated:
else:
lktype = ktype
if ktype == 1 and (nclose+ncnode) < 4:
lktype = 0
cmean, cstdev = krige(ix, iy, nx, ny, xx, yy, lktype, x, y, vr, sec, colocorr, lvm, nums, cov_table, nctx, ncty, icnode, ixnode, iynode, cnodev, cnodex, cnodey,
nst, c0, 9999.9, cc, aa, it, ang, anis, rotmat_2d, maxcov, MAXCTX, MAXCTY, MAXKR1,
MAXKR2)
# Draw a random number and assign a value to this node:
p = np.random.rand()
xp = gauinv(p)
sim[index] = xp * cstdev + cmean
# print('simulated value = ' + str(sim[index]))
# Quick check for far out results:
if abs(cmean) > 5.0 or abs(cstdev) > 5.0 or abs(sim[index]) > 6.0:
print('WARNING: grid node location: ' + str(ix) + ',' + str(iy))
print(' conditional mean and stdev: ' +
str(cmean) + ',' + str(cstdev))
print(' simulated value: ' + str(sim[index]))
# Do we need to reassign the data to the grid nodes?
if sstrat == 0:
print('Reassigning data to nodes')
for iid in range(0, nd):
ix = getindex(nx, xmn, xsiz, x[iid])
iy = getindex(ny, ymn, ysiz, y[iid])
xx = xmn + (ix)*xsiz
yy = ymn + (iy)*ysiz
ind = ix + (iy-1)*nx
test = abs(xx-x[iid])+abs(yy-y[iid])
if test <= TINY:
sim[ind] = vr[iid]
# Back transform each value and write results:
ne = 0
av = 0.0
ss = 0.0
for ind in range(0, nxy):
            iy = int(ind/nx)
            ix = ind - iy*nx
simval = sim[ind]
if simval > -9.0 and simval < 9.0:
ne = ne + 1
av = av + simval
ss = ss + simval*simval
if itrans == 1 and simval > (UNEST+EPSLON):
simval = backtr_value(
simval, vrtr, vrgtr, zmin, zmax, ltail, ltpar, utail, utpar)
if simval < zmin:
simval = zmin
if simval > zmax:
simval = zmax
sim[ind] = simval
# print('simulated value = ' + str(sim[ind]) + ' at location index = ' + str(ind))
av = av / max(ne, 1.0)
ss = (ss / max(ne, 1.0)) - av * av
print('\n Realization ' + str(isim) + ': number = ' + str(ne))
print(' mean = ' +
str(round(av, 4)) + ' (close to 0.0?)')
print(' variance = ' +
str(round(ss, 4)) + ' (close to gammabar(V,V)? approx. 1.0)')
# END MAIN LOOP OVER SIMULATIONS:
sim_out = np.zeros((ny, nx))
for ind in range(0, nxy):
iy = int((ind)/nx)
ix = ind - (iy)*nx
sim_out[ny-iy-1, ix] = sim[ind]
return sim_out
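# Example use of sgsim (a minimal sketch): a single sequential Gaussian realization on a
# 20 x 20 grid with simple kriging, no declustering weights and no secondary data. Every
# numeric argument below is hypothetical and chosen only to show the calling convention.
def _example_sgsim():
    import pandas as pd  # pandas assumed available
    df = pd.DataFrame({"X": [100.0, 400.0, 700.0, 250.0],
                       "Y": [150.0, 300.0, 650.0, 800.0],
                       "por": [0.10, 0.14, 0.08, 0.12]})
    vario = {"nst": 1, "nug": 0.0, "cc1": 1.0, "it1": 1,
             "azi1": 0.0, "hmaj1": 300.0, "hmin1": 300.0}
    sim = sgsim(df, "X", "Y", "por", wcol=-1, scol=-1, tmin=-998.0, tmax=998.0,
                itrans=1, ismooth=0, dftrans=None, tcol=0, twtcol=0,
                zmin=0.05, zmax=0.20, ltail=1, ltpar=0.05, utail=1, utpar=0.20,
                nsim=1, nx=20, xmn=25.0, xsiz=50.0, ny=20, ymn=25.0, ysiz=50.0,
                seed=73073, ndmin=0, ndmax=8, nodmax=12, mults=0, nmult=2, noct=-1,
                radius=600.0, radius1=600.0, sang1=0.0, mxctx=25, mxcty=25,
                ktype=0, colocorr=0.0, sec_map=0, vario=vario)
    return sim  # (ny, nx) ndarray, back-transformed to the original data units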
def sisim(df, xcol, ycol, vcol, ivtype, koption, ncut, thresh, gcdf, trend, tmin, tmax, zmin, zmax, ltail, ltpar, middle, mpar, utail, utpar, nx, xmn, xsiz, ny, ymn, ysiz, seed, ndmin,
ndmax, nodmax, mults, nmult, noct, radius, ktype, vario):
"""A 2D version of GSLIB's SISIM Indicator Simulation program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
    Austin (March, 2019). WARNING: only tested for categorical ktype 0, 1 and 2 (locally variable proportion).
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
    :param vcol: name of the property column (categorical or continuous - note continuous is untested)
:param ivtype: variable type, 0 - categorical, 1 - continuous
:param koption: kriging option, 0 - estimation, 1 - cross validation (under construction)
:param ncut: number of categories or continuous thresholds
:param thresh: an ndarray with the category labels or continuous thresholds
:param gcdf: global CDF, not used if trend is present
    :param trend: an ndarray [ny, nx, ncut] with the local trend proportions or cumulative CDF values
:param tmin: property trimming limit
:param tmax: property trimming limit
:param nx: definition of the grid system (x axis)
:param xmn: definition of the grid system (x axis)
:param xsiz: definition of the grid system (x axis)
:param ny: definition of the grid system (y axis)
:param ymn: definition of the grid system (y axis)
:param ysiz: definition of the grid system (y axis)
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
:param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
:param vario: list with all of the indicator variograms (sill of 1.0) in consistent order with above parameters
:return:
"""
# Checks
if utail == 3 or ltail == 3 or middle == 3:
print('ERROR - distribution extrapolation option 3 with table is not available')
return sim_out
if xcol == "" or ycol == "":
print('ERROR - must have x and y column in the DataFrame')
return sim_out
# Set parameters from the include
UNEST = -99.0
EPSLON = 1.0e-20
VERSION = 0.001
np.random.seed(seed)
colocorr = 0.0 # no collocated cokriging
lvm = 0 # no kriging with a locally variable mean
sec = []
sec = np.array(sec) # no secondary data
ng = 0 # no tabulated values
    # Find the needed parameters:
PMX = 9999.9
MAXSAM = ndmax + 1
MAXEQ = MAXSAM + 1
nxy = nx*ny
mik = 0 # full indicator kriging
use_trend = False
trend1d = np.zeros((nxy, 1)) # no trend make a dummy trend
    if trend.shape[0] == ny and trend.shape[1] == nx and trend.shape[2] == ncut:
trend1d = np.zeros((nxy, ncut))
use_trend = True
index = 0
for iy in range(0, ny):
for ix in range(0, nx):
for ic in range(0, ncut):
trend1d[index, ic] = trend[ny-iy-1, ix, ic] # copy trend
index = index + 1
MAXORD = nxy
MAXNOD = nodmax
cnodeiv = np.zeros((ncut+1, MAXNOD))
tmp = np.zeros(MAXORD)
sstrat = 0 # search data and nodes by default, turned off if unconditional
sang1 = 0 # using isotropic search now
sanis1 = 1.0
# No covariance lookup table
mxctx = int(radius/xsiz)*2+1
mxcty = int(radius/xsiz)*2+1
# print('cov table / spiral search nx, ny '); print(mxctx); print(mxcty)
MAXCTX = mxctx
MAXCTY = mxcty
MAXCXY = MAXCTX * MAXCTY
# Grid extents
MAXX = nx
MAXY = ny
MXY = MAXX * MAXY
# Kriging system
MAXKR1 = 2 * MAXNOD + 2 * MAXSAM + 1
MAXKR2 = MAXKR1 * MAXKR1
MAXSBX = 1
if nx > 1:
MAXSBX = int(nx/2)
if MAXSBX > 50:
MAXSBX = 50
MAXSBY = 1
if ny > 1:
MAXSBY = int(ny/2)
if MAXSBY > 50:
MAXSBY = 50
# print('ncut'); print(ncut)
# load the variogram
MAXNST = 2
nst = np.zeros(ncut, dtype=int)
c0 = np.zeros(ncut)
cc = np.zeros((ncut, MAXNST))
aa = np.zeros((ncut, MAXNST), dtype=int)
it = np.zeros((ncut, MAXNST), dtype=int)
ang = np.zeros((ncut, MAXNST))
anis = np.zeros((ncut, MAXNST))
# print('varios - 1 vario'); print(vario[1])
for icut in range(0, ncut):
# print('icut'); print(icut)
nst[icut] = int(vario[icut]['nst'])
c0[icut] = vario[icut]['nug']
cc[icut, 0] = vario[icut]['cc1']
it[icut, 0] = vario[icut]['it1']
ang[icut, 0] = vario[icut]['azi1']
aa[icut, 0] = vario[icut]['hmaj1']
anis[icut, 0] = vario[icut]['hmin1']/vario[icut]['hmaj1']
if nst[icut] == 2:
cc[icut, 1] = vario[icut]['cc2']
it[icut, 1] = vario[icut]['it2']
ang[icut, 1] = vario[icut]['azi2']
aa[icut, 1] = vario[icut]['hmaj2']
anis[icut, 1] = vario[icut]['hmin2']/vario[icut]['hmaj2']
# print('check loaded cov model- icut '); print(icut)
# print(cc[icut],aa[icut],it[icut],ang[icut],anis[icut])
# Load the data
# trim values outside tmin and tmax
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
MAXDAT = len(df_extract)
nd = MAXDAT
MAXCUT = ncut
MAXNST = 2
MAXROT = MAXNST*MAXCUT + 1
ikout = np.zeros((nx, ny, ncut))
maxcov = np.zeros(ncut)
# Allocate the needed memory:
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXEQ)
rr = np.zeros(MAXEQ)
s = np.zeros(MAXEQ)
a = np.zeros(MAXEQ*MAXEQ)
ikmap = np.zeros((nx, ny, ncut))
vr = np.zeros((MAXDAT, MAXCUT+1))
nviol = np.zeros(MAXCUT)
aviol = np.zeros(MAXCUT)
xviol = np.zeros(MAXCUT)
ccdf = np.zeros(ncut)
ccdfo = np.zeros(ncut)
ikout = np.zeros((nx, ny, ncut))
x = df_extract[xcol].values
y = df_extract[ycol].values
v = df_extract[vcol].values
MAXTAB = MAXDAT + MAXCUT # tabulated probabilities not used
gcut = np.zeros(MAXTAB)
# The indicator data are constructed knowing the thresholds and the
# data value.
# print('ncut'); print(ncut)
if ivtype == 0:
for icut in range(0, ncut):
vr[:, icut] = np.where(
(v <= thresh[icut] + 0.5) & (v > thresh[icut] - 0.5), '1', '0')
else:
for icut in range(0, ncut):
vr[:, icut] = np.where(v <= thresh[icut], '1', '0')
vr[:, ncut] = v
# print('loaded data '); print(vr)
# Make a KDTree for fast search of nearest neighbours
dp = list((y[i], x[i]) for i in range(0, MAXDAT))
data_locs = np.column_stack((y, x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True,
copy_data=False, balanced_tree=True)
# Summary statistics of the input data
avg = vr[:, ncut].mean()
stdev = vr[:, ncut].std()
ss = stdev**2.0
vrmin = vr[:, ncut].min()
vrmax = vr[:, ncut].max()
print('Data for IK3D: Variable column ' + str(vcol))
print(' Number = ' + str(MAXDAT))
ndh = MAXDAT
# need to set up data at node locations
actloc = np.zeros(MAXDAT, dtype=int)
for i in range(1, MAXDAT):
actloc[i] = i
# Set up the rotation/anisotropy matrices that are needed for the
# variogram and search:
print('Setting up rotation matrices for variogram and search')
radsqd = radius * radius
rotmat = []
for ic in range(0, ncut):
rotmat_temp, maxcov[ic] = setup_rotmat(
c0[ic], int(nst[ic]), it[ic], cc[ic], ang[ic], 9999.9)
rotmat.append(rotmat_temp)
# return rotmat
# Set up the covariance table and the spiral search based just on the first variogram
# This is ok as we are not using the covariance look up table, just spiral search for previous nodes
isrot = MAXNST*MAXCUT + 1 # note I removed anisotropic search here
# print('ang[0]'); print(ang[0])
if nst[0] == 1:
global_rotmat = setrot(
ang[0, 0], ang[0, 0], sang1, anis[0, 0], anis[0, 0], sanis1, nst[0], MAXROT=2)
else:
global_rotmat = setrot(
ang[0, 0], ang[1, 0], sang1, anis[0, 0], anis[1, 0], sanis1, nst[0], MAXROT=2)
cov_table, tmp2, order, ixnode, iynode, nlooku, nctx, ncty = ctable(MAXNOD, MAXCXY, MAXCTX, MAXCTY, MXY,
xsiz, ysiz, isrot, nx, ny, nst[0], c0[0], cc[0], aa[0], it[0], ang[0], anis[0], global_rotmat, radsqd)
# print('spiral search number nodes '); print(nlooku)
# print('ixnode,iynode'); print(ixnode,iynode)
# Initialize accumulators: # not setup yet
nk = 0
xk = 0.0
vk = 0.0
for icut in range(0, ncut):
nviol[icut] = 0
aviol[icut] = 0.0
xviol[icut] = -1.0
# print('Working on the kriging')
# Report on progress from time to time:
if koption == 0:
nxy = nx*ny
nloop = nxy
irepo = max(1, min((nxy/10), 10000))
else:
nloop = 10000000
irepo = max(1, min((nd/10), 10000))
ddh = 0.0
    # MAIN LOOP OVER ALL THE SIMULATIONS:
# for isim in range(0,nsim): # will add multiple realizations soon
# Work out a random path for this realization:
sim = np.random.rand(nx*ny)
order = np.zeros(nxy)
ind = 0
for ixy in range(0, nxy):
order[ixy] = ind
ind = ind + 1
# Multiple grid search works with multiples of 4 (yes, that is
    # somewhat arbitrary):
if mults == 1:
for imult in range(0, nmult):
nny = int(max(1, ny/((imult+1)*4)))
nnx = int(max(1, nx/((imult+1)*4)))
# print('multi grid - nnx, nny'); print(nnx,nny)
jy = 1
jx = 1
for iy in range(0, nny):
if nny > 0:
jy = iy*(imult+1)*4
for ix in range(0, nnx):
if nnx > 0:
jx = ix*(imult+1)*4
index = jx + (jy-1)*nx
sim[index] = sim[index] - (imult+1)
    # Initialize the simulation:
sim, order = dsortem(0, nxy, sim, 2, b=order)
sim.fill(UNEST)
tmp.fill(0.0)
print('Working on a single realization, seed ' + str(seed))
# print('Random Path'); print(order)
    # Assign the data to the closest grid node:
TINY = 0.0001
for idd in range(0, nd):
# print('data'); print(x[idd],y[idd])
ix = getindex(nx, xmn, xsiz, x[idd])
iy = getindex(ny, ymn, ysiz, y[idd])
ind = ix + (iy-1)*nx
xx = xmn + (ix)*xsiz
yy = ymn + (iy)*ysiz
# print('xx, yy' + str(xx) + ',' + str(yy))
test = abs(xx-x[idd]) + abs(yy-y[idd])
        # Assign this data to the node (unless there is a closer data):
if sstrat == 1 or (sstrat == 0 and test <= TINY):
if sim[ind] > UNEST:
id2 = int(sim[ind]+0.5)
test2 = abs(xx-x[id2]) + abs(yy-y[id2])
if test <= test2:
sim[ind] = idd
else:
sim[ind] = idd
        # Assign a flag so that this node does not get simulated:
    # Now, enter the data values into the simulated grid:
for ind in range(0, nxy):
idd = int(sim[ind]+0.5)
if idd > 0:
            sim[ind] = vr[idd, ncut]
else:
tmp[ind] = sim[ind]
sim[ind] = UNEST
irepo = max(1, min((nxy/10), 10000))
# LOOP OVER ALL THE NODES:
for ind in range(0, nxy):
if (int(ind/irepo)*irepo) == ind:
print(' currently on node ' + str(ind))
# Find the index on the random path, check if assigned data and get location
index = int(order[ind]+0.5)
if (sim[index] > (UNEST+EPSLON)) or (sim[index] < (UNEST*2.0)):
continue
iy = int((index)/nx)
ix = index - (iy)*nx
xx = xmn + (ix)*xsiz
yy = ymn + (iy)*ysiz
current_node = (yy, xx)
# print('Current_node'); print(current_node)
# Now we'll simulate the point ix,iy,iz. First, get the close data
# and make sure that there are enough to actually simulate a value,
# we'll only keep the closest "ndmax" data, and look for previously
# simulated grid nodes:
if sstrat == 0:
# print('searching for nearest data')
na = -1 # accounting for 0 as first index
if ndmax == 1:
dist = np.zeros(1)
nums = np.zeros(1)
# use kd tree for fast nearest data search
dist[0], nums[0] = tree.query(current_node, ndmax)
else:
dist, nums = tree.query(current_node, ndmax)
# remove any data outside search radius
# print('nums'); print(nums)
# print('dist'); print(dist)
na = len(dist)
nums = nums[dist < radius]
dist = dist[dist < radius]
na = len(dist)
if na < ndmin:
continue # bail if not enough data
# print('Found ' + str(na) + 'neighbouring data')
# print('node search inputs')
# print('nodmax ' + str(nodmax))
# print('ixnode'); print(ixnode)
# Indicator transform the nearest node data
# print('start node search')
ncnode, icnode, cnodev, cnodex, cnodey = srchnd(
ix, iy, nx, ny, xmn, ymn, xsiz, ysiz, sim, noct, nodmax, ixnode, iynode, nlooku, nctx, ncty, UNEST)
        if ncnode > 0:
            if ivtype == 0:
                for icut in range(0, ncut):
                    cnodeiv[icut, :] = np.where(
                        (cnodev <= thresh[icut] + 0.5) & (cnodev > thresh[icut] - 0.5), '1', '0')
            else:
                for icut in range(0, ncut):
                    cnodeiv[icut, :] = np.where(cnodev <= thresh[icut], '1', '0')
            cnodeiv[ncut, :] = cnodev
# print('indicator transformed nearest nodes'); print(cnodeiv)
# print('srchnd'); print(ncnode,icnode,cnodev,cnodex,cnodey)
# print('Result of srchnd, cnodex = '); print(cnodex)
nclose = na
# print('*****srch node, nclose ' + str(nclose) + ', ncnode ' + str(ncnode))
# print('near data'); print(nums)
# print('near data distance'); print(dist)
# print('nums'); print(nums)
# What cdf value are we looking for?
zval = UNEST
cdfval = np.random.rand()
# Use the global distribution?
# check inputs
# print('nst'); print(nst)
if nclose + ncnode <= 0:
# print('nclose & ncnode'); print(nclose, ncnode)
zval = beyond(ivtype, ncut, thresh, gcdf, ng, gcut, gcdf, zmin,
zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval)
else:
# print('kriging')
# Estimate the local distribution by indicator kriging:
# print('maxcov'); print(maxcov)
for ic in range(0, ncut):
# print('check kriging cov model- icut '); print(ic)
# print('node data values for kriging'); print(cnodev)
# print(cc[ic],aa[ic],it[ic],ang[ic],anis[ic],rotmat[ic],maxcov[ic])
# ccdf([ic] = krige(ix,iy,iz,xx,yy,zz,ic,cdf(ic),MAXCTX,MAXCTY,MAXCTZ,MAXKR1,ccdf(ic),MAXROT)
if ktype == 0:
gmean = gcdf[ic]
elif ktype == 2:
gmean = trend1d[index, ic]
else:
gmean = 0 # if locally variable mean it is set from trend in ikrige, otherwise not used
# print('gmean'); print(gmean)
                ccdf[ic], cstdev = ikrige(ix, iy, nx, ny, xx, yy, ktype, x, y, vr[:, ic], sec, colocorr, gmean, trend1d[:, ic], nums, cov_table, nctx, ncty,
icnode, ixnode, iynode, cnodeiv[ic], cnodex, cnodey, nst[ic], c0[
ic], 9999.9, cc[ic], aa[ic], it[ic], ang[ic], anis[ic],
rotmat[ic], maxcov[ic], MAXCTX, MAXCTY, MAXKR1, MAXKR2)
# print('ccdf'); print(ccdf)
# Correct order relations:
ccdfo = ordrel(ivtype, ncut, ccdf)
# Draw from the local distribution:
zval = beyond(ivtype, ncut, thresh, ccdfo, ng, gcut, gcdf, zmin,
zmax, ltail, ltpar, middle, mpar, utail, utpar, zval, cdfval)
sim[index] = zval
# print('zval'); print(zval)
# END MAIN LOOP OVER SIMULATIONS:
sim_out = np.zeros((ny, nx))
for ind in range(0, nxy):
iy = int((ind)/nx)
ix = ind - (iy)*nx
sim_out[ny-iy-1, ix] = sim[ind]
return sim_out
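# Example use of sisim for a categorical variable (a minimal sketch): a single indicator
# realization on a 20 x 20 grid with simple kriging; the facies labels, proportions, trend
# and indicator variograms below are hypothetical, mirroring the ik2d example above.
def _example_sisim():
    import pandas as pd  # pandas assumed available
    df = pd.DataFrame({"X": [100.0, 400.0, 700.0], "Y": [150.0, 300.0, 650.0],
                       "facies": [0, 1, 1]})
    vario_cat = {"nst": 1, "nug": 0.0, "cc1": 1.0, "it1": 1,
                 "azi1": 0.0, "hmaj1": 300.0, "hmin1": 300.0}
    varios = [vario_cat, vario_cat]    # one indicator variogram (sill 1.0) per category
    trend = np.full((20, 20, 2), 0.5)  # constant proportions, shape (ny, nx, ncut)
    sim = sisim(df, "X", "Y", "facies", ivtype=0, koption=0, ncut=2, thresh=[0, 1],
                gcdf=[0.4, 0.6], trend=trend, tmin=-998.0, tmax=998.0,
                zmin=0.0, zmax=1.0, ltail=1, ltpar=1.0, middle=1, mpar=1.0,
                utail=1, utpar=1.0, nx=20, xmn=25.0, xsiz=50.0, ny=20, ymn=25.0,
                ysiz=50.0, seed=73073, ndmin=0, ndmax=3, nodmax=12, mults=0,
                nmult=2, noct=-1, radius=600.0, ktype=0, vario=varios)
    return sim  # (ny, nx) ndarray with the simulated categories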
def kb2d_locations(
df,
xcol,
ycol,
vcol,
tmin,
tmax,
df_loc,
xcol_loc,
ycol_loc,
ndmin,
ndmax,
radius,
ktype,
skmean,
vario,
):
"""GSLIB's KB2D program (Deutsch and Journel, 1998) converted from the
original Fortran to Python by Michael Pyrcz, the University of Texas at
Austin (Jan, 2019). Version for kriging at a set of spatial locations.
:param df: pandas DataFrame with the spatial data
:param xcol: name of the x coordinate column
:param ycol: name of the y coordinate column
:param vcol: name of the property column
:param tmin: property trimming limit
:param tmax: property trimming limit
:param df_loc: pandas DataFrame with the locations to krige
    :param xcol_loc: name of the x coordinate column for locations to krige
    :param ycol_loc: name of the y coordinate column for locations to krige
:param ndmin: minimum number of data points to use for kriging a block
:param ndmax: maximum number of data points to use for kriging a block
:param radius: maximum isotropic search radius
    :param ktype: kriging type, 0 - simple kriging and 1 - ordinary kriging
    :param skmean: simple kriging mean (used if ktype = 0)
    :param vario: dictionary with the variogram parameters
    :return: kriged estimates and kriging variances at the locations (1D ndarrays)
"""
# Constants
UNEST = -999.
EPSLON = 1.0e-10
VERSION = 2.907
first = True
PMX = 9999.0
MAXSAM = ndmax + 1
MAXKD = MAXSAM + 1
MAXKRG = MAXKD * MAXKD
# load the variogram
nst = vario['nst']
cc = np.zeros(nst)
aa = np.zeros(nst)
it = np.zeros(nst)
ang = np.zeros(nst)
anis = np.zeros(nst)
c0 = vario['nug']
cc[0] = vario['cc1']
it[0] = vario['it1']
ang[0] = vario['azi1']
aa[0] = vario['hmaj1']
anis[0] = vario['hmin1']/vario['hmaj1']
if nst == 2:
cc[1] = vario['cc2']
it[1] = vario['it2']
ang[1] = vario['azi2']
aa[1] = vario['hmaj2']
anis[1] = vario['hmin2']/vario['hmaj2']
# Allocate the needed memory:
xa = np.zeros(MAXSAM)
ya = np.zeros(MAXSAM)
vra = np.zeros(MAXSAM)
dist = np.zeros(MAXSAM)
nums = np.zeros(MAXSAM)
r = np.zeros(MAXKD)
rr = np.zeros(MAXKD)
s = np.zeros(MAXKD)
a = np.zeros(MAXKRG)
klist = np.zeros(len(df_loc)) # list of kriged estimates
vlist = np.zeros(len(df_loc))
# Load the data
# trim values outside tmin and tmax
df_extract = df.loc[(df[vcol] >= tmin) & (df[vcol] <= tmax)]
nd = len(df_extract)
ndmax = min(ndmax, nd)
x = df_extract[xcol].values
y = df_extract[ycol].values
vr = df_extract[vcol].values
# Load the estimation loactions
nd_loc = len(df_loc)
    x_loc = df_loc[xcol_loc].values
    y_loc = df_loc[ycol_loc].values
vr_loc = df_loc[vcol].values
# Make a KDTree for fast search of nearest neighbours
dp = list((y[i], x[i]) for i in range(0, nd))
data_locs = np.column_stack((y, x))
tree = sp.cKDTree(data_locs, leafsize=16, compact_nodes=True,
copy_data=False, balanced_tree=True)
# Summary statistics for the data after trimming
avg = vr.mean()
stdev = vr.std()
ss = stdev**2.0
vrmin = vr.min()
vrmax = vr.max()
# Initialize accumulators:
cbb = 0.0
rad2 = radius*radius
# Calculate Block Covariance. Check for point kriging.
rotmat, maxcov = setup_rotmat(c0, nst, it, cc, ang, PMX)
cov = cova2(0.0, 0.0, 0.0, 0.0, nst, c0, PMX, cc,
aa, it, ang, anis, rotmat, maxcov)
# Keep this value to use for the unbiasedness constraint:
unbias = cov
cbb = cov
first = False
# MAIN LOOP OVER ALL THE BLOCKS IN THE GRID:
nk = 0
ak = 0.0
vk = 0.0
for idata in range(len(df_loc)):
print('Working on location ' + str(idata))
xloc = x_loc[idata]
yloc = y_loc[idata]
current_node = (yloc, xloc)
# Find the nearest samples within each octant: First initialize
# the counter arrays:
na = -1 # accounting for 0 as first index
dist.fill(1.0e+20)
nums.fill(-1)
# use kd tree for fast nearest data search
dist, nums = tree.query(current_node, ndmax)
# remove any data outside search radius
na = len(dist)
nums = nums[dist < radius]
dist = dist[dist < radius]
na = len(dist)
        # Are there enough samples?
if na + 1 < ndmin: # accounting for min index of 0
est = UNEST
estv = UNEST
print('UNEST for Data ' + str(idata) +
', at ' + str(xloc) + ',' + str(yloc))
else:
# Put coordinates and values of neighborhood samples into xa,ya,vra:
for ia in range(0, na):
jj = int(nums[ia])
xa[ia] = x[jj]
ya[ia] = y[jj]
vra[ia] = vr[jj]
# Handle the situation of only one sample:
if na == 0: # accounting for min index of 0 - one sample case na = 0
cb1 = cova2(xa[0], ya[0], xa[0], ya[0], nst, c0,
PMX, cc, aa, it, ang, anis, rotmat, maxcov)
xx = xa[0] - xloc
yy = ya[0] - yloc
# Establish Right Hand Side Covariance:
cb = cova2(xx, yy, 0.0, 0.0, nst, c0, PMX, cc,
aa, it, ang, anis, rotmat, maxcov)
if ktype == 0:
s[0] = cb/cbb
est = s[0]*vra[0] + (1.0-s[0])*skmean
estv = cbb - s[0] * cb
else:
est = vra[0]
estv = cbb - 2.0*cb + cb1
else:
# Solve the Kriging System with more than one sample:
neq = na + ktype # accounting for first index of 0
# print('NEQ' + str(neq))
nn = (neq + 1)*neq/2
# Set up kriging matrices:
iin = -1 # accounting for first index of 0
for j in range(0, na):
# Establish Left Hand Side Covariance Matrix:
for i in range(0, na): # was j - want full matrix
iin = iin + 1
a[iin] = cova2(xa[i], ya[i], xa[j], ya[j], nst,
c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)
if ktype == 1:
iin = iin + 1
a[iin] = unbias
xx = xa[j] - xloc
yy = ya[j] - yloc
# Establish Right Hand Side Covariance:
cb = cova2(xx, yy, 0.0, 0.0, nst, c0, PMX, cc,
aa, it, ang, anis, rotmat, maxcov)
r[j] = cb
rr[j] = r[j]
# Set the unbiasedness constraint:
if ktype == 1:
for i in range(0, na):
iin = iin + 1
a[iin] = unbias
iin = iin + 1
a[iin] = 0.0
r[neq-1] = unbias
                    rr[neq-1] = r[neq-1]
# Solve the Kriging System:
# print('NDB' + str(ndb))
# print('NEQ' + str(neq) + ' Left' + str(a) + ' Right' + str(r))
# stop
s = ksol_numpy(neq, a, r)
ising = 0 # need to figure this out
# print('weights' + str(s))
# stop
# Write a warning if the matrix is singular:
if ising != 0:
print('WARNING KB2D: singular matrix')
                    print('              for location ' + str(idata) +
                          ' at ' + str(xloc) + ',' + str(yloc))
est = UNEST
estv = UNEST
else:
# Compute the estimate and the kriging variance:
est = 0.0
estv = cbb
sumw = 0.0
if ktype == 1:
estv = estv - (s[na])*unbias
for i in range(0, na):
sumw = sumw + s[i]
est = est + s[i]*vra[i]
estv = estv - s[i]*rr[i]
if ktype == 0:
est = est + (1.0-sumw)*skmean
klist[idata] = est
vlist[idata] = estv
if est > UNEST:
nk = nk + 1
ak = ak + est
vk = vk + est*est
# END OF MAIN LOOP OVER ALL THE BLOCKS:
if nk >= 1:
ak = ak / float(nk)
vk = vk/float(nk) - ak*ak
print(' Estimated ' + str(nk) + ' blocks ')
print(' average ' + str(ak) + ' variance ' + str(vk))
return klist, vlist
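# A minimal usage sketch for kb2d_locations (an illustrative addition, not part of the
# original GSLIB port). It assumes the module-level imports that kb2d_locations itself
# relies on (pandas as pd, numpy as np, scipy.spatial as sp) are available, and every
# number below is made up. The variogram dictionary uses the keys read above
# (nst, nug, cc1, it1, azi1, hmaj1, hmin1).
def _kb2d_locations_example():
    data = pd.DataFrame({'X': [10.0, 30.0, 50.0],
                         'Y': [10.0, 40.0, 20.0],
                         'Por': [0.10, 0.15, 0.12]})
    locs = pd.DataFrame({'X': [25.0, 45.0], 'Y': [25.0, 30.0], 'Por': [0.0, 0.0]})
    vario = {'nst': 1, 'nug': 0.0, 'cc1': 1.0, 'it1': 1, 'azi1': 0.0,
             'hmaj1': 50.0, 'hmin1': 50.0}
    # Ordinary kriging (ktype=1) of porosity at the two locations
    est, var = kb2d_locations(data, 'X', 'Y', 'Por', -9999.9, 9999.9,
                              locs, 'X', 'Y', ndmin=1, ndmax=3, radius=100.0,
                              ktype=1, skmean=0.12, vario=vario)
    return est, var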
# Partial Correlation in Python (clone of Matlab's partialcorr)
# This uses the linear regression approach to compute the partial correlation
# (might be slow for a huge number of variables). The algorithm is detailed here:
# http://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression
# Taking X and Y two variables of interest and Z the matrix with all the variable minus {X, Y},
# the algorithm can be summarized as
# 1) perform a normal linear least-squares regression with X as the target and Z as the predictor
# 2) calculate the residuals in Step #1
# 3) perform a normal linear least-squares regression with Y as the target and Z as the predictor
# 4) calculate the residuals in Step #3
# 5) calculate the correlation coefficient between the residuals from Steps #2 and #4;
# The result is the partial correlation between X and Y while controlling for the effect of Z
# Date: Nov 2014
# Author: Fabian Pedregosa-Izquierdo, f@bianp.net
# Testing: Valentina Borghesani, valentinaborghesani@gmail.com
def partial_corr(C):
# Returns the sample linear partial correlation coefficients between pairs of variables in C, controlling
# for the remaining variables in C.
# Parameters
# C : array-like, shape (n, p)
# Array with the different variables. Each column of C is taken as a variable
# Returns
# P : array-like, shape (p, p)
# P[i, j] contains the partial correlation of C[:, i] and C[:, j] controlling
# for the remaining variables in C.
C = np.asarray(C)
p = C.shape[1]
    P_corr = np.zeros((p, p), dtype=float)
for i in range(p):
P_corr[i, i] = 1
for j in range(i+1, p):
            idx = np.ones(p, dtype=bool)
idx[i] = False
idx[j] = False
beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
res_j = C[:, j] - C[:, idx].dot(beta_i)
res_i = C[:, i] - C[:, idx].dot(beta_j)
corr = stats.pearsonr(res_i, res_j)[0]
P_corr[i, j] = corr
P_corr[j, i] = corr
return P_corr
def semipartial_corr(C): # Michael Pyrcz modified the function above by Fabian Pedregosa-Izquierdo, f@bianp.net for semipartial correlation
C = np.asarray(C)
p = C.shape[1]
    P_corr = np.zeros((p, p), dtype=float)
for i in range(p):
P_corr[i, i] = 1
for j in range(i+1, p):
            idx = np.ones(p, dtype=bool)
idx[i] = False
idx[j] = False
beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
res_j = C[:, j] - C[:, idx].dot(beta_i)
res_i = C[:, i] # just use the value, not a residual
corr = stats.pearsonr(res_i, res_j)[0]
P_corr[i, j] = corr
P_corr[j, i] = corr
return P_corr
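# Minimal usage sketch for partial_corr (illustrative only, not part of the original
# module; it assumes numpy, scipy.linalg and scipy.stats are imported at module level
# as np, linalg and stats). Columns 0 and 1 are both driven by column 2, so the partial
# correlation P[0, 1] controlling for column 2 is much smaller than the raw correlation.
def _partial_corr_example():
    rng = np.random.RandomState(0)
    z = rng.normal(size=200)
    x = z + 0.3 * rng.normal(size=200)
    y = z + 0.3 * rng.normal(size=200)
    C = np.column_stack((x, y, z))
    return partial_corr(C)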
|
from collections import deque
chocolate = [int(num) for num in input().split(",")]
milk = deque([int(num) for num in input().split(",")])
milkshakes = 0
milkshakes_success = False
while chocolate and milk:
current_chocolate = chocolate[-1]
current_milk = milk[0]
if current_chocolate <= 0 and current_milk <= 0:
chocolate.pop()
milk.popleft()
continue
if current_chocolate <= 0:
chocolate.pop()
continue
elif current_milk <= 0:
milk.popleft()
continue
if current_chocolate == current_milk:
chocolate.pop()
milk.popleft()
milkshakes += 1
if milkshakes == 5:
milkshakes_success = True
break
else:
milk.append(milk.popleft())
        chocolate[-1] -= 5  # update the chocolate in place, not the local copy
if milkshakes_success:
print("Great! You made all the chocolate milkshakes needed!")
else:
print("Not enough milkshakes.")
if chocolate:
print(f"Chocolate: {', '.join([str(x) for x in chocolate])}")
else:
print("Chocolate: empty")
if milk:
print(f"Milk: {', '.join([str(x) for x in milk])}")
else:
print("Milk: empty")
|
# -*- coding: utf-8 -*-
""" Language, locale and alphabet encapsulation module
Copyright (C) 2020 Miðeind ehf.
Original author: Vilhjálmur Þorsteinsson
The GNU General Public License, version 3, applies to this software.
For further information, see https://github.com/mideind/Netskrafl
The classes in this module encapsulate particulars of supported
languages, including the character set, scores, tiles in the
initial bag, sorting, etc.
Currently the only supported language is Icelandic.
"""
import functools
class Alphabet:
""" This implementation of the Alphabet class encapsulates particulars of the Icelandic
language. Other languages can be supported by modifying or subclassing this class.
"""
    def __init__(self):
        pass
    # Sort ordering of allowed Icelandic letters
    order = u'aábdðeéfghiíjklmnoóprstuúvxyýþæö'
# Upper case version of the order string
upper = u'AÁBDÐEÉFGHIÍJKLMNOÓPRSTUÚVXYÝÞÆÖ'
# All tiles including wildcard '?'
all_tiles = order + u'?'
# Sort ordering of all valid letters
full_order = u'aábcdðeéfghiíjklmnoópqrstuúvwxyýzþæö'
# Upper case version of the full order string
full_upper = u'AÁBCDÐEÉFGHIÍJKLMNOÓPQRSTUÚVWXYÝZÞÆÖ'
# Map letters to bits
letter_bit = {letter: 1 << ix for ix, letter in enumerate(order)}
# Locale collation (sorting) map, initialized in _init()
_lcmap = None # Case sensitive
_lcmap_nocase = None # Case insensitive
@staticmethod
def bit_pattern(word):
""" Return a pattern of bits indicating which letters are present in the word """
return functools.reduce(lambda x, y: x | y, [Alphabet.letter_bit[c] for c in word], 0)
@staticmethod
def bit_of(c):
""" Returns the bit corresponding to a character in the alphabet """
return Alphabet.letter_bit[c]
@staticmethod
def all_bits_set():
""" Return a bit pattern where the bits for all letters in the Alphabet are set """
return 2 ** len(Alphabet.order) - 1
@staticmethod
def lowercase(ch):
""" Convert an uppercase character to lowercase """
return Alphabet.full_order[Alphabet.full_upper.index(ch)]
@staticmethod
def tolower(s):
""" Return the argument string converted to lowercase """
return u''.join([Alphabet.lowercase(c) if c in Alphabet.full_upper else c for c in s])
@staticmethod
def sort(l):
""" Sort a list in-place by lexicographic ordering according to this Alphabet """
l.sort(key=Alphabet.sortkey)
@staticmethod
def sorted(l):
""" Return a list sorted by lexicographic ordering according to this Alphabet """
return sorted(l, key=Alphabet.sortkey)
@staticmethod
def string_subtract(a, b):
""" Subtract all letters in b from a, counting each instance separately """
# Note that this cannot be done with sets, as they fold multiple letter instances into one
lcount = [a.count(c) - b.count(c) for c in Alphabet.all_tiles]
return u''.join(
[
Alphabet.all_tiles[ix] * lcount[ix]
for ix in range(len(lcount))
if lcount[ix] > 0
]
)
# noinspection PyUnusedLocal
@staticmethod
def format_timestamp(ts):
""" Return a timestamp formatted as a readable string """
# Currently always returns the full ISO format: YYYY-MM-DD HH:MM:SS
return u"" + ts.isoformat(' ')[0:19]
# noinspection PyUnusedLocal
@staticmethod
def format_timestamp_short(ts):
""" Return a timestamp formatted as a readable string """
# Returns a short ISO format: YYYY-MM-DD HH:MM
return u"" + ts.isoformat(' ')[0:16]
@staticmethod
def _init():
""" Create a collation (sort) mapping for the Icelandic language """
lcmap = [i for i in range(0, 256)]
def rotate(letter, sort_after):
""" Modifies the lcmap so that the letter is sorted after the indicated letter """
sort_as = lcmap[sort_after] + 1
letter_val = lcmap[letter]
# We only support the case where a letter is moved forward in the sort order
if letter_val > sort_as:
for i in range(0, 256):
if (lcmap[i] >= sort_as) and (lcmap[i] < letter_val):
lcmap[i] += 1
lcmap[letter] = sort_as
def adjust(s):
""" Ensure that the sort order in the lcmap is in ascending order as in s """
# This does not need to be terribly efficient as the code is
# only run once, during initialization
for i in range(1, len(s) - 1):
rotate(ord(s[i]), ord(s[i-1]))
adjust(Alphabet.full_upper) # Uppercase adjustment
adjust(Alphabet.full_order) # Lowercase adjustment
# Now we have a case-sensitive sorting map: copy it
Alphabet._lcmap = lcmap[:]
# Create a case-insensitive sorting map, where the lower case
# characters have the same sort value as the upper case ones
for i, c in enumerate(Alphabet.full_order):
lcmap[ord(c)] = lcmap[ord(Alphabet.full_upper[i])]
# Store the case-insensitive sorting map
Alphabet._lcmap_nocase = lcmap
@staticmethod
def sortkey(lstr):
""" Key function for locale-based sorting """
assert Alphabet._lcmap
return [Alphabet._lcmap[ord(c)] if ord(c) <= 255 else 256 for c in lstr]
@staticmethod
def sortkey_nocase(lstr):
""" Key function for locale-based sorting, case-insensitive """
assert Alphabet._lcmap_nocase
return [Alphabet._lcmap_nocase[ord(c)] if ord(c) <= 255 else 256 for c in lstr]
# Initialize the locale collation (sorting) map
Alphabet._init() # pylint: disable=W0212
# noinspection PyUnresolvedReferences
class TileSet(object):
""" Abstract base class for tile sets. Concrete classes are found below. """
# The following will be overridden in derived classes
scores = dict()
bag_tiles = []
@classmethod
def score(cls, tiles):
""" Return the net (plain) score of the given tiles """
if not tiles:
return 0
return sum([cls.scores[tile] for tile in tiles])
@classmethod
def full_bag(cls):
""" Return a full bag of tiles """
if not hasattr(cls, "_full_bag"):
# Cache the bag
cls._full_bag = u''.join([tile * count for (tile, count) in cls.bag_tiles])
return cls._full_bag
@classmethod
def num_tiles(cls):
""" Return the total number of tiles in this tile set """
return sum(n for letter, n in cls.bag_tiles)
class OldTileSet(TileSet):
""" The old (original) Icelandic tile set """
# Letter scores in the old (original) Icelandic tile set
scores = {
u'a': 1,
u'á': 4,
u'b': 6,
u'd': 4,
u'ð': 2,
u'e': 1,
u'é': 6,
u'f': 3,
u'g': 2,
u'h': 3,
u'i': 1,
u'í': 4,
u'j': 5,
u'k': 2,
u'l': 2,
u'm': 2,
u'n': 1,
u'o': 3,
u'ó': 6,
u'p': 8,
u'r': 1,
u's': 1,
u't': 1,
u'u': 1,
u'ú': 8,
u'v': 3,
u'x': 10,
u'y': 7,
u'ý': 9,
u'þ': 4,
u'æ': 5,
u'ö': 7,
u'?': 0
}
# Tiles in initial bag, with frequencies
bag_tiles = [
(u"a", 10),
(u"á", 2),
(u"b", 1),
(u"d", 2),
(u"ð", 5),
(u"e", 6),
(u"é", 1),
(u"f", 3),
(u"g", 4),
(u"h", 2),
(u"i", 8),
(u"í", 2),
(u"j", 1),
(u"k", 3),
(u"l", 3),
(u"m", 3),
(u"n", 8),
(u"o", 3),
(u"ó", 1),
(u"p", 1),
(u"r", 7),
(u"s", 6),
(u"t", 5),
(u"u", 6),
(u"ú", 1),
(u"v", 2),
(u"x", 1),
(u"y", 1),
(u"ý", 1),
(u"þ", 1),
(u"æ", 1),
(u"ö", 1),
(u"?", 2) # Blank tiles
]
# Number of tiles in bag
OldTileSet.BAG_SIZE = OldTileSet.num_tiles()
class NewTileSet(TileSet):
""" The new Icelandic tile set, created by Skraflfélag Íslands """
# Scores in new Icelandic tile set
scores = {
u'a': 1,
u'á': 3,
u'b': 5,
u'd': 5,
u'ð': 2,
u'e': 3,
u'é': 7,
u'f': 3,
u'g': 3,
u'h': 4,
u'i': 1,
u'í': 4,
u'j': 6,
u'k': 2,
u'l': 2,
u'm': 2,
u'n': 1,
u'o': 5,
u'ó': 3,
u'p': 5,
u'r': 1,
u's': 1,
u't': 2,
u'u': 2,
u'ú': 4,
u'v': 5,
u'x': 10,
u'y': 6,
u'ý': 5,
u'þ': 7,
u'æ': 4,
u'ö': 6,
u'?': 0
}
# New Icelandic tile set
bag_tiles = [
(u"a", 11),
(u"á", 2),
(u"b", 1),
(u"d", 1),
(u"ð", 4),
(u"e", 3),
(u"é", 1),
(u"f", 3),
(u"g", 3),
(u"h", 1),
(u"i", 7),
(u"í", 1),
(u"j", 1),
(u"k", 4),
(u"l", 5),
(u"m", 3),
(u"n", 7),
(u"o", 1),
(u"ó", 2),
(u"p", 1),
(u"r", 8),
(u"s", 7),
(u"t", 6),
(u"u", 6),
(u"ú", 1),
(u"v", 1),
(u"x", 1),
(u"y", 1),
(u"ý", 1),
(u"þ", 1),
(u"æ", 2),
(u"ö", 1),
(u"?", 2) # Blank tiles
]
# Number of tiles in bag
NewTileSet.BAG_SIZE = NewTileSet.num_tiles()
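# Minimal usage sketch (illustrative only, not part of the original module):
# locale-aware sorting with the Alphabet class and scoring a rack with a tile set.
def _alphabet_example():
    words = [u"ör", u"api", u"ás", u"edik"]
    ordered = Alphabet.sorted(words)          # Icelandic collation, 'á' sorts after 'a'
    rack_score = NewTileSet.score(u"skrafl")  # sum of the per-letter scores
    return ordered, rack_score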
|
import pandas as pd
from pulp import *
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
def best_reco(required_resources, instance_df):
prob = LpProblem("InstanceRecommender", LpMinimize)
instances = instance_df['name'].values
instance_dict = instance_df.set_index('name').T.to_dict()
instance_vars = LpVariable.dicts(
"Instance", instances, lowBound=0, cat='Integer')
prob += lpSum([instance_dict[i]['price'] * instance_vars[i]
for i in instances])
prob += lpSum([instance_dict[i]['vcpus'] * instance_vars[i]
for i in instances]) >= required_resources['vcpus']
prob += lpSum([instance_dict[i]['memory'] * instance_vars[i]
for i in instances]) >= required_resources['memory']
prob.solve()
print("Status:", LpStatus[prob.status])
best_reco = pd.DataFrame([
{'name': remove_prefix(v.name, "Instance_"), 'units': v.varValue}
for v in prob.variables() if v.varValue > 0]
)
best_reco = best_reco.merge(instance_df)
return best_reco
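# Minimal usage sketch for best_reco (illustrative only; the instance names, sizes and
# prices below are made up). The solver picks the cheapest integer mix of instances
# that covers the requested vCPUs and memory.
def _best_reco_example():
    instance_df = pd.DataFrame([
        {'name': 'small', 'vcpus': 2, 'memory': 4, 'price': 0.05},
        {'name': 'large', 'vcpus': 8, 'memory': 32, 'price': 0.20},
    ])
    required_resources = {'vcpus': 8, 'memory': 30}
    return best_reco(required_resources, instance_df)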
|
import os
import sys
from setuptools import setup, find_packages
extra = {}
if sys.version_info < (3, 2):
extra['install_requires'] = "futures >= 2.1.6" # backport of py32 concurrent.futures module
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(
name='pySmartDL',
version='1.2.5',
url='http://pypi.python.org/pypi/pySmartDL/',
author='Itay Brandes',
author_email='itay.brandes+pysmartdl@gmail.com',
license='Public Domain',
packages=find_packages(),
description='A Smart Download Manager for Python',
long_description=open('README.md').read(),
    test_suite="test.test_pySmartDL.test_suite",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Environment :: Console',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
"License :: Public Domain",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
**extra
)
|
import logging
import sys
import os
# SECRETS
DATABASE_URL = os.getenv('DATABASE_URL')
# Logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] - %(message)s')
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TaxSummary(object):
"""
Tax implication that current tenant may be eligible while using specific listing
"""
def __init__(self, **kwargs):
"""
Initializes a new TaxSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param code:
The value to assign to the code property of this TaxSummary.
:type code: str
:param name:
The value to assign to the name property of this TaxSummary.
:type name: str
:param country:
The value to assign to the country property of this TaxSummary.
:type country: str
:param url:
The value to assign to the url property of this TaxSummary.
:type url: str
"""
self.swagger_types = {
'code': 'str',
'name': 'str',
'country': 'str',
'url': 'str'
}
self.attribute_map = {
'code': 'code',
'name': 'name',
'country': 'country',
'url': 'url'
}
self._code = None
self._name = None
self._country = None
self._url = None
@property
def code(self):
"""
**[Required]** Gets the code of this TaxSummary.
Unique code for the tax.
:return: The code of this TaxSummary.
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""
Sets the code of this TaxSummary.
Unique code for the tax.
:param code: The code of this TaxSummary.
:type: str
"""
self._code = code
@property
def name(self):
"""
Gets the name of this TaxSummary.
Name of the tax code.
:return: The name of this TaxSummary.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this TaxSummary.
Name of the tax code.
:param name: The name of this TaxSummary.
:type: str
"""
self._name = name
@property
def country(self):
"""
Gets the country of this TaxSummary.
Country, which imposes the tax.
:return: The country of this TaxSummary.
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""
Sets the country of this TaxSummary.
Country, which imposes the tax.
:param country: The country of this TaxSummary.
:type: str
"""
self._country = country
@property
def url(self):
"""
Gets the url of this TaxSummary.
The URL with more details about this tax.
:return: The url of this TaxSummary.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""
Sets the url of this TaxSummary.
The URL with more details about this tax.
:param url: The url of this TaxSummary.
:type: str
"""
self._url = url
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
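# Minimal usage sketch (illustrative values only). As the constructor docstring above
# states, keyword arguments corresponding to the properties are supported.
def _tax_summary_example():
    tax = TaxSummary(code='VAT', name='Value Added Tax', country='DE',
                     url='https://www.example.com/vat')
    return repr(tax)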
|
from moto.iam.responses import IamResponse, GENERIC_EMPTY_TEMPLATE
from moto.iam.models import iam_backend as moto_iam_backend
from localstack import config
from localstack.constants import DEFAULT_PORT_IAM_BACKEND
from localstack.services.infra import start_moto_server
def patch_moto():
def delete_policy(self):
policy_arn = self._get_param('PolicyArn')
moto_iam_backend.managed_policies.pop(policy_arn, None)
template = self.response_template(GENERIC_EMPTY_TEMPLATE)
return template.render(name='DeletePolicyResponse')
if not hasattr(IamResponse, 'delete_policy'):
IamResponse.delete_policy = delete_policy
def start_iam(port=None, asynchronous=False, update_listener=None):
port = port or config.PORT_IAM
patch_moto()
return start_moto_server('iam', port, name='IAM', asynchronous=asynchronous,
backend_port=DEFAULT_PORT_IAM_BACKEND, update_listener=update_listener)
|
# 200. Number of Islands
'''
Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example 1:
Input: grid = [
["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]
]
Output: 1
Example 2:
Input: grid = [
["1","1","0","0","0"],
["1","1","0","0","0"],
["0","0","1","0","0"],
["0","0","0","1","1"]
]
Output: 3
'''
class Solution:
'''
    In-place replacement + DFS.
O(mn) runtime, O(mn) storage for call stack.
Beat 99% runtime, 52% storage of all Leetcode submissions.
'''
def numIslands(self, grid):
m = len(grid)
if m == 0: return 0
def exploreIsland(i,j):
grid[i][j] = '0'
if j > 0 and grid[i][j-1] == '1': exploreIsland(i,j-1)
if j < n-1 and grid[i][j+1] == '1': exploreIsland(i,j+1)
if i > 0 and grid[i-1][j] == '1': exploreIsland(i-1,j)
if i < m-1 and grid[i+1][j] == '1': exploreIsland(i+1,j)
n,out = len(grid[0]),0
for i in range(m):
for j in range(n):
if grid[i][j] == '1':
out += 1
exploreIsland(i,j)
return out
'''
    In-place replacement + BFS.
O(mn) runtime, O(min(m,n)) storage. See explanation why it is O(min(m,n)) from smeanionn: https://imgur.com/gallery/M58OKvB.
Beat 99% runtime, 82% storage of all Leetcode submissions.
'''
def numIslands2(self, grid):
m = len(grid)
if m == 0: return 0
n,out = len(grid[0]),0
for i in range(m):
for j in range(n):
if grid[i][j] == '1':
layer = set([(i,j)])
while layer:
new_layer = set() # Use set instead of list. Otherwise, you may add same item more than 1 time.
for i1,j1 in layer:
grid[i1][j1] = '0'
if i1 > 0 and grid[i1-1][j1] == '1': new_layer.add((i1-1,j1))
if i1 < m-1 and grid[i1+1][j1] == '1': new_layer.add((i1+1,j1))
if j1 > 0 and grid[i1][j1-1] == '1': new_layer.add((i1,j1-1))
if j1 < n-1 and grid[i1][j1+1] == '1': new_layer.add((i1,j1+1))
layer = new_layer
out += 1
return out
# Tests.
assert(Solution().numIslands([
["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]
]) == 1)
assert(Solution().numIslands([
["1","1","0","0","0"],
["1","1","0","0","0"],
["0","0","1","0","0"],
["0","0","0","1","1"]
]) == 3)
assert(Solution().numIslands([
["1","1","0","0","0"],
["1","1","1","0","0"],
["0","0","1","0","0"],
["0","1","1","1","1"]
]) == 1)
assert(Solution().numIslands([]) == 0)
assert(Solution().numIslands([["0","1","0","1","0"]]) == 2)
assert(Solution().numIslands([["0"],["1"],["1"],["1"],["0"]]) == 1)
assert(Solution().numIslands2([
["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]
]) == 1)
assert(Solution().numIslands2([
["1","1","0","0","0"],
["1","1","0","0","0"],
["0","0","1","0","0"],
["0","0","0","1","1"]
]) == 3)
assert(Solution().numIslands2([
["1","1","0","0","0"],
["1","1","1","0","0"],
["0","0","1","0","0"],
["0","1","1","1","1"]
]) == 1)
assert(Solution().numIslands2([]) == 0)
assert(Solution().numIslands2([["0","1","0","1","0"]]) == 2)
assert(Solution().numIslands2([["0"],["1"],["1"],["1"],["0"]]) == 1)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Serializer, Deserializer
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from ._configuration_async import ResourceManagementClientConfiguration
class _SDKClient(object):
    def __init__(self, *args, **kwargs):
        """This is a fake class to support the current implementation of MultiApiClientMixin.
        Will be removed in the final version of the multiapi azure-core based client.
        """
pass
class ResourceManagementClient(MultiApiClientMixin, _SDKClient):
"""Provides operations for working with resources and resource groups.
    This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str api_version: API version to use if no profile is provided, or if
missing in profile.
:param str base_url: Service URL
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2019-10-01'
_PROFILE_TAG = "azure.mgmt.resource.ResourceManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential, # type: "AsyncTokenCredential"
subscription_id, # type: str
api_version=None,
base_url=None,
profile=KnownProfiles.default,
**kwargs # type: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = ResourceManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(ResourceManagementClient, self).__init__(
credential,
self._config,
api_version=api_version,
profile=profile
)
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2016-02-01: :mod:`v2016_02_01.models<azure.mgmt.resource.v2016_02_01.models>`
* 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.resource.v2016_09_01.models>`
* 2017-05-10: :mod:`v2017_05_10.models<azure.mgmt.resource.v2017_05_10.models>`
* 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.resource.v2018_02_01.models>`
* 2018-05-01: :mod:`v2018_05_01.models<azure.mgmt.resource.v2018_05_01.models>`
* 2019-05-01: :mod:`v2019_05_01.models<azure.mgmt.resource.v2019_05_01.models>`
* 2019-05-10: :mod:`v2019_05_10.models<azure.mgmt.resource.v2019_05_10.models>`
* 2019-07-01: :mod:`v2019_07_01.models<azure.mgmt.resource.v2019_07_01.models>`
* 2019-08-01: :mod:`v2019_08_01.models<azure.mgmt.resource.v2019_08_01.models>`
* 2019-10-01: :mod:`v2019_10_01.models<azure.mgmt.resource.v2019_10_01.models>`
"""
if api_version == '2016-02-01':
from ..v2016_02_01 import models
return models
elif api_version == '2016-09-01':
from ..v2016_09_01 import models
return models
elif api_version == '2017-05-10':
from ..v2017_05_10 import models
return models
elif api_version == '2018-02-01':
from ..v2018_02_01 import models
return models
elif api_version == '2018-05-01':
from ..v2018_05_01 import models
return models
elif api_version == '2019-05-01':
from ..v2019_05_01 import models
return models
elif api_version == '2019-05-10':
from ..v2019_05_10 import models
return models
elif api_version == '2019-07-01':
from ..v2019_07_01 import models
return models
elif api_version == '2019-08-01':
from ..v2019_08_01 import models
return models
elif api_version == '2019-10-01':
from ..v2019_10_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version))
@property
def deployment_operations(self):
"""Instance depends on the API version:
* 2016-02-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.v2016_02_01.aio.operations_async.DeploymentOperationsOperations>`
* 2016-09-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.v2016_09_01.aio.operations_async.DeploymentOperationsOperations>`
* 2017-05-10: :class:`DeploymentOperationsOperations<azure.mgmt.resource.v2017_05_10.aio.operations_async.DeploymentOperationsOperations>`
* 2018-02-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.v2018_02_01.aio.operations_async.DeploymentOperationsOperations>`
* 2018-05-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.v2018_05_01.aio.operations_async.DeploymentOperationsOperations>`
* 2019-05-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.v2019_05_01.aio.operations_async.DeploymentOperationsOperations>`
* 2019-05-10: :class:`DeploymentOperationsOperations<azure.mgmt.resource.v2019_05_10.aio.operations_async.DeploymentOperationsOperations>`
* 2019-07-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.v2019_07_01.aio.operations_async.DeploymentOperationsOperations>`
* 2019-08-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.v2019_08_01.aio.operations_async.DeploymentOperationsOperations>`
* 2019-10-01: :class:`DeploymentOperationsOperations<azure.mgmt.resource.v2019_10_01.aio.operations_async.DeploymentOperationsOperations>`
"""
api_version = self._get_api_version('deployment_operations')
if api_version == '2016-02-01':
from ..v2016_02_01.aio.operations_async import DeploymentOperationsOperations as OperationClass
elif api_version == '2016-09-01':
from ..v2016_09_01.aio.operations_async import DeploymentOperationsOperations as OperationClass
elif api_version == '2017-05-10':
from ..v2017_05_10.aio.operations_async import DeploymentOperationsOperations as OperationClass
elif api_version == '2018-02-01':
from ..v2018_02_01.aio.operations_async import DeploymentOperationsOperations as OperationClass
elif api_version == '2018-05-01':
from ..v2018_05_01.aio.operations_async import DeploymentOperationsOperations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations_async import DeploymentOperationsOperations as OperationClass
elif api_version == '2019-05-10':
from ..v2019_05_10.aio.operations_async import DeploymentOperationsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import DeploymentOperationsOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import DeploymentOperationsOperations as OperationClass
elif api_version == '2019-10-01':
from ..v2019_10_01.aio.operations_async import DeploymentOperationsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def deployments(self):
"""Instance depends on the API version:
* 2016-02-01: :class:`DeploymentsOperations<azure.mgmt.resource.v2016_02_01.aio.operations_async.DeploymentsOperations>`
* 2016-09-01: :class:`DeploymentsOperations<azure.mgmt.resource.v2016_09_01.aio.operations_async.DeploymentsOperations>`
* 2017-05-10: :class:`DeploymentsOperations<azure.mgmt.resource.v2017_05_10.aio.operations_async.DeploymentsOperations>`
* 2018-02-01: :class:`DeploymentsOperations<azure.mgmt.resource.v2018_02_01.aio.operations_async.DeploymentsOperations>`
* 2018-05-01: :class:`DeploymentsOperations<azure.mgmt.resource.v2018_05_01.aio.operations_async.DeploymentsOperations>`
* 2019-05-01: :class:`DeploymentsOperations<azure.mgmt.resource.v2019_05_01.aio.operations_async.DeploymentsOperations>`
* 2019-05-10: :class:`DeploymentsOperations<azure.mgmt.resource.v2019_05_10.aio.operations_async.DeploymentsOperations>`
* 2019-07-01: :class:`DeploymentsOperations<azure.mgmt.resource.v2019_07_01.aio.operations_async.DeploymentsOperations>`
* 2019-08-01: :class:`DeploymentsOperations<azure.mgmt.resource.v2019_08_01.aio.operations_async.DeploymentsOperations>`
* 2019-10-01: :class:`DeploymentsOperations<azure.mgmt.resource.v2019_10_01.aio.operations_async.DeploymentsOperations>`
"""
api_version = self._get_api_version('deployments')
if api_version == '2016-02-01':
from ..v2016_02_01.aio.operations_async import DeploymentsOperations as OperationClass
elif api_version == '2016-09-01':
from ..v2016_09_01.aio.operations_async import DeploymentsOperations as OperationClass
elif api_version == '2017-05-10':
from ..v2017_05_10.aio.operations_async import DeploymentsOperations as OperationClass
elif api_version == '2018-02-01':
from ..v2018_02_01.aio.operations_async import DeploymentsOperations as OperationClass
elif api_version == '2018-05-01':
from ..v2018_05_01.aio.operations_async import DeploymentsOperations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations_async import DeploymentsOperations as OperationClass
elif api_version == '2019-05-10':
from ..v2019_05_10.aio.operations_async import DeploymentsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import DeploymentsOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import DeploymentsOperations as OperationClass
elif api_version == '2019-10-01':
from ..v2019_10_01.aio.operations_async import DeploymentsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def operations(self):
"""Instance depends on the API version:
* 2018-05-01: :class:`Operations<azure.mgmt.resource.v2018_05_01.aio.operations_async.Operations>`
* 2019-05-01: :class:`Operations<azure.mgmt.resource.v2019_05_01.aio.operations_async.Operations>`
* 2019-05-10: :class:`Operations<azure.mgmt.resource.v2019_05_10.aio.operations_async.Operations>`
* 2019-07-01: :class:`Operations<azure.mgmt.resource.v2019_07_01.aio.operations_async.Operations>`
* 2019-08-01: :class:`Operations<azure.mgmt.resource.v2019_08_01.aio.operations_async.Operations>`
* 2019-10-01: :class:`Operations<azure.mgmt.resource.v2019_10_01.aio.operations_async.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2018-05-01':
from ..v2018_05_01.aio.operations_async import Operations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations_async import Operations as OperationClass
elif api_version == '2019-05-10':
from ..v2019_05_10.aio.operations_async import Operations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import Operations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import Operations as OperationClass
elif api_version == '2019-10-01':
from ..v2019_10_01.aio.operations_async import Operations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def providers(self):
"""Instance depends on the API version:
* 2016-02-01: :class:`ProvidersOperations<azure.mgmt.resource.v2016_02_01.aio.operations_async.ProvidersOperations>`
* 2016-09-01: :class:`ProvidersOperations<azure.mgmt.resource.v2016_09_01.aio.operations_async.ProvidersOperations>`
* 2017-05-10: :class:`ProvidersOperations<azure.mgmt.resource.v2017_05_10.aio.operations_async.ProvidersOperations>`
* 2018-02-01: :class:`ProvidersOperations<azure.mgmt.resource.v2018_02_01.aio.operations_async.ProvidersOperations>`
* 2018-05-01: :class:`ProvidersOperations<azure.mgmt.resource.v2018_05_01.aio.operations_async.ProvidersOperations>`
* 2019-05-01: :class:`ProvidersOperations<azure.mgmt.resource.v2019_05_01.aio.operations_async.ProvidersOperations>`
* 2019-05-10: :class:`ProvidersOperations<azure.mgmt.resource.v2019_05_10.aio.operations_async.ProvidersOperations>`
* 2019-07-01: :class:`ProvidersOperations<azure.mgmt.resource.v2019_07_01.aio.operations_async.ProvidersOperations>`
* 2019-08-01: :class:`ProvidersOperations<azure.mgmt.resource.v2019_08_01.aio.operations_async.ProvidersOperations>`
* 2019-10-01: :class:`ProvidersOperations<azure.mgmt.resource.v2019_10_01.aio.operations_async.ProvidersOperations>`
"""
api_version = self._get_api_version('providers')
if api_version == '2016-02-01':
from ..v2016_02_01.aio.operations_async import ProvidersOperations as OperationClass
elif api_version == '2016-09-01':
from ..v2016_09_01.aio.operations_async import ProvidersOperations as OperationClass
elif api_version == '2017-05-10':
from ..v2017_05_10.aio.operations_async import ProvidersOperations as OperationClass
elif api_version == '2018-02-01':
from ..v2018_02_01.aio.operations_async import ProvidersOperations as OperationClass
elif api_version == '2018-05-01':
from ..v2018_05_01.aio.operations_async import ProvidersOperations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations_async import ProvidersOperations as OperationClass
elif api_version == '2019-05-10':
from ..v2019_05_10.aio.operations_async import ProvidersOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import ProvidersOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import ProvidersOperations as OperationClass
elif api_version == '2019-10-01':
from ..v2019_10_01.aio.operations_async import ProvidersOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def resource_groups(self):
"""Instance depends on the API version:
* 2016-02-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.v2016_02_01.aio.operations_async.ResourceGroupsOperations>`
* 2016-09-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.v2016_09_01.aio.operations_async.ResourceGroupsOperations>`
* 2017-05-10: :class:`ResourceGroupsOperations<azure.mgmt.resource.v2017_05_10.aio.operations_async.ResourceGroupsOperations>`
* 2018-02-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.v2018_02_01.aio.operations_async.ResourceGroupsOperations>`
* 2018-05-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.v2018_05_01.aio.operations_async.ResourceGroupsOperations>`
* 2019-05-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.v2019_05_01.aio.operations_async.ResourceGroupsOperations>`
* 2019-05-10: :class:`ResourceGroupsOperations<azure.mgmt.resource.v2019_05_10.aio.operations_async.ResourceGroupsOperations>`
* 2019-07-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.v2019_07_01.aio.operations_async.ResourceGroupsOperations>`
* 2019-08-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.v2019_08_01.aio.operations_async.ResourceGroupsOperations>`
* 2019-10-01: :class:`ResourceGroupsOperations<azure.mgmt.resource.v2019_10_01.aio.operations_async.ResourceGroupsOperations>`
"""
api_version = self._get_api_version('resource_groups')
if api_version == '2016-02-01':
from ..v2016_02_01.aio.operations_async import ResourceGroupsOperations as OperationClass
elif api_version == '2016-09-01':
from ..v2016_09_01.aio.operations_async import ResourceGroupsOperations as OperationClass
elif api_version == '2017-05-10':
from ..v2017_05_10.aio.operations_async import ResourceGroupsOperations as OperationClass
elif api_version == '2018-02-01':
from ..v2018_02_01.aio.operations_async import ResourceGroupsOperations as OperationClass
elif api_version == '2018-05-01':
from ..v2018_05_01.aio.operations_async import ResourceGroupsOperations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations_async import ResourceGroupsOperations as OperationClass
elif api_version == '2019-05-10':
from ..v2019_05_10.aio.operations_async import ResourceGroupsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import ResourceGroupsOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import ResourceGroupsOperations as OperationClass
elif api_version == '2019-10-01':
from ..v2019_10_01.aio.operations_async import ResourceGroupsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def resources(self):
"""Instance depends on the API version:
* 2016-02-01: :class:`ResourcesOperations<azure.mgmt.resource.v2016_02_01.aio.operations_async.ResourcesOperations>`
* 2016-09-01: :class:`ResourcesOperations<azure.mgmt.resource.v2016_09_01.aio.operations_async.ResourcesOperations>`
* 2017-05-10: :class:`ResourcesOperations<azure.mgmt.resource.v2017_05_10.aio.operations_async.ResourcesOperations>`
* 2018-02-01: :class:`ResourcesOperations<azure.mgmt.resource.v2018_02_01.aio.operations_async.ResourcesOperations>`
* 2018-05-01: :class:`ResourcesOperations<azure.mgmt.resource.v2018_05_01.aio.operations_async.ResourcesOperations>`
* 2019-05-01: :class:`ResourcesOperations<azure.mgmt.resource.v2019_05_01.aio.operations_async.ResourcesOperations>`
* 2019-05-10: :class:`ResourcesOperations<azure.mgmt.resource.v2019_05_10.aio.operations_async.ResourcesOperations>`
* 2019-07-01: :class:`ResourcesOperations<azure.mgmt.resource.v2019_07_01.aio.operations_async.ResourcesOperations>`
* 2019-08-01: :class:`ResourcesOperations<azure.mgmt.resource.v2019_08_01.aio.operations_async.ResourcesOperations>`
* 2019-10-01: :class:`ResourcesOperations<azure.mgmt.resource.v2019_10_01.aio.operations_async.ResourcesOperations>`
"""
api_version = self._get_api_version('resources')
if api_version == '2016-02-01':
from ..v2016_02_01.aio.operations_async import ResourcesOperations as OperationClass
elif api_version == '2016-09-01':
from ..v2016_09_01.aio.operations_async import ResourcesOperations as OperationClass
elif api_version == '2017-05-10':
from ..v2017_05_10.aio.operations_async import ResourcesOperations as OperationClass
elif api_version == '2018-02-01':
from ..v2018_02_01.aio.operations_async import ResourcesOperations as OperationClass
elif api_version == '2018-05-01':
from ..v2018_05_01.aio.operations_async import ResourcesOperations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations_async import ResourcesOperations as OperationClass
elif api_version == '2019-05-10':
from ..v2019_05_10.aio.operations_async import ResourcesOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import ResourcesOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import ResourcesOperations as OperationClass
elif api_version == '2019-10-01':
from ..v2019_10_01.aio.operations_async import ResourcesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def tags(self):
"""Instance depends on the API version:
* 2016-02-01: :class:`TagsOperations<azure.mgmt.resource.v2016_02_01.aio.operations_async.TagsOperations>`
* 2016-09-01: :class:`TagsOperations<azure.mgmt.resource.v2016_09_01.aio.operations_async.TagsOperations>`
* 2017-05-10: :class:`TagsOperations<azure.mgmt.resource.v2017_05_10.aio.operations_async.TagsOperations>`
* 2018-02-01: :class:`TagsOperations<azure.mgmt.resource.v2018_02_01.aio.operations_async.TagsOperations>`
* 2018-05-01: :class:`TagsOperations<azure.mgmt.resource.v2018_05_01.aio.operations_async.TagsOperations>`
* 2019-05-01: :class:`TagsOperations<azure.mgmt.resource.v2019_05_01.aio.operations_async.TagsOperations>`
* 2019-05-10: :class:`TagsOperations<azure.mgmt.resource.v2019_05_10.aio.operations_async.TagsOperations>`
* 2019-07-01: :class:`TagsOperations<azure.mgmt.resource.v2019_07_01.aio.operations_async.TagsOperations>`
* 2019-08-01: :class:`TagsOperations<azure.mgmt.resource.v2019_08_01.aio.operations_async.TagsOperations>`
* 2019-10-01: :class:`TagsOperations<azure.mgmt.resource.v2019_10_01.aio.operations_async.TagsOperations>`
"""
api_version = self._get_api_version('tags')
if api_version == '2016-02-01':
from ..v2016_02_01.aio.operations_async import TagsOperations as OperationClass
elif api_version == '2016-09-01':
from ..v2016_09_01.aio.operations_async import TagsOperations as OperationClass
elif api_version == '2017-05-10':
from ..v2017_05_10.aio.operations_async import TagsOperations as OperationClass
elif api_version == '2018-02-01':
from ..v2018_02_01.aio.operations_async import TagsOperations as OperationClass
elif api_version == '2018-05-01':
from ..v2018_05_01.aio.operations_async import TagsOperations as OperationClass
elif api_version == '2019-05-01':
from ..v2019_05_01.aio.operations_async import TagsOperations as OperationClass
elif api_version == '2019-05-10':
from ..v2019_05_10.aio.operations_async import TagsOperations as OperationClass
elif api_version == '2019-07-01':
from ..v2019_07_01.aio.operations_async import TagsOperations as OperationClass
elif api_version == '2019-08-01':
from ..v2019_08_01.aio.operations_async import TagsOperations as OperationClass
elif api_version == '2019-10-01':
from ..v2019_10_01.aio.operations_async import TagsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
async def close(self):
await self._client.close()
async def __aenter__(self):
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details):
await self._client.__aexit__(*exc_details)
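# Minimal usage sketch (illustrative only). It assumes the separate azure-identity
# package for the credential, which this module does not import, and a placeholder
# subscription id supplied by the caller.
async def _list_resource_groups_example(subscription_id):
    from azure.identity.aio import DefaultAzureCredential
    credential = DefaultAzureCredential()
    async with ResourceManagementClient(credential, subscription_id) as client:
        async for rg in client.resource_groups.list():
            print(rg.name)
    await credential.close()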
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
name = "Hello World"
return name
@app.route('/good')
def good():
name = "Good"
return name
# When running this file as the main program.
if __name__ == "__main__":
app.run(debug=True)
|
def main():
print("In Bake")
|
import tarfile
import os
import numpy as np
import cv2
import h5py
import random
# Parameters
test_count = 1000
dataset_name = "basic_io"
directory = "/media/neduchal/data2/datasety/miniplaces/images"
categories_filename = "./categories_io.txt"
output_path = "/media/neduchal/data2/datasety/places365_256x256_prepared"
val_path = "/media/neduchal/data2/datasety/miniplaces/miniplaces/data/val.txt"
print("Creating dataset " + dataset_name)
print()
desc_file = open(categories_filename, "r").read().split("\n")
print("loading classes")
if desc_file[-1] == "":
desc_file = desc_file[:-1]
# Classes loading
classes = []
classes_nums = []
classes_io = []
for row in desc_file:
items = row.split(" ")
classes.append(items[0])
classes_nums.append(items[1])
classes_io.append(items[2])
# Get directories
train_directory = os.path.join(directory, "train")
test_directory = os.path.join(directory, "test")
val_directory = os.path.join(directory, "val")
# Get number of chars of directory
fname_begin_train = len(train_directory)
fname_begin_test = len(test_directory)
fname_begin_val = len(val_directory)
# Set the output directory and create it if it does not exist
output_directory = os.path.join(output_path, dataset_name)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print("VAL")
print("loading val files")
val_names = []
for root, dirs, files in os.walk(val_directory):
for name in files:
val_names.append(os.path.join(root, name))
classes_val = open(val_path).read().split("\n")
print("loading val data")
val_data_x = []
val_data_y = []
val_data_y_io = []
for i, filename in enumerate(val_names):
print(int(100*(i/len(val_names))))
im = cv2.imread(filename)
index = classes_val.index(filename[fname_begin_val:-13])
val_data_x.append(im)
inout = int(classes_io[int(classes_val[i].split(" ")[1])])
val_data_y.append(int(classes_val[i].split(" ")[1]))
val_data_y_io.append(inout)
print("saving val data")
val_f = h5py.File(os.path.join(output_directory, 'val.h5'), 'w')
val_f["data_x"] = val_data_x
val_f["data_y"] = val_data_y
val_f["data_y_io"] = val_data_y_io
val_f.flush()
val_f.close()
del(val_data_x)
del(val_data_y)
print("TRAIN")
desc_file = open("./categories_io.txt",
"r").read().split("\n")
print("loading classes")
if desc_file[-1] == "":
desc_file = desc_file[:-1]
classes = []
classes_nums = []
classes_io = []
classes_count = []
for row in desc_file:
items = row.split(" ")
classes.append(items[0])
classes_nums.append(items[1])
classes_io.append(items[2])
classes_count.append(0)
print("loading train data")
train_names = []
for root, dirs, files in os.walk(train_directory):
for name in files:
train_names.append(os.path.join(root, name))
random.shuffle(train_names)
test_names = train_names[0:int(0.1 * len(train_names))]
train_names = train_names[int(0.1 * len(train_names)):]
print(len(test_names), len(train_names))
train_data_x = []
train_data_y = []
train_data_y_io = []
print("processing train data")
for i, filename in enumerate(train_names):
if (i % 5000) == 0:
print(int(100*(i/len(train_names))))
index = classes.index(filename[fname_begin_train:-13])
if classes_count[index] >= 100:
continue
classes_count[index] += 1
im = cv2.imread(filename)
train_data_x.append(im)
train_data_y.append(int(classes_nums[index]))
train_data_y_io.append(int(classes_io[index]))
print("saving train data")
train_f = h5py.File(os.path.join(output_directory, 'train.h5'), 'w')
train_f["data_x"] = train_data_x
train_f["data_y"] = train_data_y
train_f["data_y_io"] = train_data_y_io
train_f.flush()
train_f.close()
del(train_data_x)
del(train_data_y)
del(train_data_y_io)
print("TEST")
print("loading test data")
test_data_x = []
test_data_y = []
test_data_y_io = []
for i, filename in enumerate(test_names):
if (i % 100) == 0:
print(int(100*(i/test_count)))
if i >= test_count:
break
im = cv2.imread(filename)
index = classes.index(filename[fname_begin_train:-13])
test_data_x.append(im)
test_data_y.append(int(classes_nums[index]))
test_data_y_io.append(int(classes_io[index]))
print("saving test h5 file")
test_f = h5py.File(os.path.join(output_directory, 'test.h5'), 'w')
test_f["data_x"] = test_data_x
test_f["data_y"] = test_data_y
test_f["data_y_io"] = test_data_y_io
test_f.flush()
test_f.close()
del(test_data_x)
del(test_data_y)
del(test_data_y_io)
print("DONE")
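# Minimal read-back sketch (illustrative only, not part of the original script):
# reload one of the splits written above from its HDF5 file.
def load_split(path):
    with h5py.File(path, 'r') as f:
        return f["data_x"][...], f["data_y"][...], f["data_y_io"][...]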
|
import RPi.GPIO as GPIO
import time
front_lights_left_pin = 5
front_lights_right_pin = 6
rear_lights_pin = 13
power_pin = 12
fwd_pin = 17
back_pin = 27
left_pin = 24
right_pin = 23
pwm_fwd = None
pwm_back = None
pwm_left = None
pwm_right = None
pwm_front_lights_left = None
pwm_front_lights_right = None
pwm_rear_lights = None
blink = 0
def init():
global fwd_pin, back_pin
global pwm_fwd, pwm_back
global left_pin, right_pin
global pwm_left, pwm_right
global front_lights_left_pin, front_lights_right_pin
global rear_lights_pin, pwm_front_lights_left, pwm_front_lights_right, pwm_rear_lights
GPIO.setmode(GPIO.BCM)
GPIO.setup(front_lights_left_pin, GPIO.OUT)
GPIO.setup(front_lights_right_pin, GPIO.OUT)
GPIO.setup(rear_lights_pin, GPIO.OUT)
pwm_front_lights_left = GPIO.PWM(front_lights_left_pin, 120)
pwm_front_lights_right = GPIO.PWM(front_lights_right_pin, 120)
pwm_rear_lights = GPIO.PWM(rear_lights_pin, 120)
#GPIO.setup(power_pin, GPIO.OUT)
GPIO.setup(left_pin, GPIO.OUT)
pwm_left = GPIO.PWM(left_pin, 100)
GPIO.setup(right_pin, GPIO.OUT)
pwm_right = GPIO.PWM(right_pin, 100)
GPIO.setup(fwd_pin, GPIO.OUT)
pwm_fwd = GPIO.PWM(fwd_pin, 100)
GPIO.setup(back_pin, GPIO.OUT)
pwm_back = GPIO.PWM(back_pin, 100)
def changeDutyCycle(pwm, dc, hz):
if dc == 0:
pwm.stop()
else:
pwm.ChangeFrequency(hz)
pwm.start(dc)
def apply_state(state):
global pwm_fwd, pwm_back
global pwm_left, pwm_right
global pwm_front_lights_left, pwm_front_lights_right, pwm_rear_lights
changeDutyCycle(pwm_rear_lights, state['rear_lights'], 120)
blink = state['blink']
if blink == -1:
changeDutyCycle(pwm_front_lights_left, 50, 1)
changeDutyCycle(pwm_front_lights_right, state['front_lights'], 120)
elif blink == 1:
changeDutyCycle(pwm_front_lights_left, state['front_lights'], 120)
changeDutyCycle(pwm_front_lights_right, 50, 1)
else:
changeDutyCycle(pwm_front_lights_left, state['front_lights'], 120)
changeDutyCycle(pwm_front_lights_right, state['front_lights'], 120)
changeDutyCycle(pwm_fwd, state['fwd'] * state['pwm_move'], 100)
changeDutyCycle(pwm_back, state['back'] * state['pwm_move'], 100)
changeDutyCycle(pwm_left, state['left'] * state['pwm_steer'], 100)
changeDutyCycle(pwm_right, state['right'] * state['pwm_steer'], 100)
def cleanup():
global pwm_fwd, pwm_back
global pwm_left, pwm_right
global pwm_front_lights_left, pwm_front_lights_right, pwm_rear_lights
pwm_fwd.stop()
pwm_back.stop()
pwm_left.stop()
pwm_right.stop()
pwm_front_lights_left.stop()
pwm_front_lights_right.stop()
pwm_rear_lights.stop()
GPIO.cleanup()
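# Minimal usage sketch (illustrative only; requires real Raspberry Pi GPIO hardware).
# The dictionary keys mirror exactly those read in apply_state() above.
def drive_forward_briefly():
    init()
    state = {'fwd': 1, 'back': 0, 'left': 0, 'right': 0,
             'pwm_move': 60, 'pwm_steer': 0,
             'front_lights': 30, 'rear_lights': 10, 'blink': 0}
    apply_state(state)
    time.sleep(1.0)
    cleanup()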
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Soulweaver'
import os
import logging
import argparse
from randomizer import PROG_NAME, PROG_VERSION, config, Randomizer
def add_enable_disable_argument(p, name, default=False, help_enable=None, help_disable=None,
default_passage=' (Default)', dest_name=None, **kwargs):
dest_name = dest_name if dest_name else name.replace('-', '_')
if help_enable is not None:
help_enable += default_passage if default else ''
if help_disable is not None:
help_disable += default_passage if not default else ''
group = p.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=dest_name, action='store_true', help=help_enable, **kwargs)
group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help_disable, **kwargs)
p.set_defaults(**{dest_name: default})
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Randomizes a Pokémon Colosseum or XD ISO.",
add_help=False
)
parser_behavior_group = parser.add_argument_group('Tool behavior')
parser_behavior_group.add_argument(
'-v', '--version',
action='version',
help="Display the program version and exit.",
version="{} version {}".format(PROG_NAME, PROG_VERSION)
)
parser_behavior_group.add_argument(
'-h', '--help',
action='help',
help="Display this help message and exit."
)
parser_behavior_group.add_argument(
'iso_path',
action='store',
help='A path to the ISO file to be randomized. Required.',
metavar='isopath'
)
parser_behavior_group.add_argument(
'-o', '--output-path',
action='store',
help='The path where the randomized ISO file will be written. Required unless --in-place is set, '
'in which case its value is ignored.',
metavar='PATH'
)
parser_behavior_group.add_argument(
'--in-place',
action='store_true',
help='Do the randomization in-place. This means the original ISO file will be overwritten!'
)
parser_behavior_group.add_argument(
'-l', '--loglevel',
action='store',
help='The desired verbosity level. Any messages with the same or higher '
'level than the chosen one will be displayed. '
'Available values: critical, error, warning, info, debug. Default: info.',
choices=['critical', 'error', 'warning', 'info', 'debug'],
type=str.lower,
default='info',
metavar='level'
)
parser_behavior_group.add_argument(
'--dump-files',
action='store_true',
help='Dumps the files extracted from the ISO and the files to be written to the ISO. '
'This option is only useful to you if you are a developer.'
)
parser_behavior_group.add_argument(
'--seed',
action='store',
type=int,
metavar='SEED',
help='Set the randomizer seed. Using the same randomizer version with the same ISO, same options and '
'the same seed results in identical randomization results. By default, a random seed provided by your '
'operating system will be used.'
)
parser_pkmn_randomization_group = parser.add_argument_group('Pokémon randomization options')
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkstats',
default=config.rng_pkstats,
help_enable='Enable Pokémon base stats randomization.',
        help_disable='Keep the original Pokémon base stats. Other --rng-pkstats flags have no effect.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkstats-retain-bst',
default=config.rng_pkstats_retain_bst,
help_enable='Redistribute the Pokémon\'s base stat total rather than '
'setting the values to fully random values.',
help_disable='Ignore the Pokémon\'s original base stat total.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkstats-family',
default=config.rng_pkstats_family,
help_enable='Try to make the stat distributions consistent within each evolution family.',
help_disable='Ignore the stat distribution of other evolution family members.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkstats-family-vary-branched-evo',
default=config.rng_pkstats_family_vary_branched_evo,
help_enable='Shuffle the base stat factors for evolutions other than the first when the Pokémon has more '
'than one evolution.',
help_disable='Use the same base stat factors even with branched evolutions, leading to branches having '
'very similar stats.'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pkstats-variance',
action='store',
default=config.rng_pkstats_variance,
type=float,
metavar='VAR',
help='Decides the variance for the Gauss distribution according to which the base stat factors are generated. '
'Lower values result in more uniform stats, while higher values result in more extreme stats. '
'(Default: 0.35)'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkstats-wg-1hp',
default=config.rng_pkstats_wg_1hp,
help_enable='A Pokémon that has Wonder Guard is also always set to have 1 HP.',
help_disable='A Pokémon with Wonder Guard is randomized like any other Pokémon.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pktypes',
default=config.rng_pktypes,
help_enable='Enable Pokémon type randomization.',
        help_disable='Keep the original Pokémon types. Other --rng-pktypes flags have no effect.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pktypes-family',
default=config.rng_pktypes_family,
help_enable='Try to make typing in evolution families more natural. Evolutions keep the primary type of the '
'previous stage Pokémon and may gain or alter the second type.',
help_disable='Ignore the types of other evolution family members.'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pktypes-family-change-ratio',
action='store',
default=config.rng_pktypes_family_change_ratio,
type=int,
metavar='RATIO',
help='Control the percentage probability of an evolved form gaining a new type. '
'Only used if --rng-pktypes-family is also enabled. '
'(Default: 33)'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pktypes-monotype-ratio',
action='store',
default=config.rng_pktypes_monotype_ratio,
type=int,
metavar='RATIO',
help='Control the percentage probability of the randomizer forcing a Pokémon to only have a single type. '
'This restriction does not apply to evolved forms if --rng-pktypes-family is enabled. '
'(Default: 33)'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkabi',
default=config.rng_pkabi,
help_enable='Enable Pokémon ability randomization.',
        help_disable='Keep the original Pokémon abilities. Other --rng-pkabi flags have no effect.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkabi-family',
default=config.rng_pkabi_family,
help_enable='Try to make abilities in evolution families more natural. Evolutions have a chance of keeping '
'the abilities of the previous stage Pokémon.',
        help_disable='Ignore the abilities of other evolution family members.'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pkabi-family-change-ratio',
action='store',
default=config.rng_pkabi_family_change_ratio,
type=int,
metavar='RATIO',
help='Control the percentage probability of an evolved form getting its abilities also randomized. '
'Only used if --rng-pkabi-family is also enabled. '
'(Default: 33)'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pkabi-monoabi-ratio',
action='store',
default=config.rng_pkabi_monoabi_ratio,
type=int,
metavar='RATIO',
help='Control the percentage probability of the randomizer forcing a Pokémon to only have a single ability. '
'This restriction does not apply to evolved forms if --rng-pkabi-family is enabled. '
'(Default: 33)'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pkabi-ban',
nargs='*',
metavar='ABILITY',
help='Forbid specific abilities to be given to any Pokémon whose ability is randomized. '
'A list of abilities in upper case, with spaces converted to underscores, is expected. '
'You can allow all abilities by providing the single ability name NONE. '
'(Default: ' + (', '.join(config.rng_pkabi_ban)) + ')'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkmoves',
default=config.rng_pkmoves,
help_enable='Enable Pokémon moveset randomization.',
        help_disable='Keep the original Pokémon movesets. Other --rng-pkmoves flags have no effect.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkmoves-dmg-progression',
default=config.rng_pkmoves_dmg_progression,
help_enable='Rearrange damaging moves so that weaker moves are learned first.',
help_disable='Don\'t rearrange damaging moves.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkmoves-lv1-fullset',
default=config.rng_pkmoves_lv1_fullset,
help_enable='Provide every Pokémon with four moves at level one.',
help_disable='Do not add additional level one move slots.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkmoves-ensure-damaging-first',
default=config.rng_pkmoves_lv1_ensure_damaging,
help_enable='Make sure that the first move the Pokémon learns is a damaging move.',
help_disable='The first move the Pokémon learns can be a status move.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkmoves-ensure-damaging-interval',
default=config.rng_pkmoves_ensure_damaging_interval,
help_enable='Make sure at least every fourth move the Pokémon learns is a damaging move.',
help_disable='The spacing of damaging moves is not controlled.'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pkmoves-any-type-ratio',
action='store',
default=config.rng_pkmoves_any_type_ratio,
type=int,
metavar='RATIO',
        help='Control the percentage probability that the move pool for a randomized learned move includes damaging moves '
'of a type the Pokémon doesn\'t have. All non-damaging moves will still be available for each move slot, '
'as well as Normal-type moves, unless otherwise enforced by --rng-pkmoves-min-own-type-ratio. '
'(Default: 25)'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pkmoves-min-damaging-ratio',
action='store',
default=config.rng_pkmoves_min_damaging_ratio,
type=int,
metavar='RATIO',
        help='Control the percentage probability that the move pool for a randomized learned move is restricted to only '
'damaging moves. '
'(Default: 25)'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pkmoves-min-own-type-ratio',
action='store',
default=config.rng_pkmoves_min_own_type_ratio,
type=int,
metavar='RATIO',
        help='Control the percentage probability that the move pool for a randomized learned move is restricted to '
             'moves matching one of the Pokémon\'s own types. '
'(Default: 15)'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pkmoves-ban',
nargs='*',
metavar='MOVE',
help='Forbid specific moves to be put into the moveset of any Pokémon whose move pool is randomized. '
'A list of moves in upper case, with spaces converted to underscores, is expected. '
'You can allow all moves by providing the single move name NONE. '
'(Default: ' + (', '.join(config.rng_pkmoves_ban)) + ')'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkmoves-no-dupes',
default=config.rng_pkmoves_no_dupes,
        help_enable='Make sure no Pokémon learns the same move twice.',
help_disable='Pokémon are allowed to learn the same move multiple times.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pktm',
default=config.rng_pktm,
help_enable='Enable Pokémon TM learnset randomization.',
        help_disable='Keep the original Pokémon TM learnsets. Other --rng-pktm flags have no effect.'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pktm-min-own-type-ratio',
action='store',
default=config.rng_pktm_min_own_type_ratio,
type=int,
metavar='RATIO',
help='Control the probability percentage of the Pokémon being able to learn TMs that contain damaging moves '
'with the same type as the Pokémon itself. '
'(Default: 90)'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pktm-min-normal-type-ratio',
action='store',
default=config.rng_pktm_min_normal_type_ratio,
type=int,
metavar='RATIO',
help='Control the probability percentage of the Pokémon being able to learn TMs that contain Normal-type '
'damaging moves. This ratio is not used for Normal-type Pokémon themselves. '
'(Default: 75)'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pktm-min-other-type-ratio',
action='store',
default=config.rng_pktm_min_other_type_ratio,
type=int,
metavar='RATIO',
help='Control the probability percentage of the Pokémon being able to learn TMs that contain damaging moves '
             'with a different type than the Pokémon itself, excluding Normal-type moves. '
'(Default: 40)'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pktm-min-status-ratio',
action='store',
default=config.rng_pktm_min_status_ratio,
type=int,
metavar='RATIO',
help='Control the probability percentage of the Pokémon being able to learn TMs that contain non-damaging'
' moves. (Default: 75)'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pktm-family',
default=config.rng_pktm_family,
help_enable='Allow evolved Pokémon to always learn all TMs their pre-evolutions can learn.',
help_disable='Pre-evolution TM learnsets are not considered.'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pktm-full-compat',
action='store_true',
help='All Pokémon learn all TMs. This is a shorthand for setting all the other TM ratio variables to 100.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkevo',
default=config.rng_pkevo,
help_enable='Randomize the evolutions of Pokémon that originally evolve.',
help_disable='Keep original Pokémon evolutions.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkevo-shuffle',
default=config.rng_pkevo_shuffle,
help_enable='Shuffle existing evolutions rather than fully randomizing each evolution independently.',
help_disable='Pick every evolution at random.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkevo-samestage',
default=config.rng_pkevo_samestage,
help_enable='Limit randomization so that first stages only evolve into second stages and second stages '
'into third stages. Note that baby Pokémon are considered first stages for the purposes of the '
'randomizer.',
help_disable='Allow evolutions to randomize to any other Pokémon. This may lead into strong Pokémon evolving '
'into weak Pokémon and long or circular evolution chains.'
)
add_enable_disable_argument(
parser_pkmn_randomization_group,
'rng-pkitem',
default=config.rng_pkitem,
help_enable='Randomize the items the wild Pokémon can be holding.',
help_disable='Keep original items.'
)
parser_pkmn_randomization_group.add_argument(
'--rng-pkitem-ratio',
action='store',
default=config.rng_pkitem_ratio,
type=int,
metavar='NUM',
help='Control the probability of a wild Pokémon holding an item in the first place. (Default: 33)'
)
parser_move_randomization_group = parser.add_argument_group('Move randomization options')
add_enable_disable_argument(
parser_move_randomization_group,
'rng-tm-moves',
default=config.rng_tm_moves,
help_enable='Randomize the moves taught by the 50 Technical Machines.',
help_disable='Keep the original TM moves.'
)
add_enable_disable_argument(
parser_move_randomization_group,
'rng-move-power',
default=config.rng_move_power,
help_enable='Randomize the base power of each damaging move to a value between 10 and 180, divisible by 5.',
help_disable='Do not change the move base powers.'
)
add_enable_disable_argument(
parser_move_randomization_group,
'rng-move-types',
default=config.rng_move_types,
help_enable='Randomize the type of every move except Curse and Struggle. Note that this cannot make normal '
'moves shadow moves and vice versa. The resulting types will be used when determining Pokémon '
'learn-up movesets if --rng-pkmoves is enabled.',
help_disable='Do not change the typing of moves.'
)
add_enable_disable_argument(
parser_move_randomization_group,
'rng-move-accuracy',
default=config.rng_move_accuracy,
help_enable='Randomize the accuracy of every move that uses the accuracy check. The accuracy of each move '
'will be between 30%% and 100%%, divisible by 5, with a tendency towards 100%% accurate moves.',
help_disable='Do not change the accuracy of moves.'
)
add_enable_disable_argument(
parser_move_randomization_group,
'rng-move-pp',
default=config.rng_move_pp,
help_enable='Randomize the PP of every move to a value between 5 and 40, divisible by 5.',
help_disable='Do not change the PP of moves.'
)
parser_trainer_randomization_group = parser.add_argument_group('Trainer randomization options')
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers',
default=config.rng_trainers,
help_enable='Enable randomization of trainers\' Pokémon.',
help_disable='Do not randomize trainer Pokémon.'
)
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers-cat-story',
default=config.rng_trainers_cat_story,
help_enable='Randomize story related trainers.',
help_disable='Keep original teams for story related trainers.'
)
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers-cat-mt-battle',
default=config.rng_trainers_cat_mt_battle,
help_enable='Randomize Mt. Battle related trainers. '
'The same data is also used in Colosseum for the Battle Now mode.',
help_disable='Keep original teams for Mt. Battle related trainers.'
)
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers-cat-colo-battle',
default=config.rng_trainers_cat_colo_battle,
help_enable='Randomize the Pokémon pools in the Colosseum Battle mode. (Colo only)',
help_disable='Keep original Pokémon in Colosseum Battle Mode.'
)
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers-cat-quick-battle',
default=config.rng_trainers_cat_quick_battle,
help_enable='Randomize the Pokémon pools in the Quick Battle mode. (XD only)',
help_disable='Keep original Pokémon in Quick Battle Mode.'
)
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers-cat-bingo',
default=config.rng_trainers_cat_bingo,
help_enable='Randomize the Pokémon in Battle Bingo. May result in unwinnable bingo cards. (XD only)',
        help_disable='Keep original Pokémon in Battle Bingo. The bingo cards may still turn out unwinnable depending on other '
'randomization options.'
)
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers-cat-battle-sim',
default=config.rng_trainers_cat_battle_sim,
help_enable='Randomize the Pokémon in Battle CDs. May result in unwinnable scenarios. (XD only)',
        help_disable='Keep original Pokémon in Battle CDs. The setups may still turn out unwinnable depending on other randomization '
'options.'
)
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers-unique-shadow',
default=config.rng_trainers_unique_shadow,
help_enable='Ensure that all Shadow Pokémon are unique.',
help_disable='Allow duplicate Shadow Pokémon.'
)
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers-power-progression',
default=config.rng_trainers_power_progression,
        help_enable='The average base stat total of Pokémon in trainers\' teams rises with the Pokémon\'s level, leading to '
'natural power progression where early game trainers are weak and late game trainers are strong.',
help_disable='Don\'t restrict Pokémon based on their power.'
)
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers-level-up-only',
default=config.rng_trainers_level_up_only,
help_enable='Trainers\' Pokémon only use the last four level-up moves based on the current level.',
help_disable='Trainers\' Pokémon can have any level-up or TM moves up to the current level.'
)
add_enable_disable_argument(
parser_trainer_randomization_group,
'rng-trainers-item',
default=config.rng_trainers_item,
help_enable='Randomize the items the Pokémon can be holding.',
help_disable='Keep original items.'
)
parser_trainer_randomization_group.add_argument(
'--rng-trainers-item-ratio',
action='store',
default=config.rng_trainers_item_ratio,
type=int,
metavar='NUM',
help='Control the probability of a trainer Pokémon holding an item in the first place. (Default: 33)'
)
parser_item_randomization_group = parser.add_argument_group('Item randomization options')
add_enable_disable_argument(
parser_item_randomization_group,
'rng-items',
default=config.rng_items,
help_enable='Enable randomization of items lying around in Orre.',
help_disable='Do not randomize item boxes.'
)
add_enable_disable_argument(
parser_item_randomization_group,
'rng-items-shuffle',
default=config.rng_items_shuffle,
help_enable='Redistribute the existing items into the different boxes.',
help_disable='Randomize each item individually.'
)
parser_item_randomization_group.add_argument(
'--rng-items-berry-reroll',
action='store',
default=config.rng_items_berry_reroll,
type=int,
metavar='NUM',
help='Reroll the random item this many times if the result was a berry. Has no effect if --rng-items-shuffle '
'is enabled. (Default: 1)'
)
add_enable_disable_argument(
parser_item_randomization_group,
'rng-items-random-qty',
default=config.rng_items_random_qty,
help_enable='Randomize the quantities of items.',
help_disable='Keep original quantities.'
)
parser_gift_pkmn_randomization_group = parser.add_argument_group('Gift/Starter Pokémon options')
add_enable_disable_argument(
parser_gift_pkmn_randomization_group,
'rng-starters',
default=config.rng_starters,
help_enable='Enable editing starter Pokémon.',
help_disable='The default starter Pokémon are used.'
)
parser_gift_pkmn_randomization_group.add_argument(
'--rng-starters-fixed',
nargs='*',
metavar='SPECIES',
help='Rather than randomizing the starter Pokémon, specify which species the starter(s) should be. '
'For Colosseum, provide two species, and for XD, provide one. Note the non-standard spellings of the '
'following Pokémon that must be used: NIDORAN_F, NIDORAN_M, FARFETCH_D, MR_MIME, HO_OH'
)
parser_gift_pkmn_randomization_group.add_argument(
'--rng-starters-max-bst',
type=int,
metavar='BST',
default=config.rng_starters_max_bst,
help='Limit the base stats total of the starter Pokémon to be smaller or equal to this value. Set to a '
'high value to allow all Pokémon as starters. (Default: 500)'
)
add_enable_disable_argument(
parser_gift_pkmn_randomization_group,
'rng-trade-wants',
default=config.rng_trade_wants,
help_enable='Randomize the Pokémon requested by NPCs who can trade with the player. In XD, Hordel is also '
'normalized to accept the purified form of the same Pokémon he originally gives to you.',
help_disable='Keep the requested Pokémon for trades as they are. In XD, Hordel will also always accept '
'a purified Togepi or Togetic, overriding --rng-gifts if set.'
)
add_enable_disable_argument(
parser_gift_pkmn_randomization_group,
'rng-trade-offers',
default=config.rng_trade_offers,
help_enable='Randomize the Pokémon received from the NPC trades.',
help_disable='Keep the original Pokémon the NPCs trade to you.'
)
add_enable_disable_argument(
parser_gift_pkmn_randomization_group,
'rng-gifts',
default=config.rng_gifts,
help_enable='Randomize the gift Pokémon the NPCs give to the player.',
help_disable='Retain the original gift Pokémon.'
)
parser_tutor_randomization_group = parser.add_argument_group('Tutor randomization options (XD)')
add_enable_disable_argument(
        parser_tutor_randomization_group,
'rng-tutor-moves',
default=config.rng_tutor_moves,
help_enable='Randomize the moves taught by the Move Tutor in Agate Village.',
help_disable='Keep the original tutor moves.'
)
add_enable_disable_argument(
parser_tutor_randomization_group,
'rng-pktutor',
default=config.rng_pktutor,
help_enable='Enable Pokémon tutor learnset randomization.',
        help_disable='Keep the original Pokémon tutor learnsets. Other --rng-pktutor flags have no effect.'
)
parser_tutor_randomization_group.add_argument(
'--rng-pktutor-min-own-type-ratio',
action='store',
default=config.rng_pktutor_min_own_type_ratio,
type=int,
metavar='RATIO',
help='Control the probability percentage of the Pokémon being able to learn damaging tutor moves with the same '
'type as the Pokémon itself. '
'(Default: 90)'
)
parser_tutor_randomization_group.add_argument(
'--rng-pktutor-min-normal-type-ratio',
action='store',
default=config.rng_pktutor_min_normal_type_ratio,
type=int,
metavar='RATIO',
help='Control the probability percentage of the Pokémon being able to learn damaging Normal-type tutor moves. '
'This ratio is not used for Normal-type Pokémon themselves. '
'(Default: 75)'
)
parser_tutor_randomization_group.add_argument(
'--rng-pktutor-min-other-type-ratio',
action='store',
default=config.rng_pktutor_min_other_type_ratio,
type=int,
metavar='RATIO',
help='Control the probability percentage of the Pokémon being able to learn damaging tutor moves with a '
             'different type than the Pokémon itself, excluding Normal-type moves. '
'(Default: 40)'
)
parser_tutor_randomization_group.add_argument(
'--rng-pktutor-min-status-ratio',
action='store',
default=config.rng_pktutor_min_status_ratio,
type=int,
metavar='RATIO',
help='Control the probability percentage of the Pokémon being able to learn non-damaging tutor moves. '
'(Default: 75)'
)
add_enable_disable_argument(
parser_tutor_randomization_group,
'rng-pktutor-family',
default=config.rng_pktutor_family,
help_enable='Allow evolved Pokémon to always learn all tutor moves their pre-evolutions can learn.',
help_disable='Pre-evolution tutor learnsets are not considered.'
)
parser_tutor_randomization_group.add_argument(
'--rng-pktutor-full-compat',
action='store_true',
help='All Pokémon learn all tutor moves. This is a shorthand for setting all the other tutor ratio variables '
'to 100.'
)
add_enable_disable_argument(
parser_tutor_randomization_group,
'early-tutors',
dest_name='patch_early_tutors',
default=config.patch_early_tutors,
help_enable='Make all tutor moves available from the start of the game.',
help_disable='Let tutor moves stay locked until specific game events.'
)
parser_wild_randomization_group = parser.add_argument_group('Poké Spot randomization options (XD)')
add_enable_disable_argument(
parser_wild_randomization_group,
'rng-pokespot',
default=config.rng_pokespot,
        help_enable='Randomize the species available at Poké Spots.',
help_disable='Retain the original Poké Spot species.'
)
add_enable_disable_argument(
parser_wild_randomization_group,
'rng-pokespot-improve-levels',
default=config.rng_pokespot_improve_levels,
        help_enable='Raise the level range of the Pokémon available at Poké Spots.',
help_disable='Retain the original Poké Spot levels.'
)
add_enable_disable_argument(
parser_wild_randomization_group,
'rng-pokespot-bst-based',
default=config.rng_pokespot_bst_based,
help_enable='Allow only Pokémon with suitable base stat total (using the same algorithm as '
'--rng-trainers-power-progression) to appear at Poké Spots.',
help_disable='Any Pokémon can appear at Poké Spots.'
)
parser_patches_group = parser.add_argument_group('Miscellaneous patches')
add_enable_disable_argument(
parser_patches_group,
'update-evolutions',
dest_name='patch_impossible_evolutions',
default=config.patch_impossible_evolutions,
help_enable='Alter evolutions that would otherwise require connecting to another game to happen on a specific '
'level or with a specific evolution item instead. Note that evolutions are changed before they are '
'randomized, if --rng-pkevo is enabled. Evolutions are changed as follows: '
'Kadabra evolves into Alakazam at level 32. '
'Machoke, Graveler and Haunter evolve into Machamp, Golem and Gengar at level 37. '
'Onix, Porygon, Scyther and Feebas evolve into Steelix, Porygon2, Scizor and Milotic at level 30. '
'Seadra evolves into Kingdra at level 42. '
'Poliwhirl and Clamperl evolve into Politoed and Gorebyss with a Sun Stone. '
                    'Slowpoke and Clamperl evolve into Slowking and Huntail with a Moon Stone. '
'Additionally, in Colosseum, Eevee will also evolve into Espeon and Umbreon with Sun and Moon '
'Stone, respectively.',
help_disable='Do not change evolution methods. Some evolutions will be unavailable.'
)
add_enable_disable_argument(
parser_patches_group,
'improve-catch-rate',
default=config.improve_catch_rate,
help_enable='Improve the catch rates of Pokémon.',
help_disable='Keep original catch rates.'
)
parser_patches_group.add_argument(
'--improve-catch-rate-minimum',
action='store',
default=config.improve_catch_rate_minimum,
type=int,
metavar='NUM',
help='Set the minimum catch rate allowed for a Pokémon, from 1 to 255. Pokémon whose catch rate is higher '
'will keep their original catch rates. (Default: 90)'
)
add_enable_disable_argument(
parser_patches_group,
'fix-name-casing',
default=config.fix_name_casing,
help_enable='If the ISO region is either USA or Europe, change the Pokémon, item, etc. names to use '
'natural casing rather than all uppercase.',
help_disable='Do not alter name casing.'
)
args = parser.parse_args()
    if not args.in_place and (args.output_path is None or args.output_path == args.iso_path):
print('Error: a unique output path must be specified when not randomizing in-place!')
exit(1)
logging.basicConfig(level=getattr(logging, args.loglevel.upper()),
format="{asctime} [{levelname}] {message}", style='{')
config.configure(working_dir=os.path.dirname(__file__), **vars(args))
randomizer = Randomizer(rom_path=args.iso_path, output_path=args.output_path, in_place=args.in_place)
randomizer.randomize()
|
import sys # For argv
import getopt # For options
import re # regex replacing
import numpy as np
from itertools import combinations
class Coalition:
def __init__(self,coalition,numplayers):
self.input_coalitions = coalition
self.numplayers = numplayers
self.best_structure = {}
self.key_value = {}
self.best_val = 0
self.best_coalition = ""
def permutes(self,key):
unformatted_combos = []
for i in range(1,len(key)//2 + 1):
unformatted_combos += list(combinations(key,i))
combos = [""]*len(unformatted_combos)
for i in range(len(unformatted_combos)):
combos[i] = "".join(unformatted_combos[i][:])
combos = [combos[i] + "," for i in range(len(combos))]
for i in range(len(combos)):
for j in range(len(key)):
if key[j] not in combos[i]:
combos[i] += key[j]
# Only do assert from here because we have some redundancies in the small
# keys.
# Commenting out checks because you might run different code
#if key == "123":
# assert(combos==['1,23','2,13','3,12'])
#elif key == '124':
# assert(combos==['1,24','2,14','4,12'])
#elif key == '134':
# assert(combos==['1,34','3,14','4,13'])
#elif key == '234':
# assert(combos==['2,34','3,24','4,23'])
#elif key == "1234":
# # up to -1 because our method gives a '34,12' which is redundant
# assert(combos[:-1]==['1,234','2,134','3,124','4,123','12,34','13,24','14,23','23,14','24,13'])
return combos
# Originally did this to check the answer. Then checked above version with
# these keys. They appear to match
#if key == "12":
# return ["1,2"]
#elif key == '13':
# return ['1,3']
#elif key == '14':
# return ['1,4']
#elif key == '23':
# return ['2,3']
#elif key == '24':
# return ['2,4']
#elif key == '34':
# return ['3,4']
#elif key == "123":
# return ['1,23','2,13','3,12']
#elif key == '124':
# return ['1,24','2,14','4,12']
#elif key == '134':
# return ['1,34','3,14','4,13']
#elif key == '234':
# return ['2,34','3,24','4,23']
#elif key == "1234":
# return ['1,234','2,134','3,124','4,123','12,34','13,24','14,23','23,14','24,13']
def set_structure(self,key,val):
        # Canonicalize the key so e.g. '21' and '12' refer to the same coalition.
        n_key = ''.join(sorted(key))
        if n_key in self.best_structure:
print("This key is already here")
exit(1)
self.best_structure[n_key] = key
self.key_value[n_key] = val
if self.best_val is None:
self.best_val = val
elif val > self.best_val:
self.best_val = val
def get_value(self,key):
t = 0
for k in key.split(','):
#if k == ',': continue
#if self.has_key(k):
if k in self.key_value:
tt = self.key_value[k]
            else:
                # Assumed fix: the original fallback referenced undefined names (t_key, tk);
                # evaluate the unseen sub-coalition first, then reuse its stored value.
                self.optimal_val(k)
                tt = self.key_value[k]
t += tt
return t
def optimal_val(self,key):
if key not in self.key_value:
#self.key_value[key] = max(self.input_coalitions[key],max([self.get_value(k) for k in self.permutes(key)]))
self.best_structure[key] = key
self.key_value[key] = self.input_coalitions[key]
permutes = self.permutes(key)
for k in permutes:
if self.get_value(k) > self.key_value[key]:
self.key_value[key] = self.get_value(k)
self.best_structure[key] = k
if self.key_value[key] > self.best_val:
self.best_val = self.key_value[key]
def get_optimal(self):
for key in self.input_coalitions:
if len(key) == 1:
self.key_value[key] = self.input_coalitions[key]
self.best_structure[key] = key
else:
self.optimal_val(key)
#print(self.best_val)
self.best_coalition = self.get_best_coalition(key)
#print(self.best_coalition)
def get_best_coalition(self,key):
keys = self.best_structure[key].split(',')
return ','.join((self.best_structure[k] for k in keys))
def print_to_file(self,output):
s = '{{' + self.best_coalition.replace(',','},{') + "}}"
s += "," + str(self.best_val) + "\n"
with open(output,'w') as _file:
_file.write(s)
_file.close()
def readfile(filename:str, d:str=None,dtype_=str):
r""" numpy read file wrapper """
return np.loadtxt(str(filename), delimiter=d, dtype=dtype_)
def print_usage():
r""" Prints file usage """
print("usage: coalition.py -i <input file> -o <output file>")
print("-h, --help\t prints this message")
print("-i, --input\t sets the input file")
print("-o, --output\t sets the output file: defaults to optimalCS.txt")
def command_line_args(argv):
r""" Handles the command line arguments """
try:
        opts, args = getopt.getopt(argv,"hi:o:d:",["help","input=",\
"output=","delimiter="])
except getopt.GetoptError:
print_usage()
exit(1)
output = "optimalCS.txt"
for opt, arg in opts:
if opt in ("-h", "--help"):
print_usage()
exit(0)
elif opt in ("-i", "--input"):
in_file = readfile(str(arg))
elif opt in ("-o", "--output"):
output = str(arg)
return in_file,output
def format_input(input_file):
#r""" Returns arrays of the coalitions and rewards """
r""" Returns dictionary of the coalitions and rewards """
    numplayers = int(input_file[0])
coalitions = {}
for i in range(1,len(input_file)):
data = re.sub("{|}","",input_file[i]).rsplit(',',1)
        coalitions[re.sub(",","",data[0])] = float(data[1])
return numplayers,coalitions
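# Illustrative input format (inferred from readfile/format_input, not from project docs):
# the first line holds the number of players and every later line pairs a brace-wrapped
# coalition with its value, written without spaces so np.loadtxt reads one token per line:
#   4
#   {1},30
#   {1,2},50
#   {1,2,3,4},140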
def main(argv):
in_file,out_file = command_line_args(argv)
n,c = format_input(in_file)
C = Coalition(c,n)
C.get_optimal()
C.print_to_file(out_file)
if __name__ == '__main__':
if len(sys.argv) <= 2:
print_usage()
exit(1)
main(sys.argv[1:])
|
''' Base class for the test cases.
The Python ``unittest`` framework does not allow large-scale
parameterized tests. This simple base class is a workaround that
allows the tests to be run under various conditions.
'''
import unittest
import numpy as np
import torch
FLOAT_TOLPLACES = 2
FLOAT_TOL = 10 ** (-FLOAT_TOLPLACES)
DOUBLE_TOLPLACES = 6
DOUBLE_TOL = 10 ** (-DOUBLE_TOLPLACES)
class BaseTest(unittest.TestCase):
def __init__(self, methodName='runTest', tensor_type='float', gpu=False,
seed=13):
super().__init__(methodName)
self.tensor_type = tensor_type
self.gpu = gpu
self.seed(seed)
@property
def tol(self):
return FLOAT_TOL if self.tensor_type == 'float' else DOUBLE_TOL
@property
def tolplaces(self):
return FLOAT_TOLPLACES if self.tensor_type == 'float' else \
DOUBLE_TOLPLACES
@property
def type(self):
return torch.FloatTensor if self.tensor_type == 'float' else \
torch.DoubleTensor
def seed(self, seed):
torch.manual_seed(seed)
if self.gpu:
torch.cuda.manual_seed(seed)
    def assertArraysAlmostEqual(self, arr1, arr2):
        try:
            self.assertTrue(np.allclose(arr1, arr2, atol=self.tol))
        except AssertionError:
            # Print the offending arrays to ease debugging, then re-raise.
            print(arr1, arr2)
            raise
@staticmethod
def get_testsuite(class_name, tensor_type='float', gpu=False, seed=13):
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(class_name)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(class_name(name, tensor_type, gpu, seed))
return suite
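# Minimal usage sketch (MyTest stands for any hypothetical BaseTest subclass):
#   suite = unittest.TestSuite()
#   suite.addTests(BaseTest.get_testsuite(MyTest, tensor_type='float'))
#   suite.addTests(BaseTest.get_testsuite(MyTest, tensor_type='double'))
#   unittest.TextTestRunner(verbosity=2).run(suite)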
|
# coding: utf-8
from PIL import Image, ImageFont, ImageDraw
from subprocess import call
class WordGenerator:
"""
Generates pithy statements for our visual content
"""
source = None
@staticmethod
def get_content(length=100):
return "I sat on a rug, biding my time, drinking her wine"
class ImageGenerator:
"""
Steals pretty images for the content to sit on
"""
@staticmethod
def get_image():
return "test:Boat"
class BitterGravelist:
word_generator = None
image_generator = None
def __init__(self, word_generator=WordGenerator(), image_generator=ImageGenerator):
self.word_generator = word_generator
self.image_generator = image_generator
def generate_gravel(self):
image = Image.open(self.image_generator.get_image())
draw = ImageDraw.Draw(image)
color = '#ffc800'
# create border
padding = 10
thickness = 5
top = padding
left = padding
right = image.size[0] - padding
bottom = image.size[1] - padding
        # ImageDraw.rectangle expects (x0, y0, x1, y1), i.e. (left, top, right, bottom)
        draw.rectangle((left, top, right, top + thickness), fill=color)        # top edge
        draw.rectangle((left, top, left + thickness, bottom), fill=color)      # left edge
        draw.rectangle((right - thickness, top, right, bottom), fill=color)    # right edge
        draw.rectangle((left, bottom - thickness, right, bottom), fill=color)  # bottom edge
text = self.word_generator.get_content()
font = ImageFont.truetype("Helvetica", 16)
draw.text((10, 25), text, font=font)
image_save_file_name = '/Users/danieljilg//Desktop/output.png'
image.show()
# image.save(image_save_file_name, 'PNG')
# call(['open', image_save_file_name])
if __name__ == '__main__':
BitterGravelist().generate_gravel()
|
#!/usr/bin/python -u
# From https://learn.adafruit.com/adafruits-raspberry-pi-lesson-11-ds18b20-temperature-sensing/software
import os
import glob
import time
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
# Grabs the first probe out of the directory
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
def c_to_f(c):
return c * 9.0 / 5.0 + 32.0
def read_temp_raw():
f = open(device_file, 'r')
lines = f.readlines()
f.close()
return lines
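# For reference, the w1_slave file usually holds two lines of the form
#   72 01 4b 46 7f ff 0e 10 57 : crc=57 YES
#   72 01 4b 46 7f ff 0e 10 57 t=23125
# (byte values illustrative); read_temp() below waits for the CRC 'YES' and
# converts the millidegree value after 't=' to degrees Celsius.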
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
return float(temp_string) / 1000.0
while True:
temp = read_temp()
print(temp, c_to_f(temp))
time.sleep(1)
|
def curry(f, *a, **kw):
    def curried(*more_a, **more_kw):
        # Merge the captured and new keyword arguments; the merged dict must be
        # splatted with ** rather than passed as a positional argument.
        return f(*(a + more_a), **dict(kw, **more_kw))
    return curried
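# Minimal usage sketch:
#   def volume(length, width, height=1):
#       return length * width * height
#   half_area = curry(volume, 2)
#   half_area(3)            # -> 6
#   half_area(3, height=4)  # -> 24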
|
from setuptools import setup, find_packages
import codecs
import os
with open("README.md", "r") as fh:
long_description = fh.read()
# Setting up
setup(
name="auto-machine-learning",
version='0.0.12',
license='MIT',
author="Mihir Gada, Zenil Haria, Arnav Mankad, Kaustubh Damania",
author_email="",
url = 'https://github.com/mihir2510/AutoML_library',
download_url ='https://pypi.org/project/auto-machine-learning/',
project_urls={
"Documentation": "https://github.com/mihir2510/AutoML_library",
"Source Code": "https://github.com/mihir2510/AutoML_library",
},
    description='This is a Python library for AutoML that works for prediction and classification tasks.',
long_description_content_type="text/markdown",
long_description=long_description,
packages=find_packages(),
install_requires=[
'imblearn',
'pandas',
'scikit-optimize',
'hyperopt',
'scikit-learn==0.24.1',
'kiwisolver==1.3.1',
'matplotlib==3.3.4',
'Pillow==8.1.0',
'openpyxl',
'plotly',
'pytest',
'pytest-runner',
'seaborn',
'psutil',
'kaleido',
],
keywords=['automl', 'data preprocessing','feature engineering','ensembling','super learner'],
classifiers=[
        'Development Status :: 3 - Alpha',      # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
        # Specify which Python versions you want to support
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
include_package_data=True
)
|
# Generated by Django 3.1.1 on 2020-11-13 19:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mascotas', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Mascotas',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ide', models.CharField(max_length=10)),
('codigo', models.CharField(max_length=10)),
('ide_tipo', models.CharField(max_length=10)),
('ide_raza', models.CharField(max_length=10)),
('nombre', models.CharField(max_length=100)),
('tiene_vacunas', models.CharField(max_length=100)),
('estado', models.CharField(max_length=100)),
('fecha_creacion', models.CharField(max_length=100)),
('fecha_modificacion', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Razas',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ide', models.CharField(max_length=10)),
('code', models.CharField(max_length=10)),
('nombre', models.CharField(max_length=100)),
('abreviatura', models.CharField(max_length=100)),
('id_tipo', models.CharField(max_length=10)),
('estado', models.CharField(max_length=100)),
('fecha_creacion', models.CharField(max_length=100)),
('fecha_modificacion', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Tipos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ide', models.CharField(max_length=10)),
('code', models.CharField(max_length=10)),
('nombre', models.CharField(max_length=100)),
('abreviatura', models.CharField(max_length=100)),
('estado', models.CharField(max_length=100)),
('fecha_creacion', models.CharField(max_length=100)),
('fecha_modificacion', models.CharField(max_length=100)),
],
),
migrations.DeleteModel(
name='Afiliados',
),
]
|
import graphene
from .book import BookCreateMutation, BookDeleteMutation
from .auth import AuthMutation, RefreshMutation
class MainMutation(graphene.ObjectType):
book = BookCreateMutation.Field()
delete = BookDeleteMutation.Field()
auth = AuthMutation.Field()
refresh = RefreshMutation.Field()
|
from . import _endpoints as endpoints
import requests
import json
from statham.schema.elements import Object
from statham.schema.constants import NotPassed
class StathamJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Object):
return {type(o).properties[k].source: v for k, v in o._dict.items() if not isinstance(v, NotPassed)}
return json.JSONEncoder.default(self, o)
class Client:
def __init__(self, api_key, api_endpoint_base='https://api2.toshl.com'):
self.api_key = api_key
self.api_endpoint_base = api_endpoint_base
self.accounts = endpoints.Accounts(self)
self.budgets = endpoints.Budgets(self)
self.categories = endpoints.Categories(self)
self.currencies = endpoints.Currencies(self)
self.entries = endpoints.Entries(self)
self.exports = endpoints.Exports(self)
self.images = endpoints.Images(self)
self.me = endpoints.Me(self)
self.tags = endpoints.Tags(self)
def request(self, href, method, argument_type=None, return_type=None, **kwargs):
options = {}
if argument_type:
# Remap kwargs (which are modified to avoid Python reserved keywords) back into
# the source keys of the argument object.
remap = {}
for k, v in kwargs.items():
remap[argument_type.properties[k].source] = v
# Construct the argument, which will validate all kwargs.
argument = argument_type(remap)
# If we GET, use the original remap, otherwise, JSON encode the argument.
if method == 'GET':
options['params'] = remap
else:
options['data'] = json.dumps(argument, cls=StathamJSONEncoder)
options['headers'] = {'Content-Type': 'application/json'}
# Do the request.
response = requests.request(method,
self.api_endpoint_base + href.format(**kwargs),
auth=(self.api_key, ''),
**options)
# Check if the response is OK.
if response.ok:
# Attempt to construct the return type, handling lists, and some
# dicts especially (Toshl decided that on some endpoints such as
# the currencies list that they'd actually return a dict).
if return_type:
plain = response.json()
if isinstance(plain, list):
return [return_type(p) for p in plain]
                elif isinstance(plain, dict) and set(plain.keys()).issubset(set(p.source for p in return_type.properties.values())):
                    return return_type(plain)
elif isinstance(plain, dict):
return {k: return_type(v) for k, v in plain.items()}
else:
return plain
else:
response.raise_for_status()
|
from typing import Optional
from xml.dom import minidom as dom
import time
class Message:
'Represents a fragment of message.'
def __init__(self, user: Optional[str], message: str, timestamp: float):
'Initializes a new instance.'
self._user = user
self._message = message
self._timestamp = timestamp
@property
def user(self) -> Optional[str]:
'The user who sent this message.'
return self._user
@property
def message(self) -> str:
'The message text.'
return self._message
@property
def timestamp(self) -> float:
'The timestamp of this message.'
return self._timestamp
class MessageManager:
'Represents a collection of messages.'
def __init__(self, room_name: str):
'Initializes with a room name.'
self._messages = []
self._roomName = room_name
def messageReceived(self, user: str, message: str) -> None:
'Called when a new message occurred.'
timestamp = time.time()
self._messages.append(Message(user, message, timestamp))
def metaMessage(self, message: str) -> None:
'Called when a meta message was received.'
timestamp = time.time()
self._messages.append(Message(None, message, timestamp))
def exportMessages(self) -> str:
'Exports the messages as xml.'
doc = dom.getDOMImplementation().createDocument(None, 'messages', dom.DocumentType('messages'))
doc.documentElement.setAttribute("room", self._roomName)
for message in self._messages:
elem = doc.createElement("message")
            # message.user is None for meta messages; minidom attribute values must be strings.
            elem.setAttribute("user", message.user or '')
elem.setAttribute("timestamp", str(message.timestamp))
elem.appendChild(
doc.createTextNode(message.message)
)
doc.documentElement.appendChild(elem)
return doc.toprettyxml(indent=' ')
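# Minimal usage sketch:
#   manager = MessageManager('lobby')
#   manager.messageReceived('alice', 'Hello!')
#   manager.metaMessage('alice joined the room')
#   print(manager.exportMessages())  # pretty-printed <messages room="lobby"> XML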
|
import atexit
import collections
import logging
import os
import os.path
import re
from pathlib import Path
from tempfile import NamedTemporaryFile, gettempdir
from typing import IO, Any, Dict, List, Optional
import mypy
from mypy import api as mypy_api
from mypy.defaults import CONFIG_FILES as MYPY_CONFIG_FILES
from pylsp import hookimpl
from pylsp.config.config import Config
from pylsp.workspace import Document, Workspace
line_pattern = re.compile(r"((?:^[a-z]:)?[^:]+):(?:(\d+):)?(?:(\d+):)? (\w+): (.*)")
logger = logging.getLogger("pylsp.plugins.mypy_rnx")
logger.info(f"Using mypy located at: {mypy.__file__}")
def _get_dmypy_status_filepath() -> Path:
tmpdir = Path(gettempdir())
unique_mypy_path_per_python_exec = Path(mypy.__file__).parent.relative_to(
Path.home()
)
dmypy_status_dirpath = tmpdir / unique_mypy_path_per_python_exec
if not dmypy_status_dirpath.exists():
dmypy_status_dirpath.mkdir(parents=True)
return dmypy_status_dirpath / "dmypy-status.json"
class State:
initialized: bool = False
mypy_config_file: Optional[str] = None
livemode_tmpfile: IO[str] = NamedTemporaryFile("w", delete=False)
# In non-live-mode the file contents aren't updated.
# Returning an empty diagnostic clears the diagnostic result,
# so store a cache of last diagnostics for each file a-la the pylint plugin,
# so we can return some potentially-stale diagnostics.
# https://github.com/python-lsp/python-lsp-server/blob/v1.0.1/pylsp/plugins/pylint_lint.py#L55-L62
last_diagnostics: Dict[str, List[Any]] = collections.defaultdict(list)
# dmypy stuff (keep one state file per mypy executable):
dmypy_daemon_status: Optional[int] = None
dmypy_status_file: str = _get_dmypy_status_filepath().as_posix()
def parse_line(
line: str, document: Optional[Document] = None
) -> Optional[Dict[str, Any]]:
result = line_pattern.match(line)
logger.info(line)
if not result:
return None
file_path, linenoStr, offsetStr, severity, msg = result.groups()
if file_path != "<string>": # live mode
# results from other files can be included, but we cannot return them.
if document and document.path and not document.path.endswith(file_path):
msg = f"discarding result for {file_path} against {document.path}"
logger.warning(msg)
return None
lineno = int(linenoStr or 1) - 1 # 0-based line number
offset = int(offsetStr or 1) - 1 # 0-based offset
errno = 1 if severity == "error" else 2
range_diag = {
"start": {"line": lineno, "character": offset},
# There may be a better solution, but mypy does not provide end
"end": {"line": lineno, "character": offset + 1},
}
diag: Dict[str, Any] = {
"source": "mypy",
"range": range_diag,
"message": msg,
"severity": errno,
}
if document:
# although mypy does not provide the end of the affected range, we
# can make a good guess by highlighting the word that Mypy flagged
word = document.word_at_position(diag["range"]["start"])
if word:
diag["range"]["end"]["character"] = diag["range"]["start"][
"character"
] + len(word)
return diag
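# Example (for illustration only): with --show-column-numbers a mypy report line such as
#   app/main.py:12:5: error: Incompatible return value type (got "str", expected "int")
# is parsed into a diagnostic whose range starts at {"line": 11, "character": 4},
# with severity 1 (error) and the remainder of the line as the message.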
def _ensure_finding_config_file(config: Config) -> None:
# Check for mypy config file to be used
if State.mypy_config_file:
return
workspace = config._root_path
logger.info(f"Searching for mypy config file from {workspace}")
for filepath in MYPY_CONFIG_FILES:
location = os.path.join(workspace, filepath)
if os.path.isfile(location):
State.mypy_config_file = location
logger.info(f"Found mypy config file at {State.mypy_config_file}")
break
@hookimpl
def pylsp_settings(config: Config) -> Dict[str, Any]:
_ensure_finding_config_file(config)
return {
"plugins": {
"pylsp_mypy_rnx": {
"enabled": True,
"live_mode": True,
"args": [],
"dmypy": False,
"daemon_args": {},
}
}
}
def parse_run(
document: Document, report: str, errors: str, exit_status: int
) -> List[Dict[str, Any]]:
logger.debug(f"report:\n{report}")
if errors:
logger.warning(f"errors:\n{errors}")
logger.warning(f"exit_status: {exit_status}")
last_diags = []
for line in report.splitlines():
logger.debug("parsing: line = %r", line)
diag = parse_line(line, document)
if diag:
last_diags.append(diag)
logger.info("pylsp_mypy_rnx len(diagnostics) = %s", len(last_diags))
State.last_diagnostics[document.path] = last_diags
return last_diags
def _to_dmypy_cmd(cmd: List[str]) -> str:
return f"'dmypy {' '.join(cmd)}'"
@hookimpl
def pylsp_lint(
workspace: Workspace, document: Document, is_saved: bool
) -> List[Dict[str, Any]]:
config = workspace._config
settings = config.plugin_settings("pylsp_mypy_rnx", document_path=document.path)
if not State.initialized:
logger.info(f"lint settings: {settings}")
logger.info(f"document.path: {document.path}")
State.initialized = True
live_mode = settings["live_mode"]
dmypy = settings["dmypy"]
if dmypy and live_mode:
# dmypy can only be efficiently run on files that have been saved, see:
# https://github.com/python/mypy/issues/9309
logger.warning("live_mode is not supported with dmypy, disabling")
live_mode = False
args = settings["args"]
args = [
*args,
"--show-column-numbers",
"--follow-imports",
"normal",
"--incremental",
]
if State.mypy_config_file:
args.extend(["--config-file", State.mypy_config_file])
if not is_saved:
if not live_mode: # Early return in case of not saved and not live-mode
last_diags = State.last_diagnostics.get(document.path, [])
msg = f"non-live, returning cached diagnostics ({len(last_diags)})"
logger.debug(msg)
return last_diags
else:
# use shadow file in live-mode
logger.info(f"live_mode with tmpfile: {State.livemode_tmpfile.name}")
with open(State.livemode_tmpfile.name, "w") as f:
f.write(document.source)
args.extend(["--shadow-file", document.path, State.livemode_tmpfile.name])
if not dmypy:
cmd = [*args, document.path]
        logger.info(f"executing: 'mypy {' '.join(cmd)}'")
report, errors, exit_status = mypy_api.run(cmd)
return parse_run(document, report, errors, exit_status)
daemon_args = ["--status-file", State.dmypy_status_file]
if State.dmypy_daemon_status is None:
msg = f"dmypy - status file can be found at {State.dmypy_status_file}"
logger.info(msg)
# First check if no daemon already running
cmd = [*daemon_args, "status"]
logger.info(f"call dmypy status: {_to_dmypy_cmd(cmd)}")
result, errors, State.dmypy_daemon_status = mypy_api.run_dmypy(cmd)
if State.dmypy_daemon_status != 0: # not yet up and running
logger.debug(errors)
daemon_args_start = [
*daemon_args,
"start",
*(settings.get("daemon_args", {}).get("start", [])),
]
cmd = [*daemon_args_start, "--", *args, document.path]
logger.info(f"call dmypy start: {_to_dmypy_cmd(cmd)}")
_, errors, State.dmypy_daemon_status = mypy_api.run_dmypy(cmd)
if State.dmypy_daemon_status != 0:
logger.warning(errors)
logger.debug(f"current dmypy daemon status: {State.dmypy_daemon_status}")
daemon_args_check = [
*daemon_args,
"check",
*(settings.get("daemon_args", {}).get("check", [])),
]
cmd = [*daemon_args_check, document.path]
logger.info(f"call dmypy check: {_to_dmypy_cmd(cmd)}")
report, errors, exit_status = mypy_api.run_dmypy(cmd)
return parse_run(document, report, errors, exit_status)
@atexit.register
def close() -> None:
if State.dmypy_daemon_status is not None:
daemon_args = ["--status-file", State.dmypy_status_file]
cmd = [*daemon_args, "kill"]
logger.info(f"call dmypy kill: {_to_dmypy_cmd(cmd)}")
mypy_api.run_dmypy(cmd)
if State.livemode_tmpfile:
os.unlink(State.livemode_tmpfile.name) # cleanup tmpfile
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Elemental Appliances and Software Activation Service"
prefix = "elemental-activations"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
CompleteAccountRegistration = Action("CompleteAccountRegistration")
CompleteFileUpload = Action("CompleteFileUpload")
DownloadSoftware = Action("DownloadSoftware")
GenerateLicenses = Action("GenerateLicenses")
GetActivation = Action("GetActivation")
ListTagsForResource = Action("ListTagsForResource")
StartAccountRegistration = Action("StartAccountRegistration")
StartFileUpload = Action("StartFileUpload")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
|
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.modeling_outputs import SequenceClassifierOutputWithPast
from .modeling_hart import HaRTBasePreTrainedModel
from .hart import HaRTPreTrainedModel
import logging
logger = logging.getLogger(__name__)
class HaRTForSequenceClassification(HaRTBasePreTrainedModel):
# _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config, model_name_or_path=None, pt_model=None):
super().__init__(config)
self.freeze_model = config.freeze_model
self.num_labels = config.num_labels
self.finetuning_task = config.finetuning_task
self.use_history_output = config.use_history_output
self.use_hart_no_hist = config.use_hart_no_hist
if model_name_or_path:
self.transformer = HaRTPreTrainedModel.from_pretrained(model_name_or_path)
elif pt_model:
self.transformer = pt_model
else:
self.transformer = HaRTPreTrainedModel(config)
self.init_weights()
if not self.freeze_model and not self.finetuning_task=='ope' and not self.finetuning_task=='user':
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
if self.finetuning_task=='age':
self.transform = nn.Linear(config.n_embd, config.n_embd)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
# Model parallel
self.model_parallel = False
self.device_map = None
def get_pooled_logits(self, logits, input_ids, inputs_embeds):
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
assert (
self.config.pad_token_id is not None or batch_size == 1
), "Cannot handle batch sizes > 1 if no padding token is defined."
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if input_ids is not None:
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
# since we want the index of the last predicted token of the last block only.
sequence_lengths = sequence_lengths[:, -1]
else:
sequence_lengths = -1
                logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
# get the logits corresponding to the indices of the last pred tokens (of the last blocks) of each user
pooled_logits = logits[range(batch_size), sequence_lengths]
return pooled_logits
def forward(
self,
input_ids=None,
user_ids=None,
history=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
history=history,
output_block_last_hidden_states=True,
output_block_extract_layer_hs=True,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
all_blocks_last_hidden_states = transformer_outputs.all_blocks_extract_layer_hs if self.freeze_model else transformer_outputs.all_blocks_last_hidden_states
if self.finetuning_task=='user' or self.finetuning_task=='ope' or self.finetuning_task=='age':
if self.use_history_output:
states = transformer_outputs.history[0]
masks = transformer_outputs.history[1]
multiplied = tuple(l * r for l, r in zip(states, masks))
all_blocks_user_states = torch.stack(multiplied, dim=1)
all_blocks_masks = torch.stack(masks, dim=1)
                summed = torch.sum(all_blocks_user_states, dim=1)
                divisor = torch.sum(all_blocks_masks, dim=1)
                hidden_states = summed / divisor
else:
raise ValueError("Since you don't want to use the user-states/history output for a user-level task, please customize the code as per your requirements.")
else:
hidden_states = torch.stack(all_blocks_last_hidden_states, dim=1)
if self.use_hart_no_hist:
logits = self.score(all_blocks_last_hidden_states[0]) if self.freeze_model else self.score(self.ln_f(all_blocks_last_hidden_states[0]))
batch_size, _, sequence_length = input_ids.shape
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
pooled_logits = logits[range(batch_size), sequence_lengths.squeeze()]
else:
            if self.finetuning_task in ('ope', 'user') or self.freeze_model:
logits = self.score(hidden_states)
elif self.finetuning_task=='age':
                logits = self.score(self.transform(self.ln_f(hidden_states)))
else:
logits = self.score(self.ln_f(hidden_states))
pooled_logits = logits if (user_ids is None or self.use_history_output) else \
self.get_pooled_logits(logits, input_ids, inputs_embeds)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))
else:
labels = labels.long()
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
|
import torch
import torch.nn as nn
import logging
logger = logging.getLogger(__name__)
def get_concat(concat: str, embedding_dim: int):
"""
:param concat: Concatenation style
:param embedding_dim: Size of inputs that are subject to concatenation
:return: Function that performs concatenation, Size of concatenation output
"""
concat_func = None
concat_dim = None
if concat == 'simple':
concat_func = lambda a, b: torch.cat((a, b), dim=1)
concat_dim = 2 * embedding_dim
elif concat == 'dif':
# x = np.abs(a-b)
concat_func = lambda a, b: (a - b).abs()
concat_dim = 1 * embedding_dim
elif concat == 'prod':
# x = a * b
concat_func = lambda a, b: a * b
concat_dim = 1 * embedding_dim
elif concat == 'dif-prod':
# x = np.hstack((np.abs(a-b), a * b))
concat_func = lambda a, b: torch.cat(((a - b).abs(), a * b), dim=1)
concat_dim = 2 * embedding_dim
elif concat == '3d-prod':
# x = np.hstack((a, b, a*b))
concat_func = lambda a, b: torch.cat((a, b, a * b), dim=1)
concat_dim = 3 * embedding_dim
elif concat == '3d-dif':
# x = np.hstack((a, b, np.abs(a-b)))
concat_func = lambda a, b: torch.cat((a, b, (a - b).abs()), dim=1)
concat_dim = 3 * embedding_dim
elif concat == '4d-prod-dif':
# x = np.hstack((a, b, a*b, np.abs(a-b)))
concat_func = lambda a, b: torch.cat((a, b, a * b, (a - b).abs()), dim=1)
concat_dim = 4 * embedding_dim
else:
raise ValueError('Unsupported concat mode')
logger.debug(f'concat_dim = {concat_dim} ({concat})')
return concat_func, concat_dim
def get_mlp(input_dim, output_dim, hidden_dim, hidden_layers_count=1, dropout_p=0., activation_cls=nn.ReLU):
"""
Generate a fully-connected layer (MLP) with dynamic input, output and hidden dimension, and hidden layer count.
- when dropout_p > 0, then dropout is applied with given probability after the activation function.
:param input_dim:
:return: Sequential layer
"""
layers = [
# first layer
nn.Linear(input_dim, hidden_dim),
activation_cls(),
]
if dropout_p > 0:
layers.append(nn.Dropout(dropout_p))
for layer_idx in range(1, hidden_layers_count):
        layers.append(nn.Linear(hidden_dim, hidden_dim))
        layers.append(activation_cls())
if dropout_p > 0:
layers.append(nn.Dropout(dropout_p))
# last layer
layers.append(nn.Linear(hidden_dim, output_dim))
# TODO fill linear layers
# nn.init.xavier_normal_(self.classifier.weight)
# Fills the input Tensor with values according to the method described in “Understanding the difficulty of training deep feedforward neural networks” - Glorot, X. & Bengio, Y. (2010), using a normal distribution.
# kaiming_normal_
# Fills the input Tensor with values according to the method described in “Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification” - He, K. et al. (2015), using a normal distribution.
return nn.Sequential(*layers)
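# Hedged usage sketch (not part of the original module): combine get_concat and get_mlp
# to score pairs of embeddings. Shapes and hyperparameters below are illustrative only.
if __name__ == "__main__":
    embedding_dim = 8
    concat_func, concat_dim = get_concat('dif-prod', embedding_dim)
    mlp = get_mlp(input_dim=concat_dim, output_dim=2, hidden_dim=16,
                  hidden_layers_count=2, dropout_p=0.1)
    a = torch.randn(4, embedding_dim)
    b = torch.randn(4, embedding_dim)
    x = concat_func(a, b)  # [4, concat_dim] == [4, 16]
    print(mlp(x).shape)    # torch.Size([4, 2])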
|
# Generated by Django 3.1.4 on 2021-12-12 17:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Questions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('answer', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('category', models.ForeignKey(default='No Category', on_delete=django.db.models.deletion.SET_DEFAULT, to='faq.category')),
],
),
]
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Firefox(executable_path = '/media/vivek/Coding Stuffs/Python_Projects/geckodriver')
driver.get("http://wwww.python.org")
assert "Python" in driver.title
elem = driver.find_element_by_name("q")
elem.clear()
elem.send_keys("pycon")
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
driver.close()
|
n = input()
s = ""
for i in n:
if i==" ":
s += i
elif i.isdigit():
s += i
elif i.islower():
j = ord(i)+13
s += chr(j) if j<123 else chr(j-26)
elif i.isupper():
j = ord(i)+13
s += chr(j) if j<91 else chr(j-26)
print(s)
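# Worked example (illustrative): the loop above applies ROT13 to letters and keeps digits
# and spaces, e.g. "Hello 123" -> "Uryyb 123". Any other character is dropped, because no
# branch appends it to s.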
|
import os
import shutil
SOURCE = 'source'
QRC_FILE = 'resource.qrc'
primary_icons = os.listdir(SOURCE)
contex = [
('disabled', '#ff0000'),
('primary', '#0000ff'),
]
qrc = {
'icon': [],
}
# ----------------------------------------------------------------------
def replace_color(content, replace, color='#0000ff'):
""""""
colors = [color] + [''.join(list(color)[:i] + ['\\\n'] + list(color)[i:]) for i in range(1, 7)]
for c in colors:
content = content.replace(c, replace)
replace = '#ffffff00'
color = '#000000'
colors = [color] + [''.join(list(color)[:i] + ['\\\n'] + list(color)[i:]) for i in range(1, 7)]
for c in colors:
content = content.replace(c, replace)
return content
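# Hedged usage sketch: swap the primary blue for a new color and turn the hard-coded
# black into transparent. The extra entries in `colors` also match hex codes that were
# split across lines with a trailing backslash, which appears to be the intent of
# inserting '\\\n' at every position of the code.
#   svg = '<path fill="#0000ff" stroke="#000000"/>'
#   svg = replace_color(svg, '#ff0000')  # fill -> #ff0000, stroke -> #ffffff00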
# ----------------------------------------------------------------------
def create_qrc(qrc):
""""""
with open(QRC_FILE, 'w') as file:
file.write('<RCC>\n')
for key in qrc:
file.write(f' <qresource prefix="{key}">\n')
for icon in qrc[key]:
# icon = icon.replace(f'{key}/', '')
file.write(f' <file>{icon}</file>\n')
file.write(f' </qresource>\n')
file.write('</RCC>\n')
for folder, _ in contex:
shutil.rmtree(folder, ignore_errors=True)
os.mkdir(folder)
# qrc[folder] = []
for icon in primary_icons:
if not icon.endswith('.svg'):
continue
with open(os.path.join(SOURCE, icon), 'r') as file_input:
original = file_input.read()
for folder, color in contex:
new_content = replace_color(original, color)
file_to_write = os.path.join(folder, icon)
# qrc[folder] += [file_to_write]
qrc['icon'] += [file_to_write]
with open(file_to_write, 'w') as file_output:
file_output.write(new_content)
# print(f"created {file_to_write}")
create_qrc(qrc)
# RCC = '/usr/lib/python3.8/site-packages/PySide2/rcc'
RCC = 'rcc -g python --no-compress --verbose'
command = f"{RCC} {QRC_FILE} -o {QRC_FILE.replace('.qrc', '_rc.py')}"
# print(command)
os.system(command)
|
import feedparser
import html
from datetime import datetime
from park_api.geodata import GeoData
from park_api.util import utc_now
geodata = GeoData(__file__)
def parse_html(xml_data):
feed = feedparser.parse(xml_data)
try:
last_updated = feed["entries"][0]["updated"]
last_updated = datetime.strptime(last_updated[5:25], "%d %b %Y %H:%M:%S").isoformat()
except KeyError:
last_updated = utc_now()
data = {
"lots": [],
"last_updated": last_updated
}
for entry in feed["entries"]:
summary = parse_summary(entry["summary"])
title_elements = parse_title(entry["title"])
lot_identifier = html.unescape((title_elements[2] + " " + title_elements[0]).strip())
lot = geodata.lot(lot_identifier)
data["lots"].append({
"name": html.unescape(title_elements[0]),
"address": lot.address,
"id": html.unescape(lot.id),
"state": "open",
"free": summary[1],
"total": lot.total,
"coords": lot.coords,
"forecast": False,
"lot_type": title_elements[2]
})
return data
def parse_summary(summary):
"""Parse a string from the format 'Anzahl freie Parkplätze: 179' into both its params"""
summary = summary.split(":")
summary[0] = summary[0].strip()
if "?" in summary[0]:
summary[0] = "nodata"
try:
summary[1] = int(summary[1])
except ValueError:
summary[1] = 0
return summary
def parse_title(title):
"""
    Parse a title like 'Parkhaus Bad. Bahnhof' into its (name, address, type) parts; address is currently always empty.
"""
types = ["Parkhaus", "Parkplatz"]
name = title
address = ''
type = ""
if name.split(" ")[0] in types:
type = name.split(" ")[0]
name = " ".join(name.split(" ")[1:])
return name, address, type
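# Illustrative examples, derived from the parsing rules above:
#   parse_summary("Anzahl freie Parkplätze: 179") -> ["Anzahl freie Parkplätze", 179]
#   parse_title("Parkhaus Bad. Bahnhof")          -> ("Bad. Bahnhof", "", "Parkhaus")
#   parse_title("Messeplatz")                     -> ("Messeplatz", "", "")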
|
"""
molconfviewer - Visualize molecule conformations in Jupyter
"""
__version__ = "0.1.0"
import ipywidgets
import py3Dmol
from ipywidgets import interact, fixed
from rdkit import Chem
from rdkit.Chem.rdchem import Mol
from typing import Tuple
class MolConfViewer():
"""Class to generate views of molecule conformations
:param widget_size: canvas size
:type widget_size: tuple(int, int)
:param style: type of drawing molecule, see 3dmol.js
:type style: str in ['line', 'stick', 'sphere', 'cartoon']
:param draw_surface: display SAS
:type draw_surface: bool
:param opacity: opacity of surface, ranging from 0 to 1
:type opacity: float
"""
def __init__(self,
widget_size: Tuple[int, int] = (300, 300),
style: str = "stick",
draw_surface: bool = False,
opacity: float = 0.5):
"""Setup the viewer
"""
self.widget_size = widget_size
assert style in ('line', 'stick', 'sphere', 'cartoon')
self.style = style
self.draw_surface = draw_surface
self.opacity = opacity
def view(self, mol: Mol) :
"""View a RDKit molecule in 3D, with a slider to explore conformations.
Largely inspired from
https://birdlet.github.io/2019/10/02/py3dmol_example/
:param mol: molecule to show conformers for
:type mol: Mol
:return: Nothing, prints a jupyter widget to show the molecule
"""
max_conf_id = mol.GetNumConformers() - 1
conf_id_slider = ipywidgets.IntSlider(min=0,
max=max_conf_id,
step=1)
interact(self.get_viewer,
mol=fixed(mol),
conf_id=conf_id_slider)
def get_viewer(self,
mol: Mol,
conf_id: int = -1) -> py3Dmol.view:
"""Draw a given conformation for a molecule in 3D using 3Dmol.js
:param mol: molecule to show conformers for
:type mol: Mol
:param conf_id: id of the RDKit Conformer in the Mol to visualize
:type conf_id: int
:return: molecule viewer for given conf_id
:rtype: py3Dmol.view
"""
mblock = Chem.MolToMolBlock(mol, confId=conf_id)
viewer = py3Dmol.view(width=self.widget_size[0],
height=self.widget_size[1])
viewer.addModel(mblock, 'mol')
viewer.setStyle({self.style:{}})
if self.draw_surface:
viewer.addSurface(py3Dmol.SAS, {'opacity': self.opacity})
viewer.zoomTo()
return viewer
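# Hedged usage sketch (illustrative, not part of the original package): embed a few
# conformers with RDKit and render one of them. `view()` is meant for Jupyter, where
# it adds a conformer slider; here we only build a single py3Dmol view.
if __name__ == "__main__":
    from rdkit.Chem import AllChem
    mol = Chem.AddHs(Chem.MolFromSmiles("CCO"))
    AllChem.EmbedMultipleConfs(mol, numConfs=5, randomSeed=42)
    viewer = MolConfViewer(style="stick").get_viewer(mol, conf_id=0)
    print(type(viewer))  # py3Dmol.view; call viewer.show() inside a notebook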
|
#!/usr/bin/python
###################
# Library Imports #
###################
from Oedipus.utils.data import *
from Oedipus.utils.misc import *
from Oedipus.utils.graphics import *
from sklearn import metrics
from sklearn.feature_selection import SelectKBest
from sklearn.decomposition import PCA
from sklearn.cross_validation import KFold
from sklearn import tree
from sklearn.naive_bayes import MultinomialNB # Multinomial Naive Bayes
import numpy
from numpy.random import randn
# For tree visualization
from sklearn.externals.six import StringIO
import pydot
import sys, os, time
#####################
# Utility Functions #
#####################
def numOfMismatches(s1, s2):
""" Returns number of character mismatches in two strings """
s1Letters = {k: s1.count(k) for k in s1}
s2Letters = {k: s2.count(k) for k in s2}
# Compare matches
s = {}
for k2 in s2Letters:
if k2 in s1Letters.keys():
s[k2] = abs(s1Letters[k2] - s2Letters[k2])
else:
s[k2] = s2Letters[k2]
# Sum up remaining matches
mismatches = sum(s.values())
return mismatches
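# Illustrative example: numOfMismatches("Encode", "EncodeJit") == 3, because the extra
# characters of s2 relative to s1 ("J", "i", "t") are counted, while characters that
# appear only in s1 are ignored.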
def findTrend(trend, trendList):
""" Finds a specific trend tuple within a list of tuples """
# Assuming the lack of duplicates
for tIndex in range(len(trendList)):
if len(trendList[tIndex]) == 2:
if trendList[tIndex][0] == trend:
return tIndex, trendList[tIndex]
return -1, ()
def mergeTrends(oldTrends, newTrends):
""" Merges two lists of trend tuples, updating the count on-the-fly """
tempTrends = [] + oldTrends
for tIndex in range(len(newTrends)):
trend = newTrends[tIndex]
oldTrendIndex, oldTrend = findTrend(trend[0], tempTrends)
if oldTrendIndex != -1 and len(oldTrend) > 0:
nTrend = (oldTrend[0], oldTrend[1]+trend[1])
tempTrends.pop(oldTrendIndex)
tempTrends.append(nTrend)
else:
tempTrends.append(trend)
return tempTrends
def cmpTuple(x,y):
""" Compares two tuples to the end of sorting a list of tuples """
if x[1] > y[1]:
return -1
elif x[1] < y[1]:
return 1
else:
return 0
##################
# Main functions #
##################
def classifyNaiveBayes(Xtr, ytr, Xte, yte, reduceDim="none", targetDim=0):
""" Classified data using Naive Bayes """
try:
accuracyRate, timing, probabilities = 0.0, 0.0, []
# Reduce dimensionality if requested
Xtr = reduceDimensionality(Xtr, ytr, reduceDim, targetDim) if reduceDim != "none" else Xtr
Xte = reduceDimensionality(Xte, yte, reduceDim, targetDim) if reduceDim != "none" else Xte
# Make sure values are positive because MultinomialNB doesn't take negative features
Xtr = flipSign(Xtr, "+")
Xte = flipSign(Xte, "+")
# Perform classification
nbClassifier = MultinomialNB()
prettyPrint("Training the Naive Bayes algorithm", "debug")
startTime = time.time()
nbClassifier.fit(numpy.array(Xtr), numpy.array(ytr))
# Now test the trained algorithm
prettyPrint("Submitting the test samples", "debug")
predicted = nbClassifier.predict(Xte)
endTime = time.time()
# Compare the predicted and ground truth
accuracyRate = round(metrics.accuracy_score(predicted, yte), 2)
probabilities = nbClassifier.predict_proba(Xte)
# Finally, calculate the time taken to train and classify
timing = endTime-startTime
except Exception as e:
prettyPrint("Error encountered in \"classifyNaiveBayes\": %s" % e, "error")
return accuracyRate, timing, probabilities, predicted
def classifyNaiveBayesKFold(X, y, kFold=2, reduceDim="none", targetDim=0):
""" Classifies data using Naive Bayes and K-Fold cross validation """
try:
groundTruthLabels, predictedLabels = [], []
accuracyRates = [] # Meant to hold the accuracy rates
# Split data into training and test datasets
trainingDataset, testDataset = [], []
trainingLabels, testLabels = [], []
accuracyRates = []
probabilities = []
timings = []
# Reduce dimensionality if requested
if reduceDim != "none":
X_new = reduceDimensionality(X, y, reduceDim, targetDim)
else:
X_new = X
# Now carry on with classification
kFoldValidator = KFold(n=len(X_new), n_folds=kFold, shuffle=False)
# Make sure values are positive because MultinomialNB doesn't take negative features
X_new = flipSign(X_new, "+")
for trainingIndices, testIndices in kFoldValidator:
# Prepare the training and testing datasets
for trIndex in trainingIndices:
trainingDataset.append(X_new[trIndex])
trainingLabels.append(y[trIndex])
for teIndex in testIndices:
testDataset.append(X_new[teIndex])
testLabels.append(y[teIndex])
# Perform classification
startTime = time.time()
nbClassifier = MultinomialNB()
prettyPrint("Training the Naive Bayes algorithm", "debug")
nbClassifier.fit(numpy.array(trainingDataset), numpy.array(trainingLabels))
prettyPrint("Submitting test samples", "debug")
predicted = nbClassifier.predict(testDataset)
endTime = time.time()
# Add that to the groundTruthLabels and predictedLabels matrices
groundTruthLabels.append(testLabels)
predictedLabels.append(predicted)
            # Compare the predicted and ground truth and append to list
accuracyRates.append(round(metrics.accuracy_score(predicted, testLabels), 2))
# Also append the probability estimates
probs = nbClassifier.predict_proba(testDataset)
probabilities.append(probs)
timings.append(endTime-startTime) # Keep track of performance
trainingDataset, trainingLabels = [], []
testDataset, testLabels = [], []
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
prettyPrint("Error encountered in \"classifyNaiveBayesKFold\" +%s: %s" % (exc_tb.tb_lineno, e), "error")
return [], [], []
return accuracyRates, probabilities, timings, groundTruthLabels, predictedLabels
def classifyTree(Xtr, ytr, Xte, yte, splitCriterion="gini", maxDepth=0, visualizeTree=False):
""" Classifies data using CART """
try:
accuracyRate, probabilities, timing = 0.0, [], 0.0
# Perform classification
cartClassifier = tree.DecisionTreeClassifier(criterion=splitCriterion, max_depth=maxDepth)
startTime = time.time()
prettyPrint("Training a CART tree for classification using \"%s\" and maximum depth of %s" % (splitCriterion, maxDepth), "debug")
cartClassifier.fit(numpy.array(Xtr), numpy.array(ytr))
prettyPrint("Submitting the test samples", "debug")
predicted = cartClassifier.predict(Xte)
endTime = time.time()
# Compare the predicted and ground truth and append result to list
accuracyRate = round(metrics.accuracy_score(predicted, yte), 2)
# Also append the probability estimates
probs = cartClassifier.predict_proba(Xte)
probabilities.append(probs)
timing = endTime-startTime # Keep track of performance
if visualizeTree:
# Visualize the tree
dot_data = StringIO()
tree.export_graphviz(cartClassifier, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
prettyPrint("Saving learned CART to \"tritonTree_%s.pdf\"" % getTimestamp(), "debug")
graph.write_pdf("tree_%s.pdf" % getTimestamp())
except Exception as e:
prettyPrint("Error encountered in \"classifyTree\": %s" % e, "error")
return accuracyRate, timing, probabilities, predicted
def classifyTreeKFold(X, y, kFold=2, splitCriterion="gini", maxDepth=0, visualizeTree=False):
""" Classifies data using CART and K-Fold cross validation """
try:
groundTruthLabels, predictedLabels = [], []
accuracyRates = [] # Meant to hold the accuracy rates
# Split data into training and test datasets
trainingDataset, testDataset = [], []
trainingLabels, testLabels = [], []
accuracyRates = []
probabilities = []
timings = []
kFoldValidator = KFold(n=len(X), n_folds=kFold, shuffle=False)
currentFold = 1
for trainingIndices, testIndices in kFoldValidator:
# Prepare the training and testing datasets
for trIndex in trainingIndices:
trainingDataset.append(X[trIndex])
trainingLabels.append(y[trIndex])
for teIndex in testIndices:
testDataset.append(X[teIndex])
testLabels.append(y[teIndex])
# Perform classification
startTime = time.time()
cartClassifier = tree.DecisionTreeClassifier(criterion=splitCriterion, max_depth=maxDepth)
prettyPrint("Training a CART tree for classification using \"%s\" and maximum depth of %s" % (splitCriterion, maxDepth), "debug")
cartClassifier.fit(numpy.array(trainingDataset), numpy.array(trainingLabels))
prettyPrint("Submitting the test samples", "debug")
predicted = cartClassifier.predict(testDataset)
endTime = time.time()
# Add that to the groundTruthLabels and predictedLabels matrices
groundTruthLabels.append(testLabels)
predictedLabels.append(predicted)
# Compare the predicted and ground truth and append result to list
accuracyRates.append(round(metrics.accuracy_score(predicted, testLabels), 2))
# Also append the probability estimates
probs = cartClassifier.predict_proba(testDataset)
probabilities.append(probs)
timings.append(endTime-startTime) # Keep track of performance
if visualizeTree:
# Visualize the tree
dot_data = StringIO()
tree.export_graphviz(cartClassifier, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
prettyPrint("Saving learned CART to \"tritonTree_%s.pdf\"" % currentFold, "debug")
graph.write_pdf("tritonTree_%s.pdf" % currentFold)
trainingDataset, trainingLabels = [], []
testDataset, testLabels = [], []
currentFold += 1
except Exception as e:
prettyPrint("Error encountered in \"classifyTreeKFold\": %s" % e, "error")
return [], [], []
return accuracyRates, probabilities, timings, groundTruthLabels, predictedLabels
def reduceDimensionality(X, y, method="selectkbest", targetDim=10):
""" Reduces the dimensionality of [X] to [targetDim] """
try:
# Check for the required methodology first
if method.lower() == "selectkbest":
prettyPrint("Selecting %s best features from dataset" % targetDim, "debug")
kBestSelector = SelectKBest(k=targetDim)
X_new = kBestSelector.fit_transform(X, y).tolist()
elif method.lower() == "pca":
prettyPrint("Extracting %s features from dataset using PCA" % targetDim, "debug")
pcaExtractor = PCA(n_components=targetDim)
# Make sure vectors in X are positive
X_new = pcaExtractor.fit_transform(X, y).tolist()
else:
prettyPrint("Unknown dimensionality reduction method \"%s\"" % method, "warning")
return X
except Exception as e:
prettyPrint("Error encountered in \"reduceDimensionality\": %s" % e, "error")
return X
# Return the reduced dataset
return X_new
def gatherStatsFromLog(fileName, expType, accuracyMode):
""" Parses a classification dump file to calculate accuracies and confusion matrix """
if not os.path.exists(fileName):
prettyPrint("File \"%s\" does not exist. Exiting." % fileName, "warning")
return False
fileContent = open(fileName).read()
# Group results by tree depth
allLines = fileContent.split('\n')
allDepths = {}
currentDepth, currentTrends = "", []
skip = True #TODO contributes to focusing on a certain depth/dimensionality
print "[*] Parsing content..."
lineCount = 0
for line in allLines:
if line.lower().find("tree depth:") != -1 or line.lower().find("target dimensionality:") != -1:
# TODO: Focusing on the tree depth of 8 and the target dimensionality of 64
if line.lower().find("tree depth: 8") == -1 and line.lower().find("target dimensionality: 64") == -1:
prettyPrint("Skipping %s" % line, "debug")
# Make sure we merge the 10th iteration
if len(currentTrends) > 0:
if currentDepth in allDepths.keys():
prettyPrint("Merging trends at %s" % line, "debug")
allDepths[currentDepth] = mergeTrends(allDepths[currentDepth], currentTrends)
currentTrends = []
skip = True
continue
skip = False
currentDepth = line.split(": ")[1]
prettyPrint("Processing %s:" % line, "debug")
if len(currentTrends) > 0:
# Store previous depth and reset it
if currentDepth in allDepths.keys():
prettyPrint("Merging trends at %s" % line, "debug")
allDepths[currentDepth] = mergeTrends(allDepths[currentDepth], currentTrends)
else:
prettyPrint("Adding new trend's list at %s" % line, "debug")
allDepths[currentDepth] = currentTrends
currentTrends = []
elif line.find("Class") != -1 and not skip:
#lineCount += 1
# Extract class and predicted
if expType == "exp1":
currentClass = line.split(',')[0].split(':')[1]
currentPredicted = line.split(',')[1].split(':')[1]
elif expType == "exp2":
currentClass = line.split()[2][:-1]
currentPredicted = line.split()[-1]
else:
prettyPrint("Unsupported experiment type \"%s\". Exiting" % expType, "debug")
trend = "%s (as) %s" % (currentClass, currentPredicted)
# Check whether trend exists in current trends
trendIndex, oldTrend = findTrend(trend, currentTrends)
if trendIndex != -1 and len(oldTrend) > 0:
# If yes, update count
newTrend = (trend, currentTrends[trendIndex][1]+1)
#print newTrend
currentTrends.pop(trendIndex)
# Add to currentTrends
currentTrends.append(newTrend)
else:
# else, add and set to zero
newTrend = (trend, 1)
# Add to currentTrends
currentTrends.append(newTrend)
# Now sort the trends for all Depths
prettyPrint("Sorting trends according to occurrence.")
for tDepth in allDepths:
allDepths[tDepth].sort(cmp=cmpTuple)
# Display ordered trends
keys = [int(x) for x in allDepths.keys()]
keys.sort()
allClasses, trends = [], []
for tDepth in keys:
print len(allDepths[str(tDepth)])
print "================================"
print "[*] Dimensionality / Depth: %s" % tDepth
print "================================"
total = 0
for trend in allDepths[str(tDepth)]:
trends.append(trend)
print "[*] %s encountered %s time(s)" % (trend[0], trend[1])
total += trend[1]
# Parse trend name
class1 = trend[0].split(" (as) ")[0]
class2 = trend[0].split(" (as) ")[1]
if not class1 in allClasses:
allClasses.append(class1)
if not class2 in allClasses:
allClasses.append(class2)
# Sort classes alphabetically
allClasses.sort()
encodedClasses = {i+1: allClasses[i] for i in range(len(allClasses))}
print "----------------------------------"
print "[*] Total trend occurrences: %s" % total
print "----------------------------------"
# 2- Build a matrix of zeros
confusionMatrix = numpy.zeros((len(allClasses), len(allClasses)))
    # 3- Iterate over trends and extract classes
for trend in trends:
class1, class2 = trend[0].split(" (as) ")[0], trend[0].split(" (as) ")[1]
count = trend[1]
# 4- Populate corresponding cells
#print allClasses.index(class1), allClasses.index(class2), count
confusionMatrix[allClasses.index(class1)][allClasses.index(class2)] += count
# 5-Save to file
newFileName = fileName.replace("classificationlog", "confusionmatrix").replace(".txt", ".csv")
numpy.savetxt(newFileName, confusionMatrix, delimiter=',', fmt='%i')
# 6- Save class indices to file
#f = open(fileName.replace("classificationlog", "confusionMatrix"), "a")
correct = int(confusionMatrix.trace())
# Update the count of correct instances according to the accuracy mode
for trend in trends:
original, classified = trend[0].split(" (as) ")[0], trend[0].split(" (as) ")[1]
if accuracyMode == "viceversa":
# Count (A+B) classified as (B+A) as correct.
if original != classified and numOfMismatches(original, classified) <= 1:
correct += trend[1]
prettyPrint("Vice-versa trend %s found. Updating correctly-classified trends." % trend[0], "debug")
elif accuracyMode == "jit":
# Count (X+Jit) classified as (Jit) as correct ==> Jit is dominant
if original != classified and original.find("Ji") != -1 and classified.find("Ji") != -1:
correct += trend[1]
prettyPrint("Jit trend %s found. Updating correctly-classified trends." % trend[0], "debug")
elif accuracyMode == "both":
# Implement both accuracy modes
if original != classified and original.find("Ji") != -1 and classified.find("Ji") != -1:
correct += trend[1]
prettyPrint("Jit trend %s found. Updating correctly-classified trends." % trend[0], "debug")
elif original != classified and numOfMismatches(original, classified) <= 1:
correct += trend[1]
prettyPrint("Vice-versa trend %s found. Updating correctly-classified trends." % trend[0], "debug")
incorrect = int(total - correct)
accuracy = round((float(correct)/float(total))*100.0, 2)
#f.write("\n Correctly classified: %s, incorrectly classified: %s, Classification accuracy: %s%%, Total trends: %s\n" % (correct, incorrect, accuracy, total))
#f.write("\n %s" % str(encodedClasses))
#f.close()
#print lineCount
print "----------------------------------"
print "[*] Accuracy mode: %s\n[*] Correctly classified: %s\n[*] Incorrectly classified: %s\n[*] Classification accuracy: %s%%\n[*] Total trends: %s" % (accuracyMode, correct, incorrect, accuracy, total)
print "----------------------------------"
allKeys = encodedClasses.keys()
allKeys.sort()
for k in allKeys:
print "[%s] => \"%s\" => %s" % (k, chr(k+96).upper(), encodedClasses[k])
|
class Swimmers:
def getSwimTimes(self, distances, speeds, current):
def time(d, s):
if d == 0:
return 0
if s <= current:
return -1
d = float(d)
return int(d / (s + current) + d / (s - current))
return map(time, distances, speeds)
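# Worked example (assuming `current` is the water-current speed): for d=100, s=30 and
# current=10 the round trip takes 100/(30+10) + 100/(30-10) = 2.5 + 5.0 = 7.5, which
# int() truncates to 7; a swimmer with s <= current never gets back, hence -1.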
|
#%%
# import packages
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
import datetime
from finrl.apps import config
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import data_split
from finrl.env.environment import EnvSetup
from finrl.env.EnvMultipleStock_train import StockEnvTrain
from finrl.env.EnvMultipleStock_trade import StockEnvTrade
from finrl.model.models import DRLAgent
from finrl.trade.backtest import BackTestStats, BaselineStats, BackTestPlot, backtest_strat, baseline_strat
import os
if not os.path.exists("./" + config.DATA_SAVE_DIR):
os.makedirs("./" + config.DATA_SAVE_DIR)
if not os.path.exists("./" + config.TRAINED_MODEL_DIR):
os.makedirs("./" + config.TRAINED_MODEL_DIR)
if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR):
os.makedirs("./" + config.TENSORBOARD_LOG_DIR)
if not os.path.exists("./" + config.RESULTS_DIR):
os.makedirs("./" + config.RESULTS_DIR)
#%%
# Download and save the data in a pandas DataFrame:
df = YahooDownloader(start_date = '2008-01-01',
end_date = '2020-12-01',
ticker_list = config.DOW_30_TICKER).fetch_data()
#%%
# Perform Feature Engineering:
df = FeatureEngineer(df.copy(),
use_technical_indicator=True,
use_turbulence=False).preprocess_data()
# add covariance matrix as states
df=df.sort_values(['date','tic'],ignore_index=True)
df.index = df.date.factorize()[0]
cov_list = []
# look back is one year
lookback=252
for i in range(lookback,len(df.index.unique())):
data_lookback = df.loc[i-lookback:i,:]
price_lookback=data_lookback.pivot_table(index = 'date',columns = 'tic', values = 'close')
return_lookback = price_lookback.pct_change().dropna()
covs = return_lookback.cov().values
cov_list.append(covs)
df_cov = pd.DataFrame({'date':df.date.unique()[lookback:],'cov_list':cov_list})
df = df.merge(df_cov, on='date')
df = df.sort_values(['date','tic']).reset_index(drop=True)
df.head()
#%%
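# NOTE: `train` and `StockPortfolioEnv` are not defined anywhere above in this script.
# A hedged sketch of the usual FinRL pattern (dates below are placeholders, not from the source):
#   train = data_split(df, '2009-01-01', '2020-07-01')
# `StockPortfolioEnv` would likewise need to be imported from the corresponding finrl env module.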
stock_dimension = len(train.tic.unique())
state_space = stock_dimension
# Initialize env:
env_setup = EnvSetup(stock_dim = stock_dimension,
state_space = state_space,
initial_amount = 1000000,
tech_indicator_list = config.TECHNICAL_INDICATORS_LIST)
env_train = env_setup.create_env_training(data = train,
env_class = StockPortfolioEnv)
|
import unittest
from libpysal.examples import load_example
import geopandas as gpd
import numpy as np
from segregation.aspatial import Dissim
from segregation.decomposition import DecomposeSegregation
class Decomposition_Tester(unittest.TestCase):
def test_Decomposition(self):
s_map = gpd.read_file(load_example("Sacramento1").get_path("sacramentot2.shp"))
index1 = Dissim(s_map, 'HISP_', 'TOT_POP')
index2 = Dissim(s_map, 'BLACK_', 'TOT_POP')
res = DecomposeSegregation(index1, index2, counterfactual_approach = "composition")
np.testing.assert_almost_equal(res.c_a, -0.16138819842911295)
np.testing.assert_almost_equal(res.c_s, -0.005104643275796905)
res.plot(plot_type = 'cdfs')
res.plot(plot_type = 'maps')
res = DecomposeSegregation(index1, index2, counterfactual_approach = "share")
np.testing.assert_almost_equal(res.c_a, -0.1543828579279878)
np.testing.assert_almost_equal(res.c_s, -0.012109983776922045)
res.plot(plot_type = 'cdfs')
res.plot(plot_type = 'maps')
res = DecomposeSegregation(index1, index2, counterfactual_approach = "dual_composition")
np.testing.assert_almost_equal(res.c_a, -0.16159526946235048)
np.testing.assert_almost_equal(res.c_s, -0.004897572242559378)
res.plot(plot_type = 'cdfs')
res.plot(plot_type = 'maps')
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/python3
from typing import List
import re
class GanetiNode:
name = ""
shortname = ""
total_memory = 0
used_memory = 0
free_memory = 0
total_disk = 0
free_disk = 0
total_cpus = 0
status = ""
group_uuid = ""
spindles = 0
tags: List[str] = []
exclusive_storage = ""
free_spindles = 0
node_cpus = 0
cpu_speed = ""
def __init__(self, name, total_memory, used_memory, free_memory, total_disk, free_disk, total_cpus, status, group_uuid, spindles, tags, exclusive_storage, free_spindles, node_cpus, cpu_speed):
self.name = name
self.shortname = re.sub(r'\..*', '', name)
self.total_memory = total_memory
self.used_memory = used_memory
self.free_memory = free_memory
self.total_disk = total_disk
self.free_disk = free_disk
self.total_cpus = total_cpus
self.status = status
self.group_uuid = group_uuid
self.spindles = spindles
self.tags = tags
self.exclusive_storage = exclusive_storage
self.free_spindles = free_spindles
self.node_cpus = node_cpus
self.cpu_speed = cpu_speed
def __eq__(self, other):
return self.name == other.name
|
from __future__ import print_function
import argparse
import codecs
import logging
import time
import numpy
import pandas as pd
from pandas.io.json import json_normalize
import os
import json
def read_data(folderpath):
start = time.time()
logging.debug("reading data from %s", folderpath)
# creating empty df
tracks_df = pd.DataFrame(columns=['pid','track_uri'])
path_to_json = folderpath
json_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]
json_files.sort()
# we need both the json and an index number so use enumerate()
for index, js in enumerate(json_files):
s = time.time()
with open(os.path.join(path_to_json, js)) as json_file:
j = json.load(json_file)
# extracting tracks from playlists in each slice
tracks = json_normalize(j['playlists'], record_path='tracks',
meta=['pid'])
# append tracks to tracks_df
tracks = tracks[['pid', 'track_uri']]
tracks_df = tracks_df.append(tracks)
#print('reading slice #'+ str(index) + ' in : '+ str(s-time.time()))
logging.debug("read data file in %s", time.time() - start)
start = time.time()
logging.debug("writing data to file")
tracks_df.to_csv('../my_data/mpd.tsv', sep='\t', index=False)
logging.debug("wrote data file in %s", time.time() - start)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Reads the MPD, trims and saves it to a .tsv file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', type=str,
dest='folderpath', help='specify path to folder with spotify-mpd json slices', required=True)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
read_data(args.folderpath)
|
import rospy
from visualization_msgs.msg import Marker
from . import make_markers
class MarkerHelper(object):
def __init__(self, topic, frame_id='/world'):
self.pub = rospy.Publisher(topic, Marker)
self.frame_id = frame_id
def publish(self, marker):
self.pub.publish(marker.to_msg(frame_id=self.frame_id))
    def wireframe_box(self, *args, **kwargs):
        marker = make_markers.WireframeBoxMarker(*args, **kwargs)
        self.publish(marker)
|
import importlib
import logging
logger = logging.getLogger(__name__)
class CI:
def __init__(self, module):
self.module = module
def __enter__(self):
try:
importlib.import_module(self.module)
self.module_available = True
except ModuleNotFoundError:
self.module_available = False
def __exit__(self, type, value, traceback):
if type == ModuleNotFoundError and not self.module_available:
logger.debug(
"Module '{}' is not available, discarding import error...".format(
self.module
)
)
return True
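# Hedged usage sketch: guard an optional import so a missing dependency is logged and
# skipped instead of raising, e.g. in CI environments where extras are not installed.
#
#   with CI("matplotlib"):
#       import matplotlib.pyplot as plt  # silently skipped if matplotlib is absent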
|
#! python3
"""Prints a grid from a list of lists of characters."""
test_grid = [['.', '.', '.', '.', '.', '.'],
['.', 'O', 'O', '.', '.', '.'],
['O', 'O', 'O', 'O', '.', '.'],
['O', 'O', 'O', 'O', 'O', '.'],
['.', 'O', 'O', 'O', 'O', 'O'],
['O', 'O', 'O', 'O', 'O', '.'],
['O', 'O', 'O', 'O', '.', '.'],
['.', 'O', 'O', '.', '.', '.'],
['.', '.', '.', '.', '.', '.']]
def picture_grid(grid):
"""Takes a list of lists containing characters and prints a grid.
Args:
grid: The list of lists containing the characters.
"""
for column in range(len(grid[0])):
for row in range(len(grid)):
print(grid[row][column], end = "")
print("")
picture_grid(test_grid)
|
# Hash Table; Trie
# In English, we have a concept called root, which can be followed by some other words to form another longer word - let's call this word successor. For example, the root an, followed by other, which can form another word another.
#
# Now, given a dictionary consisting of many roots and a sentence. You need to replace all the successor in the sentence with the root forming it. If a successor has many roots can form it, replace it with the root with the shortest length.
#
# You need to output the sentence after the replacement.
#
# Example 1:
# Input: dict = ["cat", "bat", "rat"]
# sentence = "the cattle was rattled by the battery"
# Output: "the cat was rat by the bat"
# Note:
# The input will only have lower-case letters.
# 1 <= dict words number <= 1000
# 1 <= sentence words number <= 1000
# 1 <= root length <= 100
# 1 <= sentence words length <= 1000
class Solution:
def replaceWords(self, dict, sentence):
"""
:type dict: List[str]
:type sentence: str
:rtype: str
"""
dict = set(dict)
sentenceList = sentence.split()
for idx, val in enumerate(sentenceList):
for i in range(1, len(val)):
if val[:i] in dict:
sentenceList[idx] = val[:i]
break
return " ".join(sentenceList)
|
import io
import re
from setuptools import setup
from setuptools import find_packages
DESCRIPTION = "Python client for the https://api.radio-browser.info"
def get_version():
content = open("pyradios/__init__.py").read()
mo = re.search(r"__version__\s+=\s+'([^']+)'", content)
if not mo:
raise RuntimeError(
'Unable to find version string in pyradios/__init__.py'
)
return mo[1]
def readme():
with io.open("README.md", "r", encoding="utf-8") as f:
return f.read()
def required(sfx=''):
with open(f"requirements{sfx}.txt") as f:
return f.read().splitlines()
setup(
name="pyradios",
version=get_version(),
description=DESCRIPTION,
long_description=readme(),
long_description_content_type="text/markdown",
keywords="pyradios wrapper radios api",
author="André P. Santos",
author_email="andreztz@gmail.com",
url="https://github.com/andreztz/pyradios",
license="MIT",
packages=find_packages(),
install_requires=required(),
extras_require={'dev': required('-dev')},
classifiers=[
"Development Status :: 1 - Planning",
"Environment :: Console",
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
],
python_requires=">=3.6",
project_urls={
"Source": "https://github.com/andreztz/pyradios/",
"Upstream": "https://api.radio-browser.info/",
},
)
|
"""This module calculates max ramp rates for each plant component in the EPA CEMS dataset.
Outputs:
<user_arg>.csv:
component-level aggregates.
See results/README.md for field descriptions.
<user_arg>_crosswalk_with_IDs.csv:
inner join of EPA crosswalk and CEMS id columns.
This can be used to inspect the EPA/EIA units that make up a component.
You can also compare to the original crosswalk or original CEMS to see which units
failed to join and were thus excluded from this analysis."""
import argparse
from pathlib import Path
import sys
from typing import Optional, Sequence
import pandas as pd
from tqdm import tqdm
# from pudl.constants import us_states
from ramprate.load_dataset import load_epacems, load_epa_crosswalk, ALL_STATES
from ramprate.build_features import process_subset, _remove_irrelevant
# territories are not in EPA CEMS. District of Columbia is.
TERRITORIES = {"MP", "PR", "AS", "GU", "NA", "VI"}
def process(
out_path: str,
chunk_size: int,
start_year: int,
end_year: int,
state_subset: Optional[Sequence[str]] = None,
) -> None:
"""calculate max ramp rates and other metrics per connected subcomponent in EPA CEMS"""
out_path = Path(out_path)
if not out_path.parent.exists():
raise ValueError(f"Parent directory does not exist: {out_path.parent.absolute()}")
if state_subset is None:
# state_subset = us_states.keys() # all states
state_subset = ALL_STATES
# exlude territories, which are not in EPA CEMS
states = [state for state in state_subset if state not in TERRITORIES]
# minimum subset of columns to load
cems_cols = [
"plant_id_eia",
"unitid",
"operating_datetime_utc",
"gross_load_mw",
"unit_id_epa",
]
years = list(range(start_year, end_year + 1))
crosswalk = load_epa_crosswalk()
crosswalk = _remove_irrelevant(crosswalk) # remove unmatched or non-exporting
# process in chunks due to memory constraints.
# If you use an instance with 10+ GB memory per year of data analyzed, this won't be necessary.
aggregates = []
modified_crosswalk = []
offset = 0
chunks = [states[i : i + chunk_size] for i in range(0, len(states), chunk_size)]
for subset_states in tqdm(chunks):
cems = load_epacems(states=subset_states, years=years, columns=cems_cols, engine="pandas")
cems.set_index(
["unit_id_epa", "operating_datetime_utc"],
drop=False,
inplace=True,
)
cems.sort_index(inplace=True)
outputs = process_subset(cems, crosswalk, component_id_offset=offset)
agg = outputs["component_aggs"]
# convert iterable types to something more amenable to csv
agg["EIA_UNIT_TYPE"] = agg["EIA_UNIT_TYPE"].transform(lambda x: str(tuple(x)))
aggregates.append(agg)
modified_crosswalk.append(outputs["key_map"])
offset += agg.index.max() + 1 # prevent ID overlap when using chunking
aggregates = pd.concat(aggregates, axis=0)
aggregates.to_csv(out_path)
modified_crosswalk = pd.concat(modified_crosswalk, axis=0)
modified_crosswalk.to_csv(out_path.parent / f"{out_path.stem}_crosswalk_with_IDs.csv")
return
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("out_path", type=str, help="""Output path of csv file""")
parser.add_argument(
"--chunk_size",
type=int,
default=5,
help="""processing is chunked by US states. This arg selects the number of states per chunk. Default is 5. If your instance has 10+ GB memory per year of data analyzed, chunking is unnecessary so set to 55""",
)
parser.add_argument(
"--start_year",
type=int,
default=2015,
help="""first year of CEMS data to include in analysis. Inclusive. Default is 2015.""",
)
parser.add_argument(
"--end_year",
type=int,
default=2019,
help="""last year of CEMS data to include in analysis. Inclusive. Default is 2019.""",
)
parser.add_argument(
"--state_subset",
type=str,
default=None,
nargs="*",
help="""optional list of state abbreviations to include in the analysis. Default is all states""",
)
args = parser.parse_args(sys.argv[1:])
sys.exit(
process(
args.out_path,
chunk_size=args.chunk_size,
start_year=args.start_year,
end_year=args.end_year,
state_subset=args.state_subset,
)
)
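# The dump ends without an entry-point guard; assuming this module is meant to be run
# as a script (as the argparse usage above suggests), the conventional guard would be:
if __name__ == "__main__":
    main()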
|
"""
Event store.
"""
import psycopg2
from microcosm_postgres.models import Model
from microcosm_postgres.store import Store
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.exc import OperationalError
from microcosm_eventsource.errors import (
ConcurrentStateConflictError,
ContainerLockNotAvailableRetry,
)
class EventStore(Store):
"""
Event persistence operations.
"""
def retrieve_most_recent(self, **kwargs):
"""
Retrieve the most recent by container id and event type.
"""
container_id = kwargs.pop(self.model_class.container_id_name)
return self._retrieve_most_recent(
self.model_class.container_id == container_id,
)
def retrieve_most_recent_by_event_type(self, event_type, **kwargs):
"""
Retrieve the most recent by container id and event type.
"""
container_id = kwargs.pop(self.model_class.container_id_name)
return self._retrieve_most_recent(
self.model_class.container_id == container_id,
self.model_class.event_type == event_type,
)
def retrieve_most_recent_with_update_lock(self, **kwargs):
"""
        Retrieve the most recent event by container id, taking a FOR UPDATE lock with the NOWAIT option.
        If another instance of the event is being processed simultaneously, a LockNotAvailable error is raised.
        This method serializes event sourcing when multiple events target the same container instance.
"""
container_id = kwargs.pop(self.model_class.container_id_name)
try:
return self._retrieve_most_recent(
self.model_class.container_id == container_id,
for_update=True
)
except OperationalError as exc:
if isinstance(exc.orig, psycopg2.errors.LockNotAvailable):
raise ContainerLockNotAvailableRetry()
raise
def upsert_index_elements(self):
"""
        Can be overridden by implementations of the event source to upsert based on other index elements.
        Requires a unique constraint to exist on the index elements.
"""
return ["parent_id"]
def upsert_on_index_elements(self, instance):
"""
Upsert an event by index elements.
        Uses ON CONFLICT ... DO NOTHING to handle uniqueness constraint violations without
        invalidating the current transaction completely.
        Depends on a unique constraint on the index elements to find the resulting entry.
"""
with self.flushing():
insert_statement = insert(self.model_class).values(
instance._members(),
)
upsert_statement = insert_statement.on_conflict_do_nothing(
index_elements=self.upsert_index_elements(),
)
for member in instance.__dict__.values():
if isinstance(member, Model):
self.session.add(member)
self.session.execute(upsert_statement)
most_recent = self._retrieve_most_recent(
*[
getattr(self.model_class, elem) == getattr(instance, elem)
for elem in self.upsert_index_elements()
]
)
if not most_recent.is_similar_to(instance):
raise ConcurrentStateConflictError()
return most_recent
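    # For reference (hedged sketch): with the default upsert_index_elements(), the
    # statement built above corresponds roughly to
    #   INSERT INTO <event_table> (...) VALUES (...) ON CONFLICT (parent_id) DO NOTHING
    # followed by re-selecting the most recent row for the same parent_id and checking
    # that it is similar to the instance that was just proposed.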
def _filter(self,
query,
event_type=None,
clock=None,
min_clock=None,
max_clock=None,
parent_id=None,
version=None,
**kwargs):
"""
Filter events by standard criteria.
"""
container_id = kwargs.pop(self.model_class.container_id_name, None)
if container_id is not None:
query = query.filter(self.model_class.container_id == container_id)
if event_type is not None:
query = query.filter(self.model_class.event_type == event_type)
if clock is not None:
query = query.filter(self.model_class.clock == clock)
if min_clock is not None:
query = query.filter(self.model_class.clock >= min_clock)
if max_clock is not None:
query = query.filter(self.model_class.clock <= max_clock)
if parent_id is not None:
query = query.filter(self.model_class.parent_id == parent_id)
if version is not None:
query = query.filter(self.model_class.version == version)
return super(EventStore, self)._filter(query, **kwargs)
def _order_by(self, query, sort_by_clock=False, sort_clock_in_ascending_order=False, **kwargs):
"""
Order events by logical clock.
"""
if sort_by_clock:
if sort_clock_in_ascending_order:
return query.order_by(
self.model_class.clock.asc(),
)
else:
return query.order_by(
self.model_class.clock.desc(),
)
return query.order_by(
self.model_class.container_id.desc(),
self.model_class.clock.desc(),
)
def _retrieve_most_recent(self, *criterion, for_update=False):
"""
Retrieve the most recent event by some criterion.
Note that the default `_order_by` enforces clock ordering.
"""
query = self._order_by(self._query(
*criterion
))
if for_update:
return query.with_for_update(nowait=True).first()
else:
return query.first()
|
# image process
import cv2
import os
import numpy as np
IMGDIR="D:/apidoc/python/OpenCV-3.2.0"
img = cv2.imread(IMGDIR + os.sep + "roi.jpg", )
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_blue = np.array([110,50,50])
upper_blue = np.array([130,255,255])
mask = cv2.inRange(hsv, lowerb=lower_blue, upperb=upper_blue)
for line in mask:
print(line)
res = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('orig', img)
cv2.imshow('mask', mask)
cv2.imshow('res', res)
k = cv2.waitKey(0) & 0xFF  # mask to 8 bits so the comparison with ord('q') below is consistent
while k != ord('q'):
k = cv2.waitKey(0) & 0xFF
cv2.destroyAllWindows()
|
'''
Author: Ful Chou
Date: 2021-01-07 11:38:03
LastEditors: Ful Chou
LastEditTime: 2021-01-07 11:38:33
FilePath: /leetcode/flatten-binary-tree-to-linked-list.py
Description: What this document does
Link: https://leetcode-cn.com/problems/flatten-binary-tree-to-linked-list/
'''
class Solution:
def flatten(self, root: TreeNode) -> None:
"""
Do not return anything, modify root in-place instead.
        How to think about the recursion:
        Don't trace into the recursive calls. Decide what the current call should do, assume the
        recursive calls already do exactly that, and just write the current level correctly;
        the recursion takes care of itself.
"""
if root == None:
return
        if root.left: # thought skipping one stack frame here would save memory, but it did not help much
self.flatten(root.left)
if root.right:
self.flatten(root.right)
child_right = root.right
root.right = root.left
        root.left = None # remember to clear the original left subtree
p = root
while p.right:
p = p.right
p.right = child_right
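# Illustrative example: for root = [1,2,5,3,4,null,6] the method rewires the tree in
# place into 1 -> 2 -> 3 -> 4 -> 5 -> 6 along the right pointers, with every left
# pointer set to None. (TreeNode is provided by the LeetCode runtime, not this file.)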
|