import logging
import pickle
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import numpy as np
from srl.base.define import EnvAction, EnvInvalidAction, EnvObservation, EnvObservationType, Info, RenderType
from srl.base.env.space import SpaceBase
logger = logging.getLogger(__name__)
@dataclass
class EnvConfig:
name: str
kwargs: Dict = field(default_factory=dict)
# gym
gym_prediction_by_simulation: bool = True
class EnvBase(ABC):
# --------------------------------
# implement properties
# --------------------------------
# --- action
@property
@abstractmethod
def action_space(self) -> SpaceBase:
raise NotImplementedError()
# --- observation
@property
@abstractmethod
def observation_space(self) -> SpaceBase:
raise NotImplementedError()
@property
@abstractmethod
def observation_type(self) -> EnvObservationType:
raise NotImplementedError()
# --- properties
@property
@abstractmethod
def max_episode_steps(self) -> int:
raise NotImplementedError()
@property
@abstractmethod
def player_num(self) -> int:
raise NotImplementedError()
# --------------------------------
# implement functions
# --------------------------------
@abstractmethod
def reset(self) -> Tuple[EnvObservation, List[int]]:
"""reset
Returns: (
init_state,
next_player_indices,
)
"""
raise NotImplementedError()
@abstractmethod
def step(
self,
actions: List[EnvAction],
player_indices: List[int],
) -> Tuple[EnvObservation, List[float], bool, List[int], Info]:
"""one step
Args:
actions (List[Optional[EnvAction]]): [
player1 action,
player2 action,
...
]
player_indices (List[int]): indices of the players that act in this step
# The indices in player_indices correspond to entries in actions;
# the action for any index not in player_indices must be None.
Returns:
(
next_state,
[
player1 reward,
player2 reward,
...
],
done,
next_player_indices,
info,
)
"""
raise NotImplementedError()
@abstractmethod
def backup(self) -> Any:
raise NotImplementedError()
@abstractmethod
def restore(self, data: Any) -> None:
raise NotImplementedError()
# option
def close(self) -> None:
pass
# option
def render_terminal(self, **kwargs) -> None:
raise NotImplementedError()
# option
def render_gui(self, **kwargs) -> None:
raise NotImplementedError()
# option
def render_rgb_array(self, **kwargs) -> np.ndarray:
raise NotImplementedError()
# option
def get_invalid_actions(self, player_index: int) -> List[EnvInvalidAction]:
return []
# option
def action_to_str(self, action: EnvAction) -> str:
return str(action)
# option
def make_worker(self, name: str) -> Optional[Type["srl.base.rl.base.RLWorker"]]:
return None
# option
def get_original_env(self) -> object:
return self
def copy(self):
env = self.__class__()
env.restore(self.backup())
return env
# A separate class so that the implementation (EnvBase) and the execution wrapper keep distinct namespaces
class EnvRun:
def __init__(self, env: EnvBase) -> None:
self.env = env
self.init()
def init(self):
self.step_num = 0
self.episode_rewards = None
self.state = None
self.step_rewards = None
self.done = True
self.done_reason = ""
self.next_player_indices = []
self.info = None
# --- with
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self) -> None:
logger.debug(f"env.close")
self.env.close()
# --------------------------------
# implement properties
# --------------------------------
@property
def action_space(self) -> SpaceBase:
return self.env.action_space
@property
def observation_space(self) -> SpaceBase:
return self.env.observation_space
@property
def observation_type(self) -> EnvObservationType:
return self.env.observation_type
@property
def max_episode_steps(self) -> int:
return self.env.max_episode_steps
@property
def player_num(self) -> int:
return self.env.player_num
# ------------------------------------
# episode functions
# ------------------------------------
def reset(self, max_steps: int = -1, timeout: int = -1) -> None:
logger.debug("env.reset")
self.state, self.next_player_indices = self.env.reset()
self.step_num = 0
self.done = False
self.done_reason = ""
self.episode_rewards = np.zeros(self.player_num)
self.t0 = time.time()
self.max_steps = max_steps
self.timeout = timeout
def step(self, actions: List[EnvAction], skip_frames: int = 0, skip_function=None) -> Info:
logger.debug("env.step")
assert (
len(actions) == self.player_num
), "The number of actions does not match. (player: {self.player_num}, actions: {actions})"
for idx in self.next_player_indices:
assert actions[idx] is not None
self.state, rewards, self.done, self.next_player_indices, self.info = self.env.step(
actions, self.next_player_indices
)
self.step_rewards = np.asarray(rewards)
# Repeat the same action while skipping frames
for _ in range(skip_frames):
assert self.player_num == 1
self.state, rewards, self.done, self.next_player_indices, self.info = self.env.step(
actions, self.next_player_indices
)
self.step_rewards += np.asarray(rewards)
if self.done:
break
if skip_function is not None:
skip_function()
self.step_num += 1
self.episode_rewards += self.step_rewards
# done step
if self.done:
self.done_reason = "env"
elif self.step_num > self.max_episode_steps:
self.done = True
self.done_reason = "env max steps"
elif self.max_steps > 0 and self.step_num > self.max_steps:
self.done = True
self.done_reason = "episode max steps"
elif self.timeout > 0 and time.time() - self.t0 > self.timeout:
self.done = True
self.done_reason = "timeout"
return self.info
def backup(self) -> Any:
logger.debug("env.backup")
d = [
self.env.backup(),
self.step_num,
self.episode_rewards,
self.state,
self.step_rewards,
self.done,
self.done_reason,
self.next_player_indices,
self.info,
]
return pickle.dumps(d)
def restore(self, data: Any) -> None:
logger.debug("env.restore")
d = pickle.loads(data)
self.env.restore(d[0])
self.step_num = d[1]
self.episode_rewards = d[2]
self.state = d[3]
self.step_rewards = d[4]
self.done = d[5]
self.done_reason = d[6]
self.next_player_indices = d[7]
self.info = d[8]
def render(
self,
mode: Union[str, RenderType] = RenderType.Terminal,
is_except: bool = False,
**kwargs,
) -> Any:
logger.debug(f"env.render({mode})")
if isinstance(mode, str):
for t in RenderType:
if t.value == mode:
mode = t
break
else:
mode = RenderType.NONE
try:
if mode == RenderType.Terminal:
return self.env.render_terminal(**kwargs)
elif mode == RenderType.GUI:
return self.env.render_gui(**kwargs)
elif mode == RenderType.RGB_Array:
return self.env.render_rgb_array(**kwargs)
except NotImplementedError:
# logger.info(f"render NotImplementedError({mode})")
if is_except:
raise
def get_invalid_actions(self, player_index: int) -> List[EnvInvalidAction]:
return self.env.get_invalid_actions(player_index)
def action_to_str(self, action: EnvAction) -> str:
return self.env.action_to_str(action)
def make_worker(self, name: str) -> Optional["srl.base.rl.base.RLWorker"]:
cls = self.env.make_worker(name)
if cls is None:
return None
from srl.base.rl.algorithms.rulebase import RuleBaseConfig
return cls(RuleBaseConfig())
def get_original_env(self) -> object:
return self.env.get_original_env()
# ------------------------------------
# util functions
# ------------------------------------
def samples(self) -> List[EnvAction]:
return [self.action_space.sample(self.get_invalid_actions(i)) for i in range(self.player_num)]
def sample(self, player_index: int) -> EnvAction:
return self.action_space.sample(self.get_invalid_actions(player_index))
def copy(self):
org_env = self.env.__class__()
env = self.__class__(org_env)
env.restore(self.backup())
return env
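# Hypothetical usage sketch (not part of the original module): drive one episode through
# EnvRun using only the API defined above; `env` is assumed to be a concrete EnvBase.
def _example_episode_loop(env: EnvBase) -> None:
    env_run = EnvRun(env)
    env_run.reset(max_steps=100)
    while not env_run.done:
        actions = env_run.samples()  # one random valid action per player
        env_run.step(actions)
    print(env_run.episode_rewards, env_run.done_reason)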
|
import os
import sys
os.chdir(os.path.abspath(os.path.dirname(sys.argv[0])))
def get_cookies_files(dirpath):
return [os.path.join(dirpath, x) for x in os.listdir(dirpath) if x.endswith(".json") or x.endswith(".txt")]
|
import sys as _sys
from typing import List
from collections import _iskeyword # type: ignore
from tensorboardX import SummaryWriter
import os
SUMMARY_WRITER_DIR_NAME = 'runs'
def get_sample_writer(name, base=".."):
"""Returns a tensorboard summary writer
"""
return SummaryWriter(log_dir=os.path.join(base, SUMMARY_WRITER_DIR_NAME, name))
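# Hedged usage sketch (the run name "demo" and the logged value are illustrative): write a
# single scalar under ../runs/demo and close the writer.
if __name__ == "__main__":
    writer = get_sample_writer("demo")
    writer.add_scalar("loss", 0.5, global_step=1)
    writer.close()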
|
import cPickle as pickle
import hashlib
import hmac
import os
from Crypto.Cipher import AES
class AuthenticationError(Exception): pass
class Crypticle(object):
PICKLE_PAD = "pickle::"
AES_BLOCK_SIZE = 16
SIG_SIZE = hashlib.sha256().digest_size
def __init__(self, key_string, key_size=192):
self.keys = self.extract_keys(key_string, key_size)
self.key_size = key_size
@classmethod
def generate_key_string(cls, key_size=192):
key = os.urandom(key_size / 8 + cls.SIG_SIZE)
return key.encode("base64").replace("\n", "")
@classmethod
def extract_keys(cls, key_string, key_size):
key = key_string.decode("base64")
assert len(key) == key_size / 8 + cls.SIG_SIZE, "invalid key"
return key[:-cls.SIG_SIZE], key[-cls.SIG_SIZE:]
def encrypt(self, data):
"""encrypt data with AES-CBC and sign it with HMAC-SHA256"""
aes_key, hmac_key = self.keys
pad = self.AES_BLOCK_SIZE - len(data) % self.AES_BLOCK_SIZE
data = data + pad * chr(pad)
iv_bytes = os.urandom(self.AES_BLOCK_SIZE)
cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
data = iv_bytes + cypher.encrypt(data)
sig = hmac.new(hmac_key, data, hashlib.sha256).digest()
return data + sig
def decrypt(self, data):
"""verify HMAC-SHA256 signature and decrypt data with AES-CBC"""
aes_key, hmac_key = self.keys
sig = data[-self.SIG_SIZE:]
data = data[:-self.SIG_SIZE]
if hmac.new(hmac_key, data, hashlib.sha256).digest() != sig:
raise AuthenticationError("message authentication failed")
iv_bytes = data[:self.AES_BLOCK_SIZE]
data = data[self.AES_BLOCK_SIZE:]
cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
data = cypher.decrypt(data)
return data[:-ord(data[-1])]
def dumps(self, obj, pickler=pickle):
"""pickle and encrypt a python object"""
return self.encrypt(self.PICKLE_PAD + pickler.dumps(obj))
def loads(self, data, pickler=pickle):
"""decrypt and unpickle a python object"""
data = self.decrypt(data)
# simple integrity check to verify that we got meaningful data
assert data.startswith(self.PICKLE_PAD), "unexpected header"
return pickler.loads(data[len(self.PICKLE_PAD):])
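# Hedged round-trip sketch (assumes Python 2 with PyCrypto, matching the imports above;
# the payload is illustrative): generate a key string, then encrypt and decrypt an object.
if __name__ == "__main__":
    key = Crypticle.generate_key_string()
    crypt = Crypticle(key)
    token = crypt.dumps({"answer": 42})
    assert crypt.loads(token) == {"answer": 42}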
|
#!/usr/bin/env python
# (c) 2016-2017, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import csv
import os
import sys
from collections import defaultdict
from distutils.version import StrictVersion
from pprint import pformat, pprint
from ansible.parsing.metadata import DEFAULT_METADATA, ParseError, extract_metadata
from ansible.plugins.loader import module_loader
# There's a few files that are not new-style modules. Have to blacklist them
NONMODULE_PY_FILES = frozenset(('async_wrapper.py',))
NONMODULE_MODULE_NAMES = frozenset(os.path.splitext(p)[0] for p in NONMODULE_PY_FILES)
class MissingModuleError(Exception):
"""Thrown when unable to find a plugin"""
pass
def usage():
print("""Usage:
metadata-tool.py report [--version X]
metadata-tool.py add [--version X] [--overwrite] CSVFILE
metadata-tool.py add-default [--version X] [--overwrite]
metadata-tool.py upgrade [--version X]""")
sys.exit(1)
def parse_args(arg_string):
if len(arg_string) < 1:
usage()
action = arg_string[0]
version = None
if '--version' in arg_string:
version_location = arg_string.index('--version')
arg_string.pop(version_location)
version = arg_string.pop(version_location)
overwrite = False
if '--overwrite' in arg_string:
overwrite = True
arg_string.remove('--overwrite')
csvfile = None
if len(arg_string) == 2:
csvfile = arg_string[1]
elif len(arg_string) > 2:
usage()
return action, {'version': version, 'overwrite': overwrite, 'csvfile': csvfile}
def find_documentation(module_data):
"""Find the DOCUMENTATION metadata for a module file"""
start_line = -1
mod_ast_tree = ast.parse(module_data)
for child in mod_ast_tree.body:
if isinstance(child, ast.Assign):
for target in child.targets:
if target.id == 'DOCUMENTATION':
start_line = child.lineno - 1
break
return start_line
def remove_metadata(module_data, start_line, start_col, end_line, end_col):
"""Remove a section of a module file"""
lines = module_data.split('\n')
new_lines = lines[:start_line]
if start_col != 0:
new_lines.append(lines[start_line][:start_col])
next_line = lines[end_line]
if len(next_line) - 1 != end_col:
new_lines.append(next_line[end_col:])
if len(lines) > end_line:
new_lines.extend(lines[end_line + 1:])
return '\n'.join(new_lines)
def insert_metadata(module_data, new_metadata, insertion_line, targets=('ANSIBLE_METADATA',)):
"""Insert a new set of metadata at a specified line"""
assignments = ' = '.join(targets)
pretty_metadata = pformat(new_metadata, width=1).split('\n')
new_lines = []
new_lines.append('{0} = {1}'.format(assignments, pretty_metadata[0]))
if len(pretty_metadata) > 1:
for line in pretty_metadata[1:]:
new_lines.append('{0}{1}'.format(' ' * (len(assignments) - 1 + len(' = {')), line))
old_lines = module_data.split('\n')
lines = old_lines[:insertion_line] + new_lines + old_lines[insertion_line:]
return '\n'.join(lines)
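# Illustration (hypothetical input, not taken from the original tool): with
# new_metadata = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}
# and targets=('ANSIBLE_METADATA',), insert_metadata() emits lines shaped like
#   ANSIBLE_METADATA = {'metadata_version': '1.1',
#                       'status': ['preview'],
#                       'supported_by': 'community'}
# with each continuation line aligned one column past the opening brace.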
def parse_assigned_metadata_initial(csvfile):
"""
Fields:
:0: Module name
:1: Core (x if so)
:2: Extras (x if so)
:3: Category
:4: Supported/SLA
:5: Curated
:6: Stable
:7: Deprecated
:8: Notes
:9: Team Notes
:10: Notes 2
:11: final supported_by field
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
if record[12] == 'core':
supported_by = 'core'
elif record[12] == 'curated':
supported_by = 'curated'
elif record[12] == 'community':
supported_by = 'community'
else:
print('Module %s has no supported_by field. Using community' % record[0])
supported_by = 'community'
supported_by = DEFAULT_METADATA['supported_by']
status = []
if record[6]:
status.append('stableinterface')
if record[7]:
status.append('deprecated')
if not status:
status.extend(DEFAULT_METADATA['status'])
yield (module, {'version': DEFAULT_METADATA['metadata_version'], 'supported_by': supported_by, 'status': status})
def parse_assigned_metadata(csvfile):
"""
Fields:
:0: Module name
:1: supported_by string. One of the valid support fields
core, community, certified, network
:2: stableinterface
:3: preview
:4: deprecated
:5: removed
https://docs.ansible.com/ansible/latest/dev_guide/developing_modules_documenting.html#ansible-metadata-block
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
supported_by = record[1]
status = []
if record[2]:
status.append('stableinterface')
if record[4]:
status.append('deprecated')
if record[5]:
status.append('removed')
if not status or record[3]:
status.append('preview')
yield (module, {'metadata_version': '1.1', 'supported_by': supported_by, 'status': status})
def write_metadata(filename, new_metadata, version=None, overwrite=False):
with open(filename, 'rb') as f:
module_data = f.read()
try:
current_metadata, start_line, start_col, end_line, end_col, targets = \
extract_metadata(module_data=module_data, offsets=True)
except SyntaxError:
if filename.endswith('.py'):
raise
# Probably non-python modules. These should all have python
# documentation files where we can place the data
raise ParseError('Could not add metadata to {0}'.format(filename))
if current_metadata is None:
# No current metadata so we can just add it
start_line = find_documentation(module_data)
if start_line < 0:
if os.path.basename(filename) in NONMODULE_PY_FILES:
# These aren't new-style modules
return
raise Exception('Module file {0} had no ANSIBLE_METADATA or DOCUMENTATION'.format(filename))
module_data = insert_metadata(module_data, new_metadata, start_line, targets=('ANSIBLE_METADATA',))
elif overwrite or (version is not None and ('metadata_version' not in current_metadata or
StrictVersion(current_metadata['metadata_version']) < StrictVersion(version))):
# Current metadata that we do not want. Remove the current
# metadata and put the new version in its place
module_data = remove_metadata(module_data, start_line, start_col, end_line, end_col)
module_data = insert_metadata(module_data, new_metadata, start_line, targets=targets)
else:
# Current metadata and we don't want to overwrite it
return
# Save the new version of the module
with open(filename, 'wb') as f:
f.write(module_data)
def return_metadata(plugins):
"""Get the metadata for all modules
Handle duplicate module names
:arg plugins: List of plugins to look for
:returns: Mapping of plugin name to metadata dictionary
"""
metadata = {}
for name, filename in plugins:
# There may be several files for a module (if it is written in another
# language, for instance) but only one of them (the .py file) should
# contain the metadata.
if name not in metadata or metadata[name] is not None:
with open(filename, 'rb') as f:
module_data = f.read()
metadata[name] = extract_metadata(module_data=module_data, offsets=True)[0]
return metadata
def metadata_summary(plugins, version=None):
"""Compile information about the metadata status for a list of modules
:arg plugins: List of plugins to look for. Each entry in the list is
a tuple of (module name, full path to module)
:kwarg version: If given, make sure the modules have this version of
metadata or higher.
:returns: A tuple consisting of a list of modules with no metadata at the
required version and a list of files that have metadata at the
required version.
"""
no_metadata = {}
has_metadata = {}
supported_by = defaultdict(set)
status = defaultdict(set)
requested_version = StrictVersion(version)
all_mods_metadata = return_metadata(plugins)
for name, filename in plugins:
# Does the module have metadata?
if name not in no_metadata and name not in has_metadata:
metadata = all_mods_metadata[name]
if metadata is None:
no_metadata[name] = filename
elif version is not None and ('metadata_version' not in metadata or StrictVersion(metadata['metadata_version']) < requested_version):
no_metadata[name] = filename
else:
has_metadata[name] = filename
# What categories does the plugin belong in?
if all_mods_metadata[name] is None:
# No metadata for this module. Use the default metadata
supported_by[DEFAULT_METADATA['supported_by']].add(filename)
status[DEFAULT_METADATA['status'][0]].add(filename)
else:
supported_by[all_mods_metadata[name]['supported_by']].add(filename)
for one_status in all_mods_metadata[name]['status']:
status[one_status].add(filename)
return list(no_metadata.values()), list(has_metadata.values()), supported_by, status
# Filters to convert between metadata versions
def convert_metadata_pre_1_0_to_1_0(metadata):
"""
Convert pre-1.0 to 1.0 metadata format
:arg metadata: The old metadata
:returns: The new metadata
Changes from pre-1.0 to 1.0:
* ``version`` field renamed to ``metadata_version``
* ``supported_by`` field value ``unmaintained`` has been removed (change to
``community`` and let an external list track whether a module is unmaintained)
* ``supported_by`` field value ``committer`` has been renamed to ``curated``
"""
new_metadata = {'metadata_version': '1.0',
'supported_by': metadata['supported_by'],
'status': metadata['status']
}
if new_metadata['supported_by'] == 'unmaintained':
new_metadata['supported_by'] = 'community'
elif new_metadata['supported_by'] == 'committer':
new_metadata['supported_by'] = 'curated'
return new_metadata
def convert_metadata_1_0_to_1_1(metadata):
"""
Convert 1.0 to 1.1 metadata format
:arg metadata: The old metadata
:returns: The new metadata
Changes from 1.0 to 1.1:
* ``supported_by`` field value ``curated`` has been removed
* ``supported_by`` field value ``certified`` has been added
* ``supported_by`` field value ``network`` has been added
"""
new_metadata = {'metadata_version': '1.1',
'supported_by': metadata['supported_by'],
'status': metadata['status']
}
if new_metadata['supported_by'] == 'unmaintained':
new_metadata['supported_by'] = 'community'
elif new_metadata['supported_by'] == 'curated':
new_metadata['supported_by'] = 'certified'
return new_metadata
# Subcommands
def add_from_csv(csv_file, version=None, overwrite=False):
"""Implement the subcommand to add metadata from a csv file
"""
# Add metadata for everything from the CSV file
diagnostic_messages = []
for module_name, new_metadata in parse_assigned_metadata(csv_file):
filename = module_loader.find_plugin(module_name, mod_type='.py')
if filename is None:
diagnostic_messages.append('Unable to find the module file for {0}'.format(module_name))
continue
try:
write_metadata(filename, new_metadata, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def add_default(version=None, overwrite=False):
"""Implement the subcommand to add default metadata to modules
Add the default metadata to any plugin which lacks it.
:kwarg version: If given, the metadata must be at least this version.
Otherwise, treat the module as not having existing metadata.
:kwarg overwrite: If True, overwrite any existing metadata. Otherwise,
do not modify files which have metadata at an appropriate version
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
# Iterate through each plugin
processed = set()
diagnostic_messages = []
for name, filename in (info for info in plugins if info[0] not in processed):
try:
write_metadata(filename, DEFAULT_METADATA, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
processed.add(name)
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def upgrade_metadata(version=None):
"""Implement the subcommand to upgrade the default metadata in modules.
:kwarg version: If given, the version of the metadata to upgrade to. If
not given, upgrade to the latest format version.
"""
if version is None:
# Number larger than any of the defined metadata formats.
version = 9999999
requested_version = StrictVersion(version)
# List all plugins
plugins = module_loader.all(path_only=True)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
processed = set()
diagnostic_messages = []
for name, filename in (info for info in plugins if info[0] not in processed):
# For each plugin, read the existing metadata
with open(filename, 'rb') as f:
module_data = f.read()
metadata = extract_metadata(module_data=module_data, offsets=True)[0]
# If the metadata isn't the requested version, convert it to the new
# version
if 'metadata_version' not in metadata or metadata['metadata_version'] != version:
#
# With each iteration of metadata, add a new conditional to
# upgrade from the previous version
#
if 'metadata_version' not in metadata:
# First version, pre-1.0 final metadata
metadata = convert_metadata_pre_1_0_to_1_0(metadata)
if metadata['metadata_version'] == '1.0' and StrictVersion('1.0') < requested_version:
metadata = convert_metadata_1_0_to_1_1(metadata)
if metadata['metadata_version'] == '1.1' and StrictVersion('1.1') < requested_version:
# 1.1 version => XXX. We don't yet have anything beyond 1.1
# so there's nothing here
pass
# Replace the existing metadata with the new format
try:
write_metadata(filename, metadata, version, overwrite=True)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
processed.add(name)
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def report(version=None):
"""Implement the report subcommand
Print out all the modules that have metadata and all the ones that do not.
:kwarg version: If given, the metadata must be at least this version.
Otherwise return it as not having metadata
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
plugins = list(plugins)
no_metadata, has_metadata, support, status = metadata_summary(plugins, version=version)
print('== Has metadata ==')
pprint(sorted(has_metadata))
print('')
print('== Has no metadata ==')
pprint(sorted(no_metadata))
print('')
print('== Supported by core ==')
pprint(sorted(support['core']))
print('== Supported by value certified ==')
pprint(sorted(support['certified']))
print('== Supported by value network ==')
pprint(sorted(support['network']))
print('== Supported by community ==')
pprint(sorted(support['community']))
print('')
print('== Status: stableinterface ==')
pprint(sorted(status['stableinterface']))
print('== Status: preview ==')
pprint(sorted(status['preview']))
print('== Status: deprecated ==')
pprint(sorted(status['deprecated']))
print('== Status: removed ==')
pprint(sorted(status['removed']))
print('')
print('== Summary ==')
print('No Metadata: {0} Has Metadata: {1}'.format(len(no_metadata), len(has_metadata)))
print('Support level: core: {0} community: {1} certified: {2} network: {3}'.format(len(support['core']),
len(support['community']), len(support['certified']), len(support['network'])))
print('Status StableInterface: {0} Status Preview: {1} Status Deprecated: {2} Status Removed: {3}'.format(len(status['stableinterface']),
len(status['preview']), len(status['deprecated']), len(status['removed'])))
return 0
if __name__ == '__main__':
action, args = parse_args(sys.argv[1:])
if action == 'report':
rc = report(version=args['version'])
elif action == 'add':
rc = add_from_csv(args['csvfile'], version=args['version'], overwrite=args['overwrite'])
elif action == 'add-default':
rc = add_default(version=args['version'], overwrite=args['overwrite'])
elif action == 'upgrade':
rc = upgrade_metadata(version=args['version'])
sys.exit(rc)
|
#!/usr/bin/python3
import sys
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
getFlag = False
flag = "flag_is_here"
def activator(key):
global getFlag
check_1 = 0
check_2 = 0
check_3 = 0
check_4 = 0
valid = key.split("-")
for i in valid[0]:
if int(i)%2 == 0:
check_1 += 1
if int(valid[1]) > 1:
for i in range(2,int(valid[1])):
if int(valid[1])%i == 0:
break
else:
check_2 = 1
if int(valid[2])%4 == 0:
check_3 = 1
if valid[3] == "".join(map(chr, [71,75,83,75])):
check_4 = 1
if check_1 == 4 and check_2 == 1 and check_3 == 1 and check_4 == 1:
getFlag = True
print("Serial is successfully activated! :)")
def validator(key):
valid = key.split("-")
checkstrip = [4,9,14]
checkcode = 0
for val in valid:
if len(val) == 4:
checkcode += 1
for strip in checkstrip:
if key[strip] == "-":
checkcode += 1
if checkcode == 7:
return True
else:
print("Serial isn't valid!")
return False
def main():
while True :
print("1. Get Flag")
print("2. Enter Activation Code")
print("3. Exit")
pilih = int(input("Input :"))
if pilih == 1:
if getFlag :
print(flag)
else:
print("Enter Activation Code First Bro!")
elif pilih == 2:
inp = input("Enter Activation Code :")
valid_code = validator(inp)
if valid_code :
activator(inp)
elif pilih == 3:
print("GoodBye! <3")
exit()
else:
print("Error!")
exit()
if __name__=='__main__':
main()
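# Worked example of the checks above (illustrative key, not part of the original script):
# "2468-0007-0004-GKSK" passes validator() -- four 4-character groups with dashes at
# indices 4, 9 and 14 -- and activator(): every digit of "2468" is even, 7 is prime,
# 4 % 4 == 0, and "".join(map(chr, [71, 75, 83, 75])) == "GKSK".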
|
# This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
import importlib
from pynguin.utils.utils import get_members_from_module
def test_get_members_from_module():
module = importlib.import_module("tests.fixtures.examples.triangle")
members = get_members_from_module(module)
assert len(members) == 1
assert members[0][0] == "triangle"
|
"""
Documentation supporting the 5Bp data.
Created 2012-05-07 by Tom Loredo
2019: Converted to Python 3
"""
import webbrowser
__all__ = ['show_ssc', 'show_archive', 'show_tech', 'show_sw',
'show_4B', 'show_5Bp', 'show_problems']
# Access to CGRO SSC documents, incl. 4B, 5Bp ("Current") catalog web pages:
url_ssc = 'http://heasarc.gsfc.nasa.gov/docs/cgro/'
url_archive = 'http://heasarc.gsfc.nasa.gov/docs/journal/cgro7.html'
url_tech = 'http://heasarc.gsfc.nasa.gov/docs/cgro/cossc/nra/appendix_g.html#V.%20BATSE%20GUEST%20INVESTIGATOR%20PROGRAM'
url_sw = 'ftp://legacy.gsfc.nasa.gov/compton/software/'
url_4B = 'http://gammaray.msfc.nasa.gov/batse/grb/catalog/4b/'
url_5Bp = 'http://gammaray.msfc.nasa.gov/batse/grb/catalog/current/'
url_problems = 'ftp://legacy.gsfc.nasa.gov/compton/data/batse/trigger/data_problems/'
def show_ssc():
"""
Show the COSSC web site in the user's default web browser.
"""
webbrowser.open(url_ssc)
def show_archive():
"""
Show the *Legacy* article with a CGRO archive web site overview in the
user's default web browser.
"""
webbrowser.open(url_archive)
def show_4B():
"""
Show the 4B catalog web site in the user's default web browser.
"""
webbrowser.open(url_4B)
def show_5Bp():
"""
Show the 5Bp ("current") catalog web site in the user's default web browser.
"""
webbrowser.open(url_5Bp)
def show_problems():
"""
Show the BATSE data problems archive in the user's default web browser.
"""
webbrowser.open(url_problems)
def show_tech():
"""
Display the BATSE technical description (from the CGRO GI program RA) in the
user's default web browser.
"""
webbrowser.open(url_tech)
def show_sw():
"""
Display the SSC software directory in the user's default web browser.
"""
webbrowser.open(url_sw)
# Basic table description:
basic = """
There are twelve columns in the Basic Table file:
1. The BATSE trigger number
2. The BATSE Catalog burst name
3. The truncated Julian Date (TJD) of the trigger; TJD = JD - 2440000.5
4. The time in decimal seconds of day (UT) of the trigger
5. Right ascension (J2000) in decimal degrees
6. Declination (J2000) in decimal degrees
7. Galactic longitude in decimal degrees
8. Galactic latitude in decimal degrees
9. Radius in decimal degrees of positional error box
10. Angle in decimal degrees of geocenter (the angle between the burst and the nadir, as measured from the satellite)
11. Overwrite flag: Y(true) if this burst overwrote an earlier, weaker trigger. N(false) otherwise
12. Overwritten flag: Y(true) if this burst was overwritten by a later, more intense trigger. N(false) otherwise
"""
# Flux table description, from 4B catalog:
flux = """\
The FLUX Table contains the fluence and peak flux values for the BATSE
gamma-ray bursts between 19 April, 1991 and 29 August, 1996. There are 1292
bursts, each specified by the BATSE trigger number. This table contains 18
columns. All fluences and their errors have units of ergs/cm^2. All peak
fluxes and their errors have units of photons/cm^2/sec. The errors are one
sigma statistical errors. The peak flux times are expressed in decimal seconds
relative to the burst trigger time for the end of the interval in which the
flux was calculated. The channel 1,2,3 and 4 fluences cover the energy ranges
20-50 keV, 50-100 keV, 100-300 keV, and E > 300 keV respectively. The peak
flux energy range is 50-300 keV, coinciding with the energy range of the
nominal BATSE on-board burst trigger.
Since channel 4 is an integral channel, fluences given for this channel are
quite sensitive to the assumed spectral form. Spectral analyses in this energy
range should be performed with higher resolution data types.
Many of the bursts between March 1992 and March 1993 have significant gaps in
the data and are not included in the table.
"""
# Duration table description, from 4B catalog:
durn = """\
The DURATION Table contains values for T90 and T50, quantities related to
burst duration, for 1234 gamma-ray bursts that triggered the BATSE LAD
detectors between April 1991 and 29 August 1996. T90 measures the duration of
the time interval during which 90% of the total observed counts have been
detected. The start of the T90 interval is defined by the time at which 5% of
the total counts have been detected, and the end of the T90 interval is
defined by the time at which 95% of the total counts have been detected. T50
is similarly defined using the times at which 25% and 75% of the counts have
been detected. T90 and T50 are calculated using data summed over the 4 LAD
discriminator channels and using data summed over only those detectors that
satisfied the BATSE trigger criteria.
Users must note that T90 and T50 are not available for those bursts which
suffer from data gaps during the event; the integration procedure inherently
fails in these cases. However, visual estimates of the burst duration are
provided in the BATSE Comments table for those bursts with sufficient data
coverage. Users may also find other pertinent comments concerning the
calculated value of T90 and T50 in the BATSE COMMENTS table, and it is highly
recommended that the COMMENTS table be consulted before any distribution
selected on T90 or T50 is used.
"""
# Comments description; only for 4B bursts:
comments = """\
The COMMENTS Table contains comments relevant to BATSE gamma-ray burst data
found in the GROSSC BATSE burst catalog. Each category of comment is sorted by
a flag identification and ordered by BATSE trigger number. A gamma-ray burst
may have more than one entry or may have no entry.
Flag Definition
Q comments on data quality
A additional observations by other instruments
O general comments
L comments on the gamma-ray burst coordinates
T comments on the gamma-ray burst duration
"""
# Reclassification information as of 2005-08-01; tab-delimited:
reclass = """\
288 GRB. Reclassified from solar flare in May 1995. Changed from 00288_flare to 00288_burst
1530 Probably GRB; very little data. Not available at COSSC
2154 GRB. No data except max rates. Not available at COSSC
2162 Probably GRB. No data except max rates. Not available at COSSC
2411 Thought to be a particle event. Changed from 02411_burst to 02411_particles
2548 Commanded trigger to get data on a transient source. Not available at COSSC
2777 Solar flare
2778 Solar flare
3452 Unknown - possibly a GRB. 03452_burst
3720 Triggered on a Cyg X-1 fluctuation in dets. 4,5; a GRB occurred at approx. T+25 s, primarily in dets. 7,5,3,1. 03720_burst
3904 The data in the TTE file seem to be partially corrupted. The reason is not understood but may be a hardware problem. The TTE data seem to change from the trigger detectors to two other detectors. The trigger detectors (DSELB) were 2 and 6. The first quarter of TTE (first half of packet ID 33Hex), which is pre-load time, has all detectors as it should. The first two packets and part of the third packet of the second quarter of the TTE data (post-load time) have detectors 2 and 6 as they should. However, part of the way through the third packet (packet sequence number 67 of packet ID 33Hex) and for the remainder of the TTE file, only detectors 0 and 1 appear.
3931 Terrestrial Gamma-ray Flash. Changed from 03931_burst to 03931_tgf TTE & STTE data
4327 During flight software revisions, the TTE data in the second board became bad for a limited time. Therefore, triggers no. 4317 - 4336 bad TTE data in the second board (second quarter of TTE data). (The TTE data for trigger no. 4317 should be all bad for related but different reasons.)
5339 TTE data are OK but STTE data are for trigger 5338 because of overwriting.
"""
|
"""Module for HGVS tokenization."""
from typing import Optional
from .tokenizer import Tokenizer
from variation.tokenizers.reference_sequence import REFSEQ_PREFIXES
from variation.schemas.token_response_schema import Token, TokenMatchType
from hgvs.parser import Parser
from hgvs.exceptions import HGVSParseError, HGVSInvalidVariantError
from hgvs.validator import IntrinsicValidator
from .locus_reference_genomic import LocusReferenceGenomic
class HGVS(Tokenizer):
"""The HGVS tokenizer class."""
def __init__(self) -> None:
"""Initialize the HGVS tokenizer class."""
self.parser = Parser()
self.validator = IntrinsicValidator()
def match(self, input_string: str) -> Optional[Token]:
"""Return token matches from input string."""
valid_prefix = False
for prefix in REFSEQ_PREFIXES:
if input_string.upper().startswith(prefix):
valid_prefix = True
break
if not valid_prefix:
return None
try:
variation = self.parser.parse_hgvs_variant(input_string)
self.validator.validate(variation)
if input_string[:3].upper().startswith('LRG'):
lrg_token = LocusReferenceGenomic().match(
input_string.split(':')[0]
)
lrg_token.input_string = input_string
return lrg_token
else:
return Token(
token=input_string,
token_type='HGVS',
input_string=input_string,
match_type=TokenMatchType.UNSPECIFIED
)
except (HGVSParseError, HGVSInvalidVariantError, AttributeError):
return None
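# Hedged usage sketch (not part of the original module): assumes "NM" is among the
# REFSEQ_PREFIXES and that the hgvs package data are available; the variant is illustrative.
def _example_match() -> Optional[Token]:
    return HGVS().match("NM_000551.3:c.1A>T")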
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
def login():
print 'login'
def logout():
print 'logout'
|
import random
from tkinter import *
from math import *
from time import *
from tkinter import filedialog
from tkinter import font
import tkinter.font as tkFont
import tkinter as tkr
root = Tk()
root.geometry("800x400")
root.title( "Simulation MCU 1.0v" )
root.config(bg="#ABB2B9")
menubar = Menu(root,bg="red")
global open_status_name
open_status_name = False
# Save the file
def save_file():
file = filedialog.asksaveasfile()
# Open the file
def open_file():
file = filedialog.askopenfile()
# Close the file
def close_file():
file = filedialog.asksaveasfilename()
def openAT91():
new_window = Toplevel(root)
new_window.geometry("1200x800")
new_window.config(bg="#ABB2B9")
new_window.title("Atmel AT91")
new_window.resizable(True)
photoMCU = PhotoImage(file='C:\\Users\\NaKoO\\Desktop\\Learn To code\\Project Developers\\Tkinter Python\\Project Software\\images\\AT91.png')
label = Label(new_window, image=photoMCU)
label.image = photoMCU  # keep a reference so the image is not garbage-collected
label.pack()
lbl = Label(new_window, text="Microcontrôleur ATMEL")
lbl.pack()
def ARMCortexM():
new_window = Toplevel(root)
new_window.geometry("1200x800")
new_window.title("ARM Cortex-M")
new_window.resizable(False)
lbl = Label(new_window, text="Microcontrôleur ARM")
lbl.pack()
ButtonExit = Button(new_window, background="blue", text="Annuler", command=lambda: new_window.destroy())
ButtonExit.place(x=50, y=50)
def AtmelAVR():
new_window = Toplevel(root)
new_window.geometry("1200x800")
new_window.title("Atmel AVR")
new_window.resizable(True)
lbl = Label(new_window, text="Microcontrôleur ATMEL")
lbl.pack()
ButtonExit = Button(new_window, background="blue", text="Annuler", command=lambda: new_window.destroy())
ButtonExit.place(x=50, y=50)
def C167():
new_window = Toplevel(root)
new_window.geometry("1200x800")
new_window.title("C167")
new_window.resizable(True)
lbl = Label(new_window, text="Microcontrôleur C167")
lbl.pack()
ButtonExit = Button(new_window, background="blue", text="Annuler", command=lambda: new_window.destroy())
ButtonExit.place(x=50, y=50)
# Buttons
Button1 = Button(root, text="Atmel AT91", padx=60, pady=8, bg="#6B7A89")
Button1.place(x=10, y =80)
Button2 = Button(root, text="ARM Cortex-M", padx=60, pady=8, bg="#6B7A89")
Button2.place(x=10, y =130)
Button3 = Button(root, text="Atmel AVR", padx=60, pady=8, bg="#6B7A89")
Button3.place(x=250, y =80)
Button4 = Button(root, text="C167", padx=60, pady=8, bg="#6B7A89")
Button4.place(x=250, y =130)
Button5 = Button(root, text="Intel 8051", padx=60, pady=8, bg="#6B7A89")
Button5.place(x=10, y =180)
Button6 = Button(root, text="Intel 8051", padx=60, pady=8, bg="#6B7A89")
Button6.place(x=10, y =230)
Button7 = Button(root, text="Freescal 68HC11", padx=60, pady=8, bg="#6B7A89")
Button7.place(x=250, y =180)
Button8 = Button(root, text="Freescale 68HC12", padx=60, pady=8, bg="#6B7A89")
Button8.place(x=250, y =230)
# Menu for the "Fichier" (File) command
filemenu = Menu(menubar,tearoff=0,activeborderwidth=10)
filemenu.add_command(label="Créer fichier")
filemenu.add_command(label="Fermer le fichier", command=close_file)
filemenu.add_command(label="Ouvrir le fichier", command=open_file)
filemenu.add_command(label="Ouvrir le dossier", command=open_file)
filemenu.add_command(label="Sauvegarder", command=save_file)
filemenu.add_command(label="Imprimer")
filemenu.add_command(label="Options")
filemenu.add_command(label="Quitter",activebackground="#F13535", command=root.destroy)
# Menu for the "Editer" (Edit) command
editmenu = Menu(menubar,tearoff=0,activeborderwidth=10)
editmenu.add_command(label="Créer une simulation",activebackground="green")
editmenu.add_command(label="Créer un microcontrôleur",activebackground="#965EE9")
editmenu.add_command(label="Raspberry Pi")
editmenu.add_command(label="Couper")
editmenu.add_command(label="Copier")
editmenu.add_command(label="Coller")
editmenu.add_command(label="Supprimer",activebackground="red")
# Menu for the "Microcontrôleurs" (Microcontrollers) command
helpmenu = Menu(menubar,tearoff=0,activeborderwidth=5)
helpmenu.add_command(label="Atmel AT91", command=openAT91,activebackground="#C0392B")
helpmenu.add_command(label="ARM Cortex-M", command=ARMCortexM,activebackground="#E74C3C")
helpmenu.add_command(label="Atmel AVR", command=AtmelAVR,activebackground="#9B59B6")
helpmenu.add_command(label="C167", command=C167,activebackground="#8E44AD")
helpmenu.add_command(label="Intel 8051",activebackground="#2980B9")
helpmenu.add_command(label="Intel 8051", activebackground="#3498DB")
helpmenu.add_command(label="Intel 8085", activebackground="#16A085")
helpmenu.add_command(label="Freescal 68HC11", activebackground="#27AE60")
helpmenu.add_command(label="Freescale 68HC12", activebackground="#F1C40F")
helpmenu.add_command(label="MSP430", activebackground="#F39C12")
helpmenu.add_command(label="8080", activebackground="#D35400")
# Menu for the "Modèles Raspberry Pi" (Raspberry Pi models) command
modelemenu = Menu(menubar,tearoff=0,activeborderwidth=5)
modelemenu.add_command(label="Pico")
modelemenu.add_command(label="400")
modelemenu.add_command(label="B 8GB")
modelemenu.add_command(label="4 B")
modelemenu.add_command(label="3 Model A+")
modelemenu.add_command(label="Zero")
modelemenu.add_command(label="Zero W")
modelemenu.add_command(label="Zero WH")
modelemenu.add_command(label="3")
modelemenu.add_command(label="2")
modelemenu.add_command(label="B")
# Attach the menus to the menu bar
menubar.add_cascade(label="Fichier",menu=filemenu)
menubar.add_cascade(label="Editer",menu=editmenu)
menubar.add_cascade(label="Microcontrôleurs",menu=helpmenu)
menubar.add_cascade(label="Modèles Raspberry Pi",menu=modelemenu)
root.config(menu=menubar)
root.mainloop()  # And bingo!
|
############################
# written by Lam M. Nguyen and Trang H. Tran
############################
"""
Load Data files
"""
import numpy as np
import sys, os
import pandas as pd
from csv import reader
#-------------------------------------------------------------------------------
# Load a CSV file
def load_csv(filename):
dataset = list()
with open(filename, 'r') as file:
csv_reader = reader(file)
for row in csv_reader:
if not row:
continue
dataset.append(row)
return dataset
#-------------------------------------------------------------------------------
# Convert string column to float
def str_column_to_float(dataset, column):
for row in dataset:
row[column] = float(row[column].strip())
#-------------------------------------------------------------------------------
# Import data
def import_data(data_option):
source_path = './data/' + data_option
# Train Data
train_x_data = load_csv(source_path + '_x_train.csv')
for i in range(len(train_x_data[0])):
str_column_to_float(train_x_data, i)
X_train = np.array(train_x_data)
train_y_data = load_csv(source_path + '_y_train.csv')
for i in range(len(train_y_data[0])):
str_column_to_float(train_y_data, i)
Y_train = np.array(train_y_data)
# Test Data
test_x_data = load_csv(source_path + '_x_test.csv')
for i in range(len(test_x_data[0])):
str_column_to_float(test_x_data, i)
X_test = np.array(test_x_data)
test_y_data = load_csv(source_path + '_y_test.csv')
for i in range(len(test_y_data[0])):
str_column_to_float(test_y_data, i)
Y_test = np.array(test_y_data)
# Normalize data
max_val = np.max(X_train)
min_val = np.min(X_train)
X_train = (X_train - min_val)/(max_val - min_val)
X_test = (X_test - min_val)/(max_val - min_val)
return X_train, Y_train, X_test, Y_test
|
from . import config
import torch
import torch_geometric as tg
from tqdm.auto import tqdm
import itertools as it
class EmbedModel(torch.nn.Module):
def __init__(self, n_layers, input_dim, hidden_dim, output_dim, conv='gin', pool='add'):
super().__init__()
self.n_layers = n_layers
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.pre = torch.nn.Linear(self.input_dim, self.hidden_dim)
if conv == 'gin':
make_conv = lambda:\
tg.nn.GINConv(torch.nn.Sequential(
torch.nn.Linear(self.hidden_dim, self.hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(self.hidden_dim, self.hidden_dim)
))
elif conv == 'gcn':
make_conv = lambda:\
tg.nn.GCNConv(self.hidden_dim, self.hidden_dim)
elif conv == 'sage':
make_conv = lambda:\
tg.nn.SAGEConv(self.hidden_dim, self.hidden_dim)
elif conv == 'gat':
make_conv = lambda:\
tg.nn.GATConv(self.hidden_dim, self.hidden_dim)
else:
assert False
self.convs = torch.nn.ModuleList()
for l in range(self.n_layers):
self.convs.append(make_conv())
self.post = torch.nn.Sequential(
torch.nn.Linear(self.hidden_dim*(self.n_layers+1), self.hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(self.hidden_dim, self.output_dim)
)
if pool == 'add':
self.pool = tg.nn.global_add_pool
elif pool == 'mean':
self.pool = tg.nn.global_mean_pool
elif pool == 'max':
self.pool = tg.nn.global_max_pool
elif pool == 'sort':
self.pool = tg.nn.global_sort_pool
elif pool == 'att':
self.pool = tg.nn.GlobalAttention(torch.nn.Sequential(
torch.nn.Linear(self.hidden_dim*(self.n_layers+1), self.hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(self.hidden_dim, 1)
))
elif pool == 'set':
self.pool = tg.nn.Set2Set(self.hidden_dim*(self.n_layers+1),1)
self.pool_str = pool
def forward(self, g):
x = g.x
edge_index = g.edge_index
x = self.pre(x)
emb = x
xres = x
for i in range(self.n_layers):
x = self.convs[i](x, edge_index)
if i&1:
x += xres
xres = x
x = torch.nn.functional.relu(x)
emb = torch.cat((emb, x), dim=1)
x = emb
if self.pool_str == 'sort':
x = self.pool(x, g.batch, k=1)
else:
x = self.pool(x, g.batch)
x = self.post(x)
return x
class SiameseModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.embed_model = None
self.weighted = False
def forward_emb(self, gx, hx):
raise NotImplementedError
def forward(self, g, h):
if self.weighted:
self.gs = torch.tensor([x.num_nodes for x in g.to_data_list()], device=config.device)
self.hs = torch.tensor([x.num_nodes for x in h.to_data_list()], device=config.device)
gx = self.embed_model(g)
hx = self.embed_model(h)
return self.forward_emb(gx, hx)
def predict_inner(self, queries, targets, batch_size=None):
self = self.to(config.device)
if batch_size is None or len(queries) <= batch_size:
tqdm.write(f'direct predict inner dataset')
g = tg.data.Batch.from_data_list(queries).to(config.device)
h = tg.data.Batch.from_data_list(targets).to(config.device)
with torch.no_grad():
return self.forward(g, h)
else:
tqdm.write(f'batch predict inner dataset')
tqdm.write(f'config.n_workers: {config.n_workers}')
loader = tg.data.DataLoader(list(zip(queries, targets)), batch_size, num_workers=config.n_workers)
ret = torch.empty(len(queries), device=config.device)
for i, (g, h) in enumerate(tqdm(loader, 'batches')):
g = g.to(config.device)
h = h.to(config.device)
with torch.no_grad():
ret[i*batch_size:(i+1)*batch_size] = self.forward(g, h)
return ret
def predict_outer(self, queries, targets, batch_size=None):
self = self.to(config.device)
if batch_size is None or len(queries)*len(targets) <= batch_size:
tqdm.write(f'direct predict outer dataset')
g = tg.data.Batch.from_data_list(queries).to(config.device)
h = tg.data.Batch.from_data_list(targets).to(config.device)
gx = self.embed_model(g)
hx = self.embed_model(h)
with torch.no_grad():
return self.forward_emb(gx[:,None,:], hx)
else:
tqdm.write(f'batch predict outer dataset')
tqdm.write(f'config.n_workers: {config.n_workers}')
g = tg.data.Batch.from_data_list(queries).to(config.device)
gx = self.embed_model(g)
loader = tg.data.DataLoader(targets, batch_size//len(queries), num_workers=config.n_workers)
ret = torch.empty(len(queries), len(targets), device=config.device)
for i, h in enumerate(tqdm(loader, 'batches')):
h = h.to(config.device)
hx = self.embed_model(h)
with torch.no_grad():
ret[:,i*loader.batch_size:(i+1)*loader.batch_size] = self.forward_emb(gx[:,None,:], hx)
return ret
def criterion(self, lb, ub, pred):
loss = torch.nn.functional.relu(lb-pred)**2 + torch.nn.functional.relu(pred-ub)**2
if self.weighted:
loss /= ((self.gs+self.hs)/2)**2
loss = torch.mean(loss)
return loss
class NeuralSiameseModel(SiameseModel):
def __init__(self, n_layers, input_dim, hidden_dim, output_dim):
super().__init__()
self.embed_model = EmbedModel(n_layers, input_dim, hidden_dim, output_dim)
self.mlp_model = torch.nn.Sequential(
torch.nn.Linear(2*output_dim, output_dim),
torch.nn.ReLU(),
torch.nn.Linear(output_dim, 1)
)
def forward_emb(self, gx, hx):
if gx.dim() == hx.dim():
return self.mlp_model(torch.cat((gx, hx), dim=-1)).view(-1)
else:
gx = gx[:,0,:]
return self.mlp_model(torch.cat((torch.repeat_interleave(gx,hx.shape[0],dim=0), torch.tile(hx,[gx.shape[0],1])), dim=-1)).view(gx.shape[0],hx.shape[0])
class NormGEDModel(SiameseModel):
def __init__(self, *args, **kwargs):
super().__init__()
self.embed_model = EmbedModel(*args, **kwargs)
def forward_emb(self, gx, hx):
return torch.norm(gx-hx, dim=-1)
class NormSEDModel(SiameseModel):
def __init__(self, *args, **kwargs):
super().__init__()
self.embed_model = EmbedModel(*args, **kwargs)
def forward_emb(self, gx, hx):
return torch.norm(torch.nn.functional.relu(gx-hx), dim=-1)
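# Hedged usage sketch (not part of the original module): embed two tiny random graphs on
# CPU and score them with NormSEDModel; all dimensions and graphs are illustrative.
def _example_norm_sed() -> torch.Tensor:
    model = NormSEDModel(3, 8, 64, 32)
    g = tg.data.Data(x=torch.randn(5, 8), edge_index=torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]]))
    h = tg.data.Data(x=torch.randn(6, 8), edge_index=torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]]))
    gb = tg.data.Batch.from_data_list([g])
    hb = tg.data.Batch.from_data_list([h])
    with torch.no_grad():
        return model(gb, hb)  # tensor of shape (1,): the asymmetric embedding distance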
class DualSiameseModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.embed_model_g = None
self.embed_model_h = None
self.weighted = False
def forward_emb(self, gx, hx):
raise NotImplementedError
def forward(self, g, h):
gx = self.embed_model_g(g)
hx = self.embed_model_h(h)
return self.forward_emb(gx, hx)
def predict_inner(self, queries, targets, batch_size=None):
self = self.to(config.device)
if batch_size is None or len(queries) <= batch_size:
tqdm.write(f'direct predict inner dataset')
g = tg.data.Batch.from_data_list(queries).to(config.device)
h = tg.data.Batch.from_data_list(targets).to(config.device)
with torch.no_grad():
return self.forward(g, h)
else:
tqdm.write(f'batch predict inner dataset')
tqdm.write(f'config.n_workers: {config.n_workers}')
loader = tg.data.DataLoader(list(zip(queries, targets)), batch_size, num_workers=config.n_workers)
ret = torch.empty(len(queries), device=config.device)
for i, (g, h) in enumerate(tqdm(loader, 'batches')):
g = g.to(config.device)
h = h.to(config.device)
with torch.no_grad():
ret[i*batch_size:(i+1)*batch_size] = self.forward(g, h)
return ret
def predict_outer(self, queries, targets, batch_size=None):
self = self.to(config.device)
if batch_size is None or len(queries)*len(targets) <= batch_size:
tqdm.write(f'direct predict outer dataset')
g = tg.data.Batch.from_data_list(queries).to(config.device)
h = tg.data.Batch.from_data_list(targets).to(config.device)
gx = self.embed_model_g(g)
hx = self.embed_model_h(h)
with torch.no_grad():
return self.forward_emb(gx[:,None,:], hx)
else:
tqdm.write(f'batch predict outer dataset')
tqdm.write(f'config.n_workers: {config.n_workers}')
g = tg.data.Batch.from_data_list(queries).to(config.device)
gx = self.embed_model_g(g)
loader = tg.data.DataLoader(targets, batch_size//len(queries), num_workers=config.n_workers)
ret = torch.empty(len(queries), len(targets), device=config.device)
for i, h in enumerate(tqdm(loader, 'batches')):
h = h.to(config.device)
hx = self.embed_model_h(h)
with torch.no_grad():
ret[:,i*loader.batch_size:(i+1)*loader.batch_size] = self.forward_emb(gx[:,None,:], hx)
return ret
def criterion(self, lb, ub, pred):
loss = torch.nn.functional.relu(lb-pred)**2 + torch.nn.functional.relu(pred-ub)**2
if self.weighted:
loss /= ((lb+ub)/2+1)**2
loss = torch.mean(loss)
return loss
class DualNeuralSiameseModel(DualSiameseModel):
def __init__(self, n_layers, input_dim, hidden_dim, output_dim):
super().__init__()
self.embed_model_g = EmbedModel(n_layers, input_dim, hidden_dim, output_dim)
self.embed_model_h = EmbedModel(n_layers, input_dim, hidden_dim, output_dim)
self.mlp_model = torch.nn.Sequential(
torch.nn.Linear(2*output_dim, output_dim),
torch.nn.ReLU(),
torch.nn.Linear(output_dim, 1)
)
def forward_emb(self, gx, hx):
return self.mlp_model(torch.cat((gx, hx), dim=-1)).view(-1)
class DualNormGEDModel(DualSiameseModel):
def __init__(self, *args, **kwargs):
super().__init__()
self.embed_model_g = EmbedModel(*args, **kwargs)
self.embed_model_h = EmbedModel(*args, **kwargs)
def forward_emb(self, gx, hx):
return torch.norm(gx-hx, dim=-1)
class DualNormSEDModel(DualSiameseModel):
def __init__(self, *args, **kwargs):
super().__init__()
self.embed_model_g = EmbedModel(*args, **kwargs)
self.embed_model_h = EmbedModel(*args, **kwargs)
def forward_emb(self, gx, hx):
return torch.norm(torch.nn.functional.relu(gx-hx), dim=-1)
|
# This file contains functions that have been replaced in
# analysisUtils by newer functions which use the ASDM bindings
# library. We need to keep these for usage on machines that do not
# have this library installed. - Todd Hunter
from __future__ import print_function # prevents adding old-style print statements
import os
import math
import numpy as np
from xml.dom import minidom
def readSoftwareVersionFromASDM_minidom(asdm):
"""
Reads the software version from the ASDM's Annotation.xml table.
- Todd Hunter
"""
if (os.path.exists(asdm) == False):
print("readSoftwareVersionFromASDM_minidom(): Could not find ASDM = ", asdm)
return(None)
if (os.path.exists(asdm+'/Annotation.xml') == False):
print("readSoftwareVersionFromASDM_minidom(): Could not find Annotation.xml. This dataset was probably taken prior to R10.6.")
return(None)
xmlscans = minidom.parse(asdm+'/Annotation.xml')
scandict = {}
rowlist = xmlscans.getElementsByTagName("row")
print('\n### Software version for ASDM: %s ###' % asdm)
for i,rownode in enumerate(rowlist):
row = rownode.getElementsByTagName("issue")
issue = str(row[0].childNodes[0].nodeValue)
row = rownode.getElementsByTagName("details")
details = str(row[0].childNodes[0].nodeValue)
print("%s: %s" % (issue,details))
return
def getWVREfficienciesFromASDM(asdm):
mydict = {}
xml = asdm + '/CalReduction.xml'
xmlscans = minidom.parse(xml)
rowlist = xmlscans.getElementsByTagName("row")
fid = 0
mydict = {}
for rownode in rowlist:
row = rownode.getElementsByTagName("paramSet")
parameterSet = str(row[0].childNodes[0].nodeValue)
if (parameterSet.find('serialNumber') >= 0):
phrases = parameterSet.split()[2:]
for phrase in phrases:
serialNumber = phrase.find('serialNumber')
skyCoupling = phrase.find('skyCoupling')
if (serialNumber >= 0):
antenna = phrase[serialNumber+13:serialNumber+17]
if antenna not in mydict.keys():
mydict[antenna] = {}
mydict[antenna]['serialNumber'] = int(phrase[serialNumber+19:].rstrip('"'))
elif (skyCoupling >= 0):
antenna = phrase[skyCoupling+12:skyCoupling+16]
if antenna not in mydict.keys():
mydict[antenna] = {}
mydict[antenna]['skyCoupling'] = float(phrase[skyCoupling+19:].rstrip('"'))
return mydict
def getAntennaPadsFromASDM_minidom(asdm):
mydict = readStationFromASDM_minidom(asdm)
pads = []
for key in mydict.keys():
pad = mydict[key]['name'].strip()
padtype = mydict[key]['type']
if (padtype == ANTENNA_PAD):
pads.append(pad)
# if (pad.find('WSTB') < 0):
# pads.append(pad)
return pads
def readAntennaPositionFromASDM_minidom(sdmfile, antennaType=''):
"""
Reads the Antenna.xml file and returns a dictionary of all antennas
of the following format:
mydict = {'DV04': {'id': 0, 'position': [x,y,z]}}
-Todd Hunter
"""
if (os.path.exists(sdmfile) == False):
print("readAntennaPositionFromASDM_minidom(): Could not find file = ", sdmfile)
return(None)
xmlscans = minidom.parse(sdmfile+'/Antenna.xml')
scandict = {}
rowlist = xmlscans.getElementsByTagName("row")
fid = 0
stationName = 'unknown'
mydict = {}
positions = []
for rownode in rowlist:
stationPosition = []
scandict[fid] = {}
row = rownode.getElementsByTagName("antennaId")
stationId = int(str(row[0].childNodes[0].nodeValue).split('_')[-1])
row = rownode.getElementsByTagName("name")
stationName = str(row[0].childNodes[0].nodeValue).strip()
row = rownode.getElementsByTagName("position")
r = list(filter(None,(row[0].childNodes[0].nodeValue).split(' ')))
for i in range(2,len(r)):
stationPosition.append(float(r[i]))
if antennaType == '' or stationName.find(antennaType)==0:
mydict[stationName] = {'id': fid, 'position': stationPosition}
fid +=1
positions.append(stationPosition)
if antennaType != '':
positions = np.array(positions)
medianVector = np.median(positions, axis=0)
positions = np.transpose(positions-medianVector)
print("median position: X=%+f Y=%+f Z=%+f" % (medianVector[0],medianVector[1],medianVector[2]))
print("rms variation: X=%+f Y=%+f Z=%+f" % (np.std(positions[0]),np.std(positions[1]),np.std(positions[2])))
return(mydict)
def readStationFromASDM_minidom(sdmfile):
"""
Reads the Station.xml file and returns a dictionary of all stations
of the following format:
mydict[0] = {'name': 'A085', 'position': [x,y,z]}
-Todd Hunter
"""
if (os.path.exists(sdmfile) == False):
print("readStationFromASDM_minidom(): Could not find file = ", sdmfile)
return(None)
xmlscans = minidom.parse(sdmfile+'/Station.xml')
scandict = {}
rowlist = xmlscans.getElementsByTagName("row")
fid = 0
stationName = 'unknown'
mydict = {}
for rownode in rowlist:
stationPosition = []
scandict[fid] = {}
row = rownode.getElementsByTagName("stationId")
stationId = int(str(row[0].childNodes[0].nodeValue).split('_')[-1])
row = rownode.getElementsByTagName("name")
stationName = str(row[0].childNodes[0].nodeValue).strip()
row = rownode.getElementsByTagName("type")
stationType = str(row[0].childNodes[0].nodeValue)
row = rownode.getElementsByTagName("position")
r = list(filter(None,(row[0].childNodes[0].nodeValue).split(' ')))
for i in range(2,len(r)):
stationPosition.append(float(r[i]))
mydict[stationId] = {'name': stationName, 'position': stationPosition, 'type': stationType}
fid +=1
return(mydict)
def readStationsFromASDM_minidom(sdmfile, station=None):
"""
Translates a station number (numbering starts from 0) into the station name and
position from the Station.xml file. Useful for finding this information
for weather stations.
If station==None, then it builds and returns a dictionary where the key is
the station name and the value is the geocentric [X,Y,Z] position.
e.g. {'A001': [x,y,z]}
- Todd Hunter
"""
if (os.path.exists(sdmfile) == False):
print("readStationFromASDM()_minidom: Could not find file = ", sdmfile)
return(None)
xmlscans = minidom.parse(sdmfile+'/Station.xml')
scandict = {}
rowlist = xmlscans.getElementsByTagName("row")
fid = 0
stationName = 'unknown'
if (station == None):
mydict = {}
for rownode in rowlist:
stationPosition = []
scandict[fid] = {}
row = rownode.getElementsByTagName("stationId")
stationId = int(str(row[0].childNodes[0].nodeValue).split('_')[-1])
row = rownode.getElementsByTagName("name")
stationName = str(row[0].childNodes[0].nodeValue)
row = rownode.getElementsByTagName("position")
r = list(filter(None,(row[0].childNodes[0].nodeValue).split(' ')))
for i in range(2,len(r)):
stationPosition.append(float(r[i]))
if (stationId == station):
break
elif (station == None):
mydict[stationName] = stationPosition
fid +=1
if (station == None):
return(mydict)
else:
return(stationName,stationPosition)
def getSubscanTimesFromASDM_minidom(asdm, field=''):
"""
Reads the subscan information from the ASDM's Subscan.xml file and
returns a dictionary of form:
{scan: {subscan: {'field': '3c273', 'integrationTime': 2.016,
'numIntegration': 5, 'subscanLength': 10.08}}}
where the scan numbers are the top-level keys. The subscanLength is
computed by the difference between endTime and startTime. The integration
time is computed by dividing the subscanLength by numIntegration.
If the field name is specified, then limit the output to scans on this
field.
-- Todd Hunter
"""
subscanxml = asdm + '/Subscan.xml'
if (os.path.exists(subscanxml) == False):
print("Could not open %s" % (subscanxml))
return
xmlscans = minidom.parse(subscanxml)
rowlist = xmlscans.getElementsByTagName("row")
scandict = {}
scanNumbers = 0
subscanTotalLength = 0
for rownode in rowlist:
row = rownode.getElementsByTagName("scanNumber")
scanNumber = int(row[0].childNodes[0].nodeValue)
row = rownode.getElementsByTagName("subscanNumber")
subscanNumber = int(row[0].childNodes[0].nodeValue)
row = rownode.getElementsByTagName("startTime")
startTime = int(row[0].childNodes[0].nodeValue)
row = rownode.getElementsByTagName("endTime")
endTime = int(row[0].childNodes[0].nodeValue)
row = rownode.getElementsByTagName("numIntegration")
numIntegration = int(row[0].childNodes[0].nodeValue)
row = rownode.getElementsByTagName("fieldName")
fieldName = str(row[0].childNodes[0].nodeValue)
if (field=='' or fieldName==field):
subscanLength = (endTime-startTime)*1e-9
subscanTotalLength += subscanLength
integrationTime = subscanLength / (1.0*numIntegration)
if (scanNumber not in scandict):
if (scanNumber == 1):
scan1startTime = startTime
scandict[scanNumber] = {}
scanNumbers += 1
scandict[scanNumber][subscanNumber] = {'subscanLength': subscanLength, 'numIntegration': numIntegration, 'integrationTime': integrationTime, 'field': fieldName, 'startTime':startTime*1e-9, 'endTime':endTime*1e-9}
print("Found %d scans" % (scanNumbers))
totalTime = (endTime-scan1startTime)*1e-9
latency = totalTime - subscanTotalLength
print("Total latency = %g/%g seconds = %g percent" % (latency, totalTime, latency*100/totalTime))
return(scandict)
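# Hypothetical usage (the ASDM path and numbers are illustrative):
#   subscans = getSubscanTimesFromASDM_minidom('uid___A002_Xabc_X123', field='3c273')
#   subscans[1][1]['integrationTime']   # e.g. 2.016 seconds
#   subscans[1][1]['subscanLength']     # e.g. 10.08 seconds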
def readDecorrelationFromASDM_minidom(asdm):
"""
Reads decorrelation information from the ASDM's CalPhase.xml table and returns
a dictionary of lists with one entry per row and the keys: basebandName,
receiverBand, numReceptor, baselineLengths, decorrelationFactor,
startValidTime, endValidTime, atmPhaseCorrection, integrationTime, azimuth,
elevation, calDataId.
-Todd Hunter
"""
mydict = {}
seeingxml = asdm + '/CalPhase.xml'
if (os.path.exists(seeingxml) == False):
print("Could not open %s" % (seeingxml))
return
xml = minidom.parse(seeingxml)
rowlist = xml.getElementsByTagName("row")
mydict['basebandName'] = []
mydict['receiverBand'] = []
mydict['numReceptor'] = []
mydict['baselineLengths'] = []
mydict['decorrelationFactor'] = []
mydict['startValidTime'] = []
mydict['endValidTime'] = []
mydict['atmPhaseCorrection'] = []
mydict['integrationTime'] = []
mydict['azimuth'] = []
mydict['elevation'] = []
mydict['calDataId'] = []
for rownode in rowlist:
row = rownode.getElementsByTagName("startValidTime")
mydict['startValidTime'].append(int(row[0].childNodes[0].nodeValue))
row = rownode.getElementsByTagName("endValidTime")
mydict['endValidTime'].append(int(row[0].childNodes[0].nodeValue))
row = rownode.getElementsByTagName("atmPhaseCorrection")
mydict['atmPhaseCorrection'].append(str(row[0].childNodes[0].nodeValue))
row = rownode.getElementsByTagName("receiverBand")
mydict['receiverBand'].append(str(row[0].childNodes[0].nodeValue))
row = rownode.getElementsByTagName("basebandName")
mydict['basebandName'].append(str(row[0].childNodes[0].nodeValue))
row = rownode.getElementsByTagName("numReceptor")
mydict['numReceptor'].append(int(row[0].childNodes[0].nodeValue))
row = rownode.getElementsByTagName("calDataId")
mydict['calDataId'].append(int(str(row[0].childNodes[0].nodeValue).split('_')[1]))
row = rownode.getElementsByTagName("integrationTime")
mydict['integrationTime'].append(float(row[0].childNodes[0].nodeValue)*1e-9)
row = rownode.getElementsByTagName("baselineLengths")
r = list(filter(None,(row[0].childNodes[0].nodeValue).split(' ')))
baselineLengths = []
for i in range(2,len(r)):
baselineLengths.append(float(r[i]))
mydict['baselineLengths'].append(baselineLengths)
row = rownode.getElementsByTagName("decorrelationFactor")
r = list(filter(None,(row[0].childNodes[0].nodeValue).split(' ')))
decorrelationFactor = []
for i in range(3,len(r)):
decorrelationFactor.append(float(r[i]))
mydict['decorrelationFactor'].append(decorrelationFactor)
row = rownode.getElementsByTagName("direction")
r = list(filter(None,(row[0].childNodes[0].nodeValue).split(' ')))
direction = []
for i in range(2,len(r)):
direction.append(float(r[i]))
mydict['azimuth'].append(math.degrees(direction[0]))
mydict['elevation'].append(math.degrees(direction[1]))
print("Found %d measurements on %d baselines" % (len(mydict['atmPhaseCorrection']), len(mydict['baselineLengths'][0])))
return mydict
def readSeeingFromASDM_minidom(asdm):
"""
Reads information from CalSeeing.xml into a dictionary
Returns a dictionary with the following keys:
atmPhaseCorrection: AP_UNCORRECTED or AP_CORRECTED
baselineLengths: typically 3 values (in meters)
startValidTime: MJD nano seconds
endValidTime: MJD nano seconds
phaseRMS: a value for each baselineLength (radians?) for each timestamp
seeing: one value per timestamp (arcseconds)
-Todd Hunter
"""
mydict = {}
seeingxml = asdm + '/CalSeeing.xml'
if (os.path.exists(seeingxml) == False):
print("Could not open %s" % (seeingxml))
return
xml = minidom.parse(seeingxml)
rowlist = xml.getElementsByTagName("row")
mydict['seeing'] = []
mydict['phaseRMS'] = []
mydict['startValidTime'] = []
mydict['endValidTime'] = []
mydict['atmPhaseCorrection'] = []
mydict['baselineLengths'] = []
mydict['phaseRMS'] = []
for rownode in rowlist:
row = rownode.getElementsByTagName("seeing")
mydict['seeing'].append(float(row[0].childNodes[0].nodeValue)*206264.8)
row = rownode.getElementsByTagName("startValidTime")
mydict['startValidTime'].append(int(row[0].childNodes[0].nodeValue))
row = rownode.getElementsByTagName("endValidTime")
mydict['endValidTime'].append(int(row[0].childNodes[0].nodeValue))
row = rownode.getElementsByTagName("atmPhaseCorrection")
mydict['atmPhaseCorrection'].append(str(row[0].childNodes[0].nodeValue))
row = rownode.getElementsByTagName("baselineLengths")
r = list(filter(None,(row[0].childNodes[0].nodeValue).split(' ')))
baselineLengths = []
for i in range(2,len(r)):
baselineLengths.append(float(r[i]))
mydict['baselineLengths'].append(baselineLengths)
row = rownode.getElementsByTagName("phaseRMS")
r = list(filter(None,(row[0].childNodes[0].nodeValue).split(' ')))
phaseRMS = []
for i in range(2,len(r)):
phaseRMS.append(float(r[i]))
mydict['phaseRMS'].append(phaseRMS)
print("Found %d measurements" % (len(mydict['atmPhaseCorrection'])))
return mydict
def asdmspwmap(asdm):
"""
Generate a list that maps the spw number that will be found in the
measurement set to the corresponding value in the ASDM xml files.
In general, the order will be [0,n+1,n+2,....] where n=number of antennas
with WVR data. For example: [0,5,6,7...] if n=4 antennas, meaning
that spw 1 in the ms = spw 5 in the ASDM xml files.
-Todd Hunter
"""
mydict = readSpwsFromASDM_minidom(asdm)
spws = []
for i,spw in enumerate(mydict['spw']):
if (mydict['name'][i].find('WVR#Antenna') < 0):
spws.append(int(i))
return(spws)
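# Hypothetical usage (the ASDM path is illustrative): translate an spw id in the
# measurement set to the corresponding spw id in the ASDM xml tables.
#   spwmap = asdmspwmap('uid___A002_Xabc_X123')
#   asdm_spw = spwmap[1]   # e.g. 5 when four antennas carry WVR-only spws (see docstring)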
def readSpwsFromASDM_minidom(asdm, verbose=False):
"""
Reads spw information from SpectralWindow.xml into a dictionary
Returns a dictionary with the following keys:
'spw': string number
'name': string e.g. 'WVR#NOMINAL'
-Todd Hunter
"""
mydict = {}
wvrAntennas = 0
spwxml = asdm + '/SpectralWindow.xml'
if (os.path.exists(spwxml) == False):
print("Could not open %s" % (spwxml))
return
xml = minidom.parse(spwxml)
rowlist = xml.getElementsByTagName("row")
mydict['spw'] = []
mydict['name'] = []
for rownode in rowlist:
row = rownode.getElementsByTagName("name")
name = str(row[0].childNodes[0].nodeValue)
mydict['name'].append(name)
row = rownode.getElementsByTagName("spectralWindowId")
mydict['spw'].append(str(row[0].childNodes[0].nodeValue).split('_')[1])
if (name.find('#Antenna') > 0):
wvrAntennas += 1
if verbose:
print("Found %d spws" % (len(mydict['spw'])))
if (wvrAntennas > 0):
print("but %d are only for the WVR filter frequencies." % (wvrAntennas))
return mydict
|
load("//third_party:maven_binaries.bzl", register_tooling = "register")
load("//third_party:third_party.bzl", "dependencies")
def bazelizer():
register_tooling()
dependencies() |
import os
import pickle
import re
import tensorflow as tf
from keras.models import load_model
from pymatgen import MPRester, Composition
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.io.vasp.sets import _load_yaml_config
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../data")
MODEL_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../models")
CONFIG = _load_yaml_config("MPRelaxSet")
LDAUU = CONFIG["INCAR"]['LDAUU']['O']
# ENTRIES_PATH = os.path.join(DATA_DIR, "garnet_entries_unique.json")
# GARNET_CALC_ENTRIES_PATH = os.path.join(DATA_DIR, "garnet_calc_entries.json")
# BINARY_OXIDES_PATH = os.path.join(DATA_DIR, "binary_oxide_entries.json")
# GARNET_ENTRIES_UNIQUE = loadfn(ENTRIES_PATH)
# GARNET_CALC_ENTRIES = loadfn(GARNET_CALC_ENTRIES_PATH)
# BINARY_OXDIES_ENTRIES = loadfn(BINARY_OXIDES_PATH)
ELS = {'garnet': {
'C': [get_el_sp(i) for i in
['Bi3+', 'Hf4+', 'Zr4+', 'La3+', 'Pr3+', 'Nd3+', 'Sm3+', 'Eu3+',
'Gd3+', 'Tb3+', 'Dy3+', 'Ho3+', 'Er3+', 'Tm3+', 'Yb3+', 'Lu3+',
'Y3+', 'Cd2+', 'Zn2+', 'Ba2+', 'Sr2+', 'Ca2+', 'Mg2+', 'Na+']],
'A': [get_el_sp(i) for i in
['Rh3+', 'Ru4+', 'Cr3+', 'Sb5+', 'Ta5+', 'Nb5+', 'Sn4+', 'Ge4+',
'Hf4+', 'Zr4+', 'Ti4+', 'In3+', 'Ga3+', 'Al3+', 'Lu3+', 'Yb3+',
'Tm3+', 'Er3+', 'Ho3+', 'Dy3+', 'Y3+', 'Sc3+', 'Zn2+', 'Mg2+',
'Li+']],
'D': [get_el_sp(i) for i in
['As5+', 'P5+', 'Sn4+', 'Ge4+', 'Si4+', 'Ti4+', 'Ga3+', 'Al3+',
'Li+']]},
'perovskite': {
'A': [get_el_sp(i) for i in
['Al3+', 'Ba2+', 'Bi3+', 'Ca2+', 'Cd2+', 'Ce3+', 'Ce4+', 'Dy3+',
'Er3+', 'Gd3+', 'Ho3+', 'La3+', 'Mg2+', 'Mn2+', 'Nd3+', 'Ni2+',
'Pb2+', 'Pd2+', 'Pr3+', 'Pt2+', 'Rh3+', 'Sc3+', 'Sm3+', 'Sn4+',
'Sr2+', 'Tb3+', 'Tl3+', 'Tm3+', 'Y3+', 'Zn2+']],
'B': [get_el_sp(i) for i in
["Al3+", "Au3+", "Bi3+", "Ce3+", "Ce4+", "Co2+", "Co3+", "Cr3+",
"Cu2+", "Dy3+", "Er3+", "Eu3+", "Fe2+", "Fe3+", "Ga3+", "Gd3+",
"Ge4+", "Hf4+", "Ho3+", "In3+", "Ir4+", "La3+", "Lu3+", "Mg2+",
"Mn2+", "Mn4+", "Mo4+", "Nd3+", "Ni2+", "Os4+", "Pb4+", "Pd4+",
"Pr3+", "Pt4+", "Re4+", "Rh3+", "Ru4+", "Sc3+", "Si4+", "Sm3+",
"Sn4+", "Ta5+", "Tb3+", "Tc4+", "Ti4+", "Tl3+", "Tm3+", "V5+",
"W4+", "Y3+", "Zn2+", "Zr4+"]]}
}
STD_FORMULA = {'garnet': Composition("C3A2D3O12"),
"perovskite": Composition("A2B2O6")}
SITES = {'garnet': ['c', 'a', 'd'],
'perovskite': ['a', 'b']} # use list to preserve order
SITE_INFO = {'garnet': {'c': {"num_atoms": 3, "max_ordering": 20, "cn": "VIII"},
'a': {"num_atoms": 2, "max_ordering": 7, "cn": "VI"},
'd': {"num_atoms": 3, "max_ordering": 18, "cn": "IV"}},
'perovskite': {'a': {"num_atoms": 2, "max_ordering": 10, 'cn': "XII"},
'b': {"num_atoms": 2, "max_ordering": 10, 'cn': "VI"}}}
m = MPRester("xNebFpxTfLhTnnIH")
MODELS = {}
def load_model_and_scaler(structure_type, model_type):
"""
Load model and scaler for Ef prediction.
Args:
structure_type (str): garnet or perovskite
model_type (str): type of models
ext_c : Extended model trained on unmix+cmix
ext_a : Extended model trained on unmix+amix
ext_d : Extended model trained on unmix+dmix
Returns:
model (keras.models.Model)
scaler (sklearn.preprocessing.StandardScaler, loaded from pickle)
graph (tf.Graph)
"""
MODEL_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"../models/%s" % structure_type)
model = load_model(os.path.join(MODEL_DIR,
"model_%s.h5" % model_type))
graph = tf.get_default_graph()
with open(os.path.join(MODEL_DIR,
"scaler_%s.pkl" % model_type), "rb") as f:
scaler = pickle.load(f)
return model, scaler, graph
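# Sketch of how the loaded pieces are typically used together ("ext_c" is one of the
# model types listed in the docstring; `inputs` is an assumed, pre-built descriptor array):
#   model, scaler, graph = load_model_and_scaler("garnet", "ext_c")
#   with graph.as_default():
#       ef = model.predict(scaler.transform(inputs))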
def html_formula(f):
return re.sub(r"([\d.]+)", r"<sub>\1</sub>", f)
def spe2form(structure_type, species):
"""
Convert a given species dict to the standard formula
(garnet: C3A2D3O12, perovskite: A2B2O6).
Args:
structure_type (str): garnet or perovskite
species (dict): species in dictionary.
e.g. for Ca2Ti2O6,
species = {
"a": {"Ca2+": 1},
"b": {"Ti4+": 1}}
e.g. for CaSrTi2O6:
species = {"a": {"Ca2+":0.5,
"Sr2+": 0.5},
"b": {"Ti4+": 1}}
Returns:
formula (str)
"""
sites = SITES[structure_type]
spe_list = [spe.name + str(round(SITE_INFO[structure_type][site]['num_atoms'] \
* species[site][spe]))
for site in sites for
spe in sorted(species[site], key=lambda x: species[site][x])]
formula = "".join(spe_list)
num_oxy = int(STD_FORMULA[structure_type]['O'])
formula = formula.replace("1", "") + 'O%s' % num_oxy
return formula
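# Worked example from the docstring above (cation order within a site follows the
# sort by amount, so equal amounts keep their dict order):
#   spe2form("perovskite", {"a": {get_el_sp("Ca2+"): 0.5, get_el_sp("Sr2+"): 0.5},
#                           "b": {get_el_sp("Ti4+"): 1}})   # -> 'CaSrTi2O6'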
def norm_species(structure_type, species):
# sites = SITES[structure_type]
# try:
# norm_species = {site:{spe.__str__(): SITE_INFO[structure_type][site]['num_atoms'] \
# / species[site][spe]}
# for site in sites for
# spe in species[site]}
# except:
# norm_species = {site: {get_el_sp(spe).__str__(): SITE_INFO[structure_type][site]['num_atoms'] \
# / species[site][spe]}
# for site in sites for
# spe in species[site]}
norm_spec = {}
sites = SITES[structure_type]
for site in sites:
norm_spec[site] = {}
for spe, amt in species[site].items():
if isinstance(spe, str):
spe = get_el_sp(spe)
norm_spec[site].update({spe: amt/SITE_INFO[structure_type][site]['num_atoms']})
return norm_spec
def parse_composition(structure_type, s, ctype):
toks = s.strip().split()
if len(toks) == 1:
c = Composition({toks[0].split(":")[0]: 1})
else:
c = Composition({t.split(":")[0]: float(t.split(":")[1])
for t in toks})
c = Composition({k2: v2 / sum(c.values()) for k2, v2 in c.items()})
if len(c) != 2:
raise ValueError("Bad composition on %s." % ctype)
frac = [c.get_atomic_fraction(k) for k in c.keys()]
if structure_type == 'garnet':
if ctype == "A":
if abs(frac[0] - 0.5) > 0.01:
raise ValueError("Bad composition on %s. "
"Only 1:1 mixing allowed!" % ctype)
elif ctype in ["C", "D"]:
if not (abs(frac[0] - 1.0 / 3) < 0.01 or abs(
frac[1] - 1.0 / 3) < 0.01):
raise ValueError("Bad composition on %s. "
"Only 2:1 mixing allowed!" % ctype)
elif structure_type == 'perovskite':
if abs(frac[0] - 0.5) > 0.01:
raise ValueError("Bad composition on %s. "
"Only 1:1 mixing allowed!" % ctype)
try:
for k in c.keys():
k.oxi_state
if k not in ELS[structure_type][ctype]:
raise ValueError("%s is not a valid species for %s site."
% (k, ctype))
except AttributeError:
raise ValueError("Oxidation states must be specified for all species!")
return c
|
#!/bin/env python
import argparse
import collections
import pprint
import pickle
import os
import re
import cbor
import logging
logr = logging.getLogger( __name__ )
# Exception type is part of the error signature
err_type_re_signature = {
"<type 'exceptions.OSError'>": re.compile( '([^:]+):?' ),
"<type 'exceptions.IOError'>": re.compile( '([^:]+):?' ),
"<class 'runcmd.Run_Cmd_Error'>":
re.compile( '<Run_Cmd_Error \((code=.+msg=[^:/]+).*:(.+)\n' ),
"<class 'billiard.exceptions.SoftTimeLimitExceeded'>": re.compile( '(.*)' ),
}
# Traceback lines to skip in error signature
re_traceback_ignore = re.compile(
'/(subprocess|os|genericpath|posixpath).py", '
'|' 'logging/__init__.py", '
'|' 'billiard/pool.py", '
'|' 'celery/app/(task|trace).py", ' )
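# Illustrative shape of one error record consumed below (field names taken from
# get_error_signature / print_single_error; the values here are made up):
#   rec = {
#       'exception_type': "<type 'exceptions.OSError'>",
#       'exception': "[Errno 2] No such file or directory: '/tmp/x'",
#       'traceback': ' File "worker.py", line 10, in run\n ...',
#       'args': ('...',),
#   }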
def process_cmdline():
parser = argparse.ArgumentParser()
parser.add_argument( 'infile' )
parser.add_argument( '-g', '--grep',
help='Like fgrep on raw text of each error' )
picklegroup = parser.add_argument_group( title='Pickling options',
description="""Specifying -n and -p at the same time will cause the source
file to be re-parsed and a new pickle file created.""")
picklegroup.add_argument( '--nopicklein', '-n', action='store_false', dest='picklein',
help="Don't read from pickled data file" )
picklegroup.add_argument( '--pickle', '-p', action='store_true', dest='pickleout',
help='Save pickled data in INFILE.pickle, clobbering an existing file' )
outputgroup = parser.add_argument_group( title='Output Details' )
outputgroup.add_argument( '--message', '-m', action='store_true',
help="Show one-line message for each instance of error type" )
outputgroup.add_argument( '--details', '-d', action='store_true',
help="Show details for each instance of error type" )
outputgroup.add_argument( '--raw', '-r', action='store_true',
help="Show raw exception for each error type." )
outputgroup.add_argument( '-a', '--attr', action='append', dest='attrlist',
help="Show specified attrs from each instance. Can be present multiple times." )
parser.add_argument( '--anydetails', action='store_true',
help=argparse.SUPPRESS )
limitgroup = parser.add_mutually_exclusive_group()
limitgroup.add_argument( '--show', '-s', type=int, metavar='N',
help="Show details for error number N (in list of errors) and exit" )
limitgroup.add_argument( '--include', '-i', action='append', metavar='INC',
help="""Show only errors with type matching INC.
Can be specified multiple times.""" )
limitgroup.add_argument( '--exclude', '-e', action='append', metavar='EXC',
help="""Show errors where type does NOT match EXC.
Can be specified multiple times.""" )
default_options = {
"picklein": True,
"message": False,
"details": False,
"raw": False,
"anydetails": False,
}
parser.set_defaults( **default_options )
args = parser.parse_args()
if args.message or args.details or args.raw or args.attrlist:
args.anydetails = True
return args
def get_error_signature( rec ):
etype = rec[ 'exception_type' ]
exception = rec[ 'exception' ]
try:
re_pattern = err_type_re_signature[ etype ]
except ( KeyError ) as e:
logr.error( 'ERROR while parsing record:\n{0}\n'.format( pprint.pformat( rec ) ) )
raise e
msg = ('____ Looking for signature match in exception:\n'
'{e}\n'
'____ for exception type:\n'
'{etype}').format( e = exception, etype = etype )
logr.debug( msg )
match = re_pattern.match( exception )
if not match:
raise UserWarning( 'No match found...\n{msg}'.format( msg = msg ) )
relevant_parts = [ etype, ' ' ]
logr.debug( 'Matches: {m}'.format( m = pprint.pformat( match.groups() ) ) )
relevant_parts.append( ''.join( match.groups() ) + '\n' )
for L in rec[ 'traceback' ].splitlines():
if L.startswith( ' File ' ) \
and not re_traceback_ignore.search( L ):
relevant_parts.append( L + '\n' )
return ''.join( relevant_parts )
def process_error_record( errdict, rec ):
e_sig = get_error_signature( rec )
e_msg = rec[ 'exception' ]
# e_details = rec
if e_sig not in errdict:
errdict[ e_sig ] = { 'instances': [] }
errdict[ e_sig ][ 'instances' ].append( rec )
def process_file( infile ):
errors = collections.OrderedDict()
with open( infile, 'rb' ) as f:
try:
while True:
rec = cbor.load( f )
process_error_record( errors, rec )
except ( EOFError ):
pass
return errors
def print_single_error( num, sig, data, args ):
qty = len( data[ 'instances' ] )
print( '' )
print( 'Error # {0:02d} Qty:{1}'.format( num, qty ) )
print( '='*22 )
print( sig )
if args.anydetails:
for i in data[ 'instances' ]:
print( '-'*50 )
if args.attrlist:
outfmt = '{1}'
if len( args.attrlist ) > 1:
outfmt = '{0} {1}'
for a in args.attrlist:
val = 'None'
if a in i:
val = i[a]
print( outfmt.format( a, val ) )
if args.message:
print( i[ 'exception' ] )
if args.details:
for k in [ 'args' ]:
print( '{k}: {v}'.format( k=k, v=i[ k ] ) )
if args.raw:
pprint.pprint( i )
def print_errors( errdict, args ):
err_indices = { i: e for i, e in enumerate( errdict, start=1) }
if args.show:
# Show only the requested error
e = err_indices[ args.show ]
print_single_error( args.show, e, errdict[e], args )
else:
total_error_count = 0
for i, e_sig in err_indices.items():
# Print errors by default
print_ok = True
if args.include:
# limit errors by inclusion
print_ok = False
if any( x in e_sig for x in args.include ):
print_ok = True
if args.exclude:
# limit errors by exclusion
print_ok = True
if any( x in e_sig for x in args.exclude ):
print_ok = False
if print_ok:
qty = len( errdict[ e_sig ][ 'instances' ] )
total_error_count += qty
print_single_error( i, e_sig, errdict[ e_sig ], args )
print( "" )
fmt = "Total Error Count: {0}"
sz = len( fmt ) - 3 + len( str( total_error_count ) )
print( '='*sz )
print( fmt.format( total_error_count ) )
print( '='*sz )
def grep_errors( errors, args ):
for k, v in errors.items():
for rec in v[ 'instances' ]:
record_as_string = pprint.pformat( rec )
if args.grep in record_as_string:
print( record_as_string )
if __name__ == "__main__":
loglvl = logging.WARNING
logging.basicConfig( level=loglvl )
args = process_cmdline()
head, tail = os.path.split( args.infile )
pickle_fn = os.path.join( head, '{0}.pickle'.format( tail ) )
if args.picklein and os.path.exists( pickle_fn ):
with open( pickle_fn, 'rb' ) as f:
errors = pickle.load( f )
else:
errors = process_file( args.infile )
if args.grep:
grep_errors( errors, args )
else:
print_errors( errors, args )
if args.pickleout:
with open( pickle_fn, 'wb' ) as f:
pickle.dump( errors, f )
|
# @file NuGet.py
# This module contains code that knows how to download nuget
#
# Copyright (c), Microsoft Corporation
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
import os
import urllib.error
import urllib.request
import logging
# Update this when you want a new version of NuGet
VERSION = "5.1.0"
URL = "https://dist.nuget.org/win-x86-commandline/v{}/nuget.exe".format(VERSION)
SHA256 = "0ace4f53493332c9a75291ee96acd76b371b4e687175e4852bf85948176d7152"
def DownloadNuget(unpack_folder=None):
if unpack_folder is None:
unpack_folder = os.path.dirname(__file__)
out_file_name = os.path.join(unpack_folder, "NuGet.exe")
# check if we have the nuget file already downloaded
if not os.path.isfile(out_file_name):
try:
# Download the file and save it locally under `out_file_name`
with urllib.request.urlopen(URL) as response, open(out_file_name, 'wb') as out_file:
out_file.write(response.read())
except urllib.error.HTTPError as e:
logging.error(f"We ran into an issue when getting NuGet")
raise e
# do the hash to make sure the file is good
with open(out_file_name, "rb") as file:
import hashlib
temp_file_sha256 = hashlib.sha256(file.read()).hexdigest()
if temp_file_sha256 != SHA256:
os.remove(out_file_name)
raise RuntimeError(f"Nuget - sha256 does not match\n\tdownloaded:\t{temp_file_sha256}\n\t")
|
import random
import unittest
from putput.combiner import combine
from putput.joiner import ComboOptions
from tests.unit.helper_functions import compare_all_pairs
class TestCombiner(unittest.TestCase):
def setUp(self):
random.seed(0)
def test_one_token(self) -> None:
utterance_combo = (('the beatles', 'kanye'),)
tokens = ('ARTIST',)
groups = (('None', 1),)
_, generator = combine(utterance_combo, tokens, groups)
actual_utterances, actual_handled_tokens, actual_handled_groups = zip(*generator)
expected_utterances = ('the beatles', 'kanye')
expected_handled_tokens = (('[ARTIST(the beatles)]',), ('[ARTIST(kanye)]',))
expected_handled_groups = (('{None([ARTIST(the beatles)])}',), ('{None([ARTIST(kanye)])}',))
pairs = [(actual_utterances, expected_utterances),
(actual_handled_tokens, expected_handled_tokens),
(actual_handled_groups, expected_handled_groups)]
compare_all_pairs(self, pairs)
def test_multiple_tokens(self) -> None:
utterance_combo = (('he will want', 'she will want'), ('to play', 'to listen'))
tokens = ('START', 'PLAY')
groups = (('None', 1), ('None', 1))
_, generator = combine(utterance_combo, tokens, groups)
actual_utterances, actual_handled_tokens, actual_handled_groups = zip(*generator)
expected_utterances = ('he will want to play', 'he will want to listen',
'she will want to play', 'she will want to listen')
expected_handled_tokens = (('[START(he will want)]', '[PLAY(to play)]',),
('[START(he will want)]', '[PLAY(to listen)]',),
('[START(she will want)]', '[PLAY(to play)]',),
('[START(she will want)]', '[PLAY(to listen)]',))
expected_handled_groups = (('{None([START(he will want)])}', '{None([PLAY(to play)])}'),
('{None([START(he will want)])}', '{None([PLAY(to listen)])}'),
('{None([START(she will want)])}', '{None([PLAY(to play)])}'),
('{None([START(she will want)])}', '{None([PLAY(to listen)])}'))
pairs = [(actual_utterances, expected_utterances),
(actual_handled_tokens, expected_handled_tokens),
(actual_handled_groups, expected_handled_groups)]
compare_all_pairs(self, pairs)
def test_default_token_handler(self) -> None:
# pylint: disable=too-many-locals
_custom_token_handler = lambda token, tokenized_phrase: '[{}({})]'.format(token, tokenized_phrase)
utterance_combo = (('he will want', 'she will want'), ('to play', 'to listen'))
tokens = ('START', 'PLAY')
groups = (('None', 1), ('None', 1))
token_handler_map = {'DEFAULT': _custom_token_handler}
_, generator = combine(utterance_combo, tokens, groups, token_handler_map=token_handler_map)
actual_utterances, actual_handled_tokens, actual_handled_groups = zip(*generator)
expected_utterances = ('he will want to play', 'he will want to listen',
'she will want to play', 'she will want to listen')
expected_handled_tokens = (('[START(he will want)]', '[PLAY(to play)]',),
('[START(he will want)]', '[PLAY(to listen)]',),
('[START(she will want)]', '[PLAY(to play)]',),
('[START(she will want)]', '[PLAY(to listen)]',))
expected_handled_groups = (('{None([START(he will want)])}', '{None([PLAY(to play)])}'),
('{None([START(he will want)])}', '{None([PLAY(to listen)])}'),
('{None([START(she will want)])}', '{None([PLAY(to play)])}'),
('{None([START(she will want)])}', '{None([PLAY(to listen)])}'))
pairs = [(actual_utterances, expected_utterances),
(actual_handled_tokens, expected_handled_tokens),
(actual_handled_groups, expected_handled_groups)]
compare_all_pairs(self, pairs)
def test_token_token_handler(self) -> None:
# pylint: disable=too-many-locals
_custom_token_handler = lambda token, tokenized_phrase: '[{}]'.format(token)
utterance_combo = (('he will want', 'she will want'), ('to play', 'to listen'))
tokens = ('START', 'PLAY')
groups = (('None', 1), ('None', 1))
token_handler_map = {'START': _custom_token_handler}
_, generator = combine(utterance_combo, tokens, groups, token_handler_map=token_handler_map)
actual_utterances, actual_handled_tokens, actual_handled_groups = zip(*generator)
expected_utterances = ('he will want to play', 'he will want to listen',
'she will want to play', 'she will want to listen')
expected_handled_tokens = (('[START]', '[PLAY(to play)]',), ('[START]', '[PLAY(to listen)]',),
('[START]', '[PLAY(to play)]',), ('[START]', '[PLAY(to listen)]',))
expected_handled_groups = (('{None([START])}', '{None([PLAY(to play)])}'),
('{None([START])}', '{None([PLAY(to listen)])}'),
('{None([START])}', '{None([PLAY(to play)])}'),
('{None([START])}', '{None([PLAY(to listen)])}'))
pairs = [(actual_utterances, expected_utterances),
(actual_handled_tokens, expected_handled_tokens),
(actual_handled_groups, expected_handled_groups)]
compare_all_pairs(self, pairs)
def test_token_and_default_token_handler(self) -> None:
# pylint: disable=too-many-locals
_start_token_handler = lambda token, tokenized_phrase: '[{}]'.format(token)
_default_token_handler = lambda token, tokenized_phrase: '[{}(default)]'.format(token)
utterance_combo = (('he will want', 'she will want'), ('to play', 'to listen'))
tokens = ('START', 'PLAY')
groups = (('None', 1), ('None', 1))
token_handler_map = {'START': _start_token_handler, 'DEFAULT': _default_token_handler}
_, generator = combine(utterance_combo, tokens, groups, token_handler_map=token_handler_map)
actual_utterances, actual_handled_tokens, actual_handled_groups = zip(*generator)
expected_utterances = ('he will want to play', 'he will want to listen',
'she will want to play', 'she will want to listen')
expected_handled_tokens = (('[START]', '[PLAY(default)]',), ('[START]', '[PLAY(default)]',),
('[START]', '[PLAY(default)]',), ('[START]', '[PLAY(default)]',))
expected_handled_groups = (('{None([START])}', '{None([PLAY(default)])}'),
('{None([START])}', '{None([PLAY(default)])}'),
('{None([START])}', '{None([PLAY(default)])}'),
('{None([START])}', '{None([PLAY(default)])}'))
pairs = [(actual_utterances, expected_utterances),
(actual_handled_tokens, expected_handled_tokens),
(actual_handled_groups, expected_handled_groups)]
compare_all_pairs(self, pairs)
def test_combo_options_without_replacement(self) -> None:
utterance_combo = (('he will want', 'she will want'), ('to play', 'to listen'))
tokens = ('START', 'PLAY')
groups = (('None', 1), ('None', 1))
combo_options = ComboOptions(max_sample_size=6, with_replacement=False)
_, generator = combine(utterance_combo, tokens, groups, combo_options=combo_options)
actual_utterances, actual_handled_tokens, actual_handled_groups = zip(*generator)
expected_utterances = ('he will want to play', 'he will want to listen',
'she will want to play', 'she will want to listen')
expected_handled_tokens = (('[START(he will want)]', '[PLAY(to play)]',),
('[START(he will want)]', '[PLAY(to listen)]',),
('[START(she will want)]', '[PLAY(to play)]',),
('[START(she will want)]', '[PLAY(to listen)]',))
expected_handled_groups = (('{None([START(she will want)])}', '{None([PLAY(to listen)])}'),
('{None([START(he will want)])}', '{None([PLAY(to listen)])}'),
('{None([START(he will want)])}', '{None([PLAY(to play)])}'),
('{None([START(she will want)])}', '{None([PLAY(to play)])}'))
pairs = [(actual_utterances, expected_utterances),
(actual_handled_tokens, expected_handled_tokens),
(actual_handled_groups, expected_handled_groups)]
compare_all_pairs(self, pairs)
def test_combo_options_with_replacement(self) -> None:
utterance_combo = (('he will want', 'she will want'), ('to play', 'to listen'))
tokens = ('START', 'PLAY')
groups = (('None', 1), ('None', 1))
combo_options = ComboOptions(max_sample_size=6, with_replacement=True)
_, generator = combine(utterance_combo, tokens, groups, combo_options=combo_options)
actual_utterances, actual_handled_tokens, actual_handled_groups = zip(*generator)
expected_utterances = ('she will want to listen', 'she will want to listen',
'he will want to play', 'she will want to play',
'she will want to listen', 'she will want to listen')
expected_handled_tokens = (('[START(she will want)]', '[PLAY(to listen)]'),
('[START(she will want)]', '[PLAY(to listen)]'),
('[START(he will want)]', '[PLAY(to play)]'),
('[START(she will want)]', '[PLAY(to play)]'),
('[START(she will want)]', '[PLAY(to listen)]'),
('[START(she will want)]', '[PLAY(to listen)]'))
expected_handled_groups = (('{None([START(she will want)])}', '{None([PLAY(to listen)])}'),
('{None([START(she will want)])}', '{None([PLAY(to listen)])}'),
('{None([START(he will want)])}', '{None([PLAY(to play)])}'),
('{None([START(she will want)])}', '{None([PLAY(to play)])}'),
('{None([START(she will want)])}', '{None([PLAY(to listen)])}'),
('{None([START(she will want)])}', '{None([PLAY(to listen)])}'))
pairs = [(actual_utterances, expected_utterances),
(actual_handled_tokens, expected_handled_tokens),
(actual_handled_groups, expected_handled_groups)]
compare_all_pairs(self, pairs)
if __name__ == '__main__':
unittest.main()
|
import pytest
import pathlib
from typing import List
import shutil
import io
from cincan.frontend import ToolImage
@pytest.fixture(autouse=True, scope="function")
def disable_tty_interactive(monkeypatch):
"""Mock stdin to make tty part of tests to complete"""
monkeypatch.setattr('sys.stdin', io.StringIO(''))
monkeypatch.setattr('sys.stdin.fileno', lambda : 0)
@pytest.fixture(scope='function')
def tool(request):
# Do not remove automatically, will be used multiple times
tool = ToolImage(image='quay.io/cincan/test:dev', rm=False)
yield tool
@pytest.fixture(scope="session", autouse=True)
def delete_temporary_files(request, tmp_path_factory):
"""Cleanup a testing directory once we are finished."""
_tmp_path_factory = tmp_path_factory
def cleanup():
tmp_path = _tmp_path_factory.getbasetemp()
if pathlib.Path(tmp_path).exists() and pathlib.Path(tmp_path).is_dir():
shutil.rmtree(tmp_path)
request.addfinalizer(cleanup)
def prepare_work_dir(name: str, with_files: List['str']) -> pathlib.Path:
src_root = pathlib.Path('samples')
root = pathlib.Path(name)
if root.is_dir():
shutil.rmtree(root, ignore_errors=True)
root.mkdir()
for f_name in with_files:
src = src_root / f_name
dst = root / f_name
dst.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(src, dst)
shutil.copystat(src, dst)
return root |
from typing import List
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# Array -> Linked-List
@classmethod
def array_to_list(cls, data: List):
if len(data) == 0:
return None
head = cls()
p = cls()
p.next = head
for x in data:
p = p.next
p.val = x
p.next = cls()
p.next = None
return head
# Linked-List -> Array
@classmethod
def list_to_array(cls, head):
ans = []
while head is not None:
ans.append(head.val)
head = head.next
return ans
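# Minimal round-trip sketch using the helpers above:
#   head = ListNode.array_to_list([1, 2, 3])
#   ListNode.list_to_array(head)   # -> [1, 2, 3]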
if __name__ == "__main__":
pass |
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Sergiu Mosanu <sm7ed@virginia.edu>
# Copyright (c) 2020-2021 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2020 Antmicro <www.antmicro.com>
#
# SPDX-License-Identifier: BSD-2-Clause
# To interface via the serial port use:
# lxterm /dev/ttyUSBx --speed=115200
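# Typical build/load invocations for this target (the script name is assumed to match
# the litex-boards target file, e.g. xilinx_alveo_u280.py):
#   ./xilinx_alveo_u280.py --build --with-hbm
#   ./xilinx_alveo_u280.py --load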
import os
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex_boards.platforms import xilinx_alveo_u280
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc import SoCRegion
from litex.soc.integration.builder import *
from litex.soc.interconnect.axi import *
from litex.soc.interconnect.csr import *
from litex.soc.cores.ram.xilinx_usp_hbm2 import USPHBM2
from litex.soc.cores.led import LedChaser
from litedram.modules import MTA18ASF2G72PZ
from litedram.phy import usddrphy
from litepcie.phy.usppciephy import USPPCIEPHY
from litepcie.software import generate_litepcie_software
from litedram.common import *
from litedram.frontend.axi import *
from litescope import LiteScopeAnalyzer
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq, ddram_channel, with_hbm):
if with_hbm:
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_hbm_ref = ClockDomain()
self.clock_domains.cd_apb = ClockDomain()
else: # ddr4
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain()
self.clock_domains.cd_pll4x = ClockDomain()
self.clock_domains.cd_idelay = ClockDomain()
# # #
if with_hbm:
self.submodules.pll = pll = USMMCM(speedgrade=-2)
pll.register_clkin(platform.request("sysclk", ddram_channel), 100e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_hbm_ref, 100e6)
pll.create_clkout(self.cd_apb, 100e6)
platform.add_false_path_constraints(self.cd_sys.clk, self.cd_apb.clk)
else: # ddr4
self.submodules.pll = pll = USMMCM(speedgrade=-2)
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(platform.request("sysclk", ddram_channel), 100e6)
pll.create_clkout(self.cd_pll4x, sys_clk_freq*4, buf=None, with_reset=False)
pll.create_clkout(self.cd_idelay, 600e6) #, with_reset=False
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # Ignore sys_clk to pll.clkin path created by SoC's rst.
self.specials += [
Instance("BUFGCE_DIV", name="main_bufgce_div",
p_BUFGCE_DIVIDE=4,
i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys.clk),
Instance("BUFGCE", name="main_bufgce",
i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys4x.clk),
# AsyncResetSynchronizer(self.cd_idelay, ~pll.locked),
]
self.submodules.idelayctrl = USIDELAYCTRL(cd_ref=self.cd_idelay, cd_sys=self.cd_sys)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=int(150e6), ddram_channel=0, with_pcie=False, with_led_chaser=False, with_hbm=False, **kwargs):
platform = xilinx_alveo_u280.Platform()
if with_hbm:
assert 225e6 <= sys_clk_freq <= 450e6
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq, ddram_channel, with_hbm)
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq, ident="LiteX SoC on Alveo U280 (ES1)", **kwargs)
# HBM / DRAM -------------------------------------------------------------------------------
if with_hbm:
# JTAGBone -----------------------------------------------------------------------------
#self.add_jtagbone(chain=2) # Chain 1 already used by HBM2 debug probes.
# Add HBM Core.
self.submodules.hbm = hbm = ClockDomainsRenamer({"axi": "sys"})(USPHBM2(platform))
# Get HBM .xci.
os.system("wget https://github.com/litex-hub/litex-boards/files/6893157/hbm_0.xci.txt")
os.makedirs("ip/hbm", exist_ok=True)
os.system("mv hbm_0.xci.txt ip/hbm/hbm_0.xci")
# Connect four of the HBM's AXI interfaces to the main bus of the SoC.
for i in range(4):
axi_hbm = hbm.axi[i]
axi_lite_hbm = AXILiteInterface(data_width=256, address_width=33)
self.submodules += AXILite2AXI(axi_lite_hbm, axi_hbm)
self.bus.add_slave(f"hbm{i}", axi_lite_hbm, SoCRegion(origin=0x4000_0000 + 0x1000_0000*i, size=0x1000_0000)) # 256MB.
# Link HBM2 channel 0 as main RAM
self.bus.add_region("main_ram", SoCRegion(origin=0x4000_0000, size=0x1000_0000, linker=True)) # 256MB.
else:
# DDR4 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = usddrphy.USPDDRPHY(platform.request("ddram", ddram_channel),
memtype = "DDR4",
cmd_latency = 1, # seems to work better with cmd_latency=1
sys_clk_freq = sys_clk_freq,
iodelay_clk_freq = 600e6,
is_rdimm = True)
self.add_sdram("sdram",
phy = self.ddrphy,
module = MTA18ASF2G72PZ(sys_clk_freq, "1:4"),
size = 0x40000000,
l2_cache_size = kwargs.get("l2_size", 8192)
)
# Firmware RAM (To ease initial LiteDRAM calibration support) --------------------------
self.add_ram("firmware_ram", 0x20000000, 0x8000)
# PCIe -------------------------------------------------------------------------------------
if with_pcie:
self.submodules.pcie_phy = USPPCIEPHY(platform, platform.request("pcie_x4"),
data_width = 128,
bar0_size = 0x20000)
self.add_pcie(phy=self.pcie_phy, ndmas=1)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("gpio_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
from litex.soc.integration.soc import LiteXSoCArgumentParser
parser = LiteXSoCArgumentParser(description="LiteX SoC on Alveo U280")
target_group = parser.add_argument_group(title="Target options")
target_group.add_argument("--build", action="store_true", help="Build bitstream.")
target_group.add_argument("--load", action="store_true", help="Load bitstream.")
target_group.add_argument("--sys-clk-freq", default=150e6, help="System clock frequency.") # HBM2 with 250MHz, DDR4 with 150MHz (1:4)
target_group.add_argument("--ddram-channel", default="0", help="DDRAM channel (0, 1, 2 or 3).") # also selects clk 0 or 1
target_group.add_argument("--with-pcie", action="store_true", help="Enable PCIe support.")
target_group.add_argument("--driver", action="store_true", help="Generate PCIe driver.")
target_group.add_argument("--with-hbm", action="store_true", help="Use HBM2.")
target_group.add_argument("--with-analyzer", action="store_true", help="Enable Analyzer.")
target_group.add_argument("--with-led-chaser", action="store_true", help="Enable LED Chaser.")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
if args.with_hbm:
args.sys_clk_freq = 250e6
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
ddram_channel = int(args.ddram_channel, 0),
with_pcie = args.with_pcie,
with_led_chaser = args.with_led_chaser,
with_hbm = args.with_hbm,
with_analyzer = args.with_analyzer,
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.driver:
generate_litepcie_software(soc, os.path.join(builder.output_dir, "driver"))
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(builder.get_bitstream_filename(mode="sram"))
if __name__ == "__main__":
main()
|
magic = {
'SX_OBJECT': b'\x00', # ( 0): Already stored object
'SX_LSCALAR': b'\x01', # ( 1): Scalar (large binary) follows (length, data)
'SX_ARRAY': b'\x02', # ( 2): Array forthcoming (size, item list)
'SX_HASH': b'\x03', # ( 3): Hash forthcoming (size, key/value pair list)
'SX_REF': b'\x04', # ( 4): Reference to object forthcoming
'SX_UNDEF': b'\x05', # ( 5): Undefined scalar
'SX_INTEGER': b'\x06', # ( 6): Integer forthcoming
'SX_DOUBLE': b'\x07', # ( 7): Double forthcoming
'SX_BYTE': b'\x08', # ( 8): (signed) byte forthcoming
'SX_NETINT': b'\x09', # ( 9): Integer in network order forthcoming
'SX_SCALAR': b'\x0a', # (10): Scalar (binary, small) follows (length, data)
'SX_TIED_ARRAY': b'\x0b', # (11): Tied array forthcoming
'SX_TIED_HASH': b'\x0c', # (12): Tied hash forthcoming
'SX_TIED_SCALAR': b'\x0d', # (13): Tied scalar forthcoming
'SX_SV_UNDEF': b'\x0e', # (14): Perl's immortal PL_sv_undef
'SX_BLESS': b'\x11', # (17): Object is blessed
'SX_IX_BLESS': b'\x12', # (18): Object is blessed, classname given by index
'SX_HOOK': b'\x13', # (19): Stored via hook, user-defined
'SX_OVERLOAD': b'\x14', # (20): Overloaded reference
'SX_TIED_KEY': b'\x15', # (21): Tied magic key forthcoming
'SX_TIED_IDX': b'\x16', # (22): Tied magic index forthcoming
'SX_UTF8STR': b'\x17', # (23): UTF-8 string forthcoming (small)
'SX_LUTF8STR': b'\x18', # (24): UTF-8 string forthcoming (large)
'SX_FLAG_HASH': b'\x19', # (25): Hash with flags forthcoming (size, flags, key/flags/value triplet list)
}
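# Sketch: map a type-tag byte read from the body of a Storable stream back to its
# symbolic name (`tag` is assumed to be a single byte already read from the stream):
#   tag_name = next((name for name, byte in magic.items() if byte == tag), 'unknown')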
|
from threading import Thread
import traceback
import os.path
import remi.gui as gui
from remi import start, App
from k40nano import PngPlotter
from .EgvParser import parse_egv
from .EgvSend import send_egv
class K40WebServer(App):
def __init__(self, *args):
res_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res')
working_path = os.getcwd()
super(K40WebServer, self).__init__(*args, static_file_path={'my_resources':res_path, 'output':working_path})
self.selected_file = None
self.print_thread = None
self.stop = False
def main(self):
container = gui.VBox(width=500, height=500)
self.status_lbl = gui.Label('First Upload EGV')
upload_box = gui.VBox()
upload_lbl = gui.Label('Upload EGV')
self.upload_bt = gui.FileUploader('./', width=200, height=30, margin='10px')
upload_box.append(upload_lbl)
upload_box.append(self.upload_bt)
self.upload_bt.onsuccess.connect(self.fileupload_on_success)
self.upload_bt.onfailed.connect(self.fileupload_on_failed)
self.preview = gui.Image('/my_resources:empty-image.png', width=200, height=200)
self.print_bt = gui.Button('Print Uploaded Design')
self.print_bt.onclick.connect(self.print_button_pressed)
self.print_bt.set_enabled(False)
self.stop_bt = gui.Button('Stop Printing')
self.stop_bt.onclick.connect(self.stop_button_pressed)
self.stop_bt.set_enabled(False)
# appending a widget to another
container.append(self.status_lbl)
container.append(upload_box)
container.append(self.preview)
container.append(self.print_bt)
container.append(self.stop_bt)
# returning the root widget
return container
def clear_selected_file(self):
self.preview.set_image('/my_resources:empty-image.png')
self.selected_file = None
self.print_bt.set_enabled(False)
self.stop_bt.set_enabled(False)
self.upload_bt.set_enabled(True)
# listener function
def fileupload_on_success(self, widget, filename):
try:
plotter = PngPlotter(filename + '.png')
parse_egv(filename, plotter)
plotter.close()
self.preview.set_image('/output:{}.png'.format(filename))
except:
print(traceback.format_exc())
self.status_lbl.set_text('Invalid EGV: ' + filename)
self.clear_selected_file()
return
self.status_lbl.set_text('Ready to Print: ' + filename)
self.selected_file = filename
self.print_bt.set_enabled(True)
def fileupload_on_failed(self, widget, filename):
self.status_lbl.set_text('Upload Failed: ' + filename)
self.clear_selected_file()
def print_button_pressed(self, widget):
self.status_lbl.set_text('Printing: ' + self.selected_file)
self.print_bt.set_enabled(False)
self.upload_bt.set_enabled(False)
self.stop_bt.set_enabled(True)
self.stop = False
self.print_thread = Thread(target=self.print_job)
self.print_thread.start()
def stop_button_pressed(self, widget):
pass
def print_job(self):
try:
send_egv(self.selected_file)
self.status_lbl.set_text('Print Completed: ' + self.selected_file)
except Exception as e:
print(traceback.format_exc())
inputDialog = gui.GenericDialog('Print Error', str(e))
inputDialog.show(self)
self.status_lbl.set_text('Print Error: ' + self.selected_file)
self.clear_selected_file()
if __name__ == '__main__':
# starts the web server
start(K40WebServer, debug=True, address='0.0.0.0', port=8081, start_browser=False, multiple_instance=False)
|
from os.path import exists
import seaborn as sns
from analyze_results import get_number_of_edges
from helper import file_utils as file, io_utils as io
import pandas as pd
import numpy as np
from visualization.visualize_gcn import generate_doc_labels
from visualization.visualize_tsne import reduce_dimensions, visualize_highlight
sns.set(style='darkgrid', color_codes=True)
def analyze_relations(dataset, id1, id2):
doc2id = file.get_doc2id(dataset)
vocab = file.get_entity2id(dataset)
docs = file.get_sentences(dataset)
triples = file.get_document_triples(dataset)
filtered = file.get_filtered_triples(dataset)
all_props = file.get_all_relations()
connections = triples[(triples["doc1"] == id1) & (triples["doc2"] == id2)]
details = connections["detail"].tolist()[0].split("+")
assert len(details) == connections["relations"].tolist()[0]
doc1 = docs[id1]
doc2 = docs[id2]
doc1_ids = doc2id[doc2id["doc"] == id1]["wikiID"].tolist()
doc2_ids = doc2id[doc2id["doc"] == id2]["wikiID"].tolist()
all_relations = doc1_ids + doc2_ids
all_relations = list(dict.fromkeys(all_relations))
entities_doc1 = []
entities_doc2 = []
result1 = filtered[(filtered["entity1"].isin(doc1_ids)) & (filtered["entity2"].isin(doc2_ids))]
result2 = filtered[(filtered["entity2"].isin(doc1_ids)) & (filtered["entity1"].isin(doc2_ids))]
merged_results = pd.concat([result1, result2]).reset_index(drop=True)
assert merged_results.shape[0] == connections["relations"].tolist()[0]
for relation in doc1_ids:
word = vocab[vocab["wikiID"] == relation]["word"].tolist()
entities_doc1.append([word, relation])
for relation in doc2_ids:
word = vocab[vocab["wikiID"] == relation]["word"].tolist()
entities_doc2.append([word, relation])
count1 = 0
count2 = 0
for w in entities_doc1:
word = w[0][0]
if word in doc1 and len(word) > 1:
count1 += 1
doc1 = doc1.replace(word, "\\hl{" + word + "}")
for w in entities_doc2:
word = w[0][0]
if word in doc2 and len(word) > 1:
count2 += 1
doc2 = doc2.replace(word, "\\hl{" + word + "}")
print(doc1)
print("\n\n\n")
print(doc2)
print(entities_doc1)
print(merged_results)
labeled_array = []
for index, row in merged_results.iterrows():
entity1 = row["entity1"]
entity2 = row["entity2"]
rel = row["relations"]
word1 = vocab[vocab["wikiID"] == entity1]["word"].tolist()[0]
word2 = vocab[vocab["wikiID"] == entity2]["word"].tolist()[0]
desc = all_props[all_props["ID"] == rel]["label"].tolist()[0]
labeled_array.append([word1, desc, word2])
labeled_df = pd.DataFrame(labeled_array)
print(labeled_df)
def get_max_min_values(dataset, type, n=5):
results_log = file.get_eval_logs(dataset=dataset)
results_log = results_log[results_log["raw_count"] == type]
maximum = results_log.nlargest(n, columns="accuracy")
minimum = results_log.nsmallest(n, columns="accuracy")
return maximum, minimum
def get_base_lowest(dataset, n=5):
results_log = file.get_eval_logs(dataset=dataset)
minimum = results_log[results_log["wiki_enabled"] == False].nsmallest(n, columns="accuracy")
maximum = pd.DataFrame()
return maximum, minimum
def remove_wrongs(edges):
for dataset in edges.keys():
counts = edges[dataset]
max_nonzero = len(counts) - 2
results_log = file.get_eval_logs(dataset=dataset)
indices = results_log[results_log["threshold"] > max_nonzero].index
results_log.loc[indices, 'wiki_enabled'] = False
file.save_eval_logs(results_log, dataset=dataset)
def get_graph_details(dataset):
base_edges = file.get_base_edges(dataset)
types = set(base_edges["edge_type"].tolist())
for t in types:
count = base_edges[base_edges["edge_type"] == t].shape[0]
print(t, count)
# print(base_edges.head())
ohsumed_colors = [
[0.87696976, 0.53662197, 0.20161359],
[0.39785174, 0.14077558, 0.03484343],
[0.40281644, 0.87598413, 0.23823897],
[0.94033192, 0.17783354, 0.65802753],
[0.57713418, 0.1947504, 0.12719329],
[0.44039277, 0.29924405, 0.72650093],
[0.38028269, 0.89793327, 0.58556525],
[0.16676958, 0.61377713, 0.73910106],
[0.8451732, 0.646984, 0.39463175],
[0.03802899, 0.74157645, 0.23329789],
[0.70155728, 0.37348221, 0.67676925],
[0.84022014, 0.39748405, 0.64789638],
[0.83657054, 0.96644718, 0.74838346],
[0.45796036, 0.25250949, 0.40888393],
[0.27205942, 0.16957816, 0.51482936],
[0.35751255, 0.36778616, 0.2032978 ],
[0.87786575, 0.06082114, 0.73051948],
[0.26296849, 0.92915212, 0.49388716],
[0.05749447, 0.6363974, 0.60248133],
[0.11981351, 0.58317452, 0.70530186],
[0.89247519, 0.74573484, 0.61559013],
[0.99396792, 0.15785788, 0.154068],
[0.8410705, 0.71555522, 0.04468367]
]
mr_colors = ["red", "blue"]
r52_colors = np.random.rand(52, 3)
r8_colors = ["r", "b", "g", "y", "c", "m", "k", "burlywood"]
dataset_colors = {
"mr": mr_colors,
"r8": r8_colors,
"ohsumed": ohsumed_colors,
"r52": r52_colors
}
def analyze_doc_embeddings(dataset, path, id1, id2, filename):
embeddings = io.read_csv(path, sep=",")
reduced_emb_doc = reduce_dimensions(embeddings)
doc_labels = generate_doc_labels(embeddings, dataset)
label1 = doc_labels[id1]
label2 = doc_labels[id2]
# assert label1 == label2
visualize_highlight(reduced_emb_doc, id1, id2, label1, filename=filename, labels=doc_labels, colors=dataset_colors[dataset])
top_words_dict = {
"ohsumed": {},
"r8": {},
"r52": {},
"mr": {}
}
def analyze_word_embeddings(dataset, path, threshold, edge_type, best, n=10):
global top_words_dict
embeddings = io.read_csv(path, sep=",")
embeddings_array = embeddings.to_numpy().tolist()
unique_labels = sorted(list(set([label.split("\t")[2] for label in file.get_labels(dataset)])), reverse=True)
vocab = file.get_vocab(dataset)
max_indices = []
max_values = []
all_words = []
all_labels = []
results_dict = {}
for index, emb in enumerate(embeddings_array):
array = np.array(emb)
max_index = array.argmax()
max_indices.append(max_index)
max_values.append(array[max_index])
all_words.append(vocab[index])
all_labels.append(unique_labels[max_index])
results_dict[index] = {
"max_index": max_index,
"max_value": array[max_index],
"word": vocab[index],
"label": unique_labels[max_index]
}
assert len(max_values) == len(max_indices) == len(all_words) == len(all_labels)
results_df = pd.DataFrame.from_dict(results_dict,orient="index")
top_words = {}
for u_label in unique_labels:
largest = results_df[results_df["label"] == u_label].nlargest(n, columns="max_value")["word"].tolist()
top_words[u_label] = largest
# print(top_words)
# key = f"{threshold}:{edge_type}"
if best:
top_words_dict[dataset][edge_type] = top_words
def get_embeddings_from_disk(maximum, minimum, type, dataset, layer):
max_counter = 0
min_counter = 0
for index, row in maximum.iterrows():
directory = row["time"]
doc_path = f"/Volumes/Data/NewLogs/{dataset.title()}/{directory}/plots/gcn_embeddings/{dataset}_doc_embeddings_layer{layer}.csv"
word_path = f"/Volumes/Data/NewLogs/{dataset.title()}/{directory}/plots/gcn_embeddings/{dataset}_word_embeddings_layer{layer}.csv"
if exists(doc_path):
analyze_doc_embeddings(dataset, doc_path, 158, 175, f"{io.get_basic_embeddings_plots_path(dataset)}/{dataset}_{type}_max_doc_emb_layer{layer}.png")
# analyze_word_embeddings(dataset, word_path, row["threshold"], type, True, n=5)
print(f"Plotted {directory} for {dataset}")
break
else:
max_counter += 1
if max_counter == maximum.shape[0]:
print("No files found for maximum")
for index, row in minimum.iterrows():
directory = row["time"]
doc_path = f"/Volumes/Data/NewLogs/{dataset.title()}/{directory}/plots/gcn_embeddings/{dataset}_doc_embeddings_layer{layer}.csv"
word_path = f"/Volumes/Data/NewLogs/{dataset.title()}/{directory}/plots/gcn_embeddings/{dataset}_word_embeddings_layer{layer}.csv"
if exists(doc_path):
analyze_doc_embeddings(dataset, doc_path, 158, 175, f"{io.get_basic_embeddings_plots_path(dataset)}/{dataset}_{type}_min_doc_emb_layer{layer}.png")
# analyze_word_embeddings(dataset, word_path, row["threshold"], type, type == "base", n=5)
print(f"Plotted {directory} for {dataset}")
break
else:
min_counter += 1
if min_counter == minimum.shape[0]:
print("No files found for minimum")
def plot_all():
datasets = ["mr", "r8", "r52", "ohsumed"]
# types = ["count", "count_norm", "count_norm_pmi", "idf", "idf_norm", "idf_norm_pmi", "idf_wiki", "idf_wiki_norm",
# "idf_wiki_norm_pmi"]
types = ["count", "idf_wiki", "idf_wiki_norm", "idf_wiki_norm_pmi"]
for dataset in ["r52"]:
for t in types:
maximum, minimum = get_max_min_values(dataset, t, 10)
if t == "count":
minimum = pd.DataFrame()
elif "idf_wiki" in t:
maximum = pd.DataFrame()
get_embeddings_from_disk(maximum, minimum, t, dataset, layer=1)
get_embeddings_from_disk(maximum, minimum, t, dataset, layer=0)
maximum, minimum = get_base_lowest(dataset, 10)
get_embeddings_from_disk(maximum, minimum, "base", dataset, layer=1)
get_embeddings_from_disk(maximum, minimum, "base", dataset, layer=0)
if __name__ == '__main__':
edges = get_number_of_edges()
remove_wrongs(edges) |
"""Create a connection to Board Game Arena and interact with it."""
import json
import logging
from logging.handlers import RotatingFileHandler
import re
import time
import urllib.parse
import requests
from bga_game_list import get_game_list
LOG_FILENAME = "errs"
logger = logging.getLogger(__name__)
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=10000000, backupCount=0)
formatter = logging.Formatter("%(asctime)s | %(name)s | %(levelname)s | %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
MODE_TYPES = {
"normal": 0,
"training": 1,
}
MODE_VALUES = list(MODE_TYPES.keys())
SPEED_TYPES = {
"fast": 0,
"normal": 1,
"slow": 2,
"24/day": 10,
"12/day": 11,
"8/day": 12,
"4/day": 13,
"3/day": 14,
"2/day": 15,
"1/day": 17,
"1/2days": 19,
"nolimit": 20,
}
SPEED_VALUES = list(SPEED_TYPES.keys())
KARMA_TYPES = {"0": 0, "50": 1, "65": 2, "75": 3, "85": 4}
KARMA_VALUES = list(KARMA_TYPES.keys())
LEVEL_VALUES = [
"beginner",
"apprentice",
"average",
"good",
"strong",
"expert",
"master",
]
class BGAAccount:
"""Account user/pass and methods to login/create games with it."""
def __init__(self):
self.base_url = "https://boardgamearena.com"
self.session = requests.Session()
        # Get the CSRF token from the login page text
resp = self.session.get(self.base_url + "/account")
resp_text = resp.text
csrf_token_match = re.search(r"id='csrf_token' value='([0-9a-f]*)'", resp_text)
        if not csrf_token_match:
            # returning a value from __init__ is invalid; raise to signal the error
            raise ValueError("Could not find a CSRF token on the login page")
self.csrf_token = csrf_token_match[1]
def fetch(self, url):
"""Generic get."""
logger.debug("\nGET: " + url)
with self.session.get(url) as response:
resp_text = response.text
if resp_text[0] in ["{", "["]: # If it's a json
print(f"Fetched {url}. Resp: " + resp_text[:80])
return resp_text
def post(self, url, params):
"""Generic post."""
with self.session.post(url, data=params) as response:
resp_text = response.text
print(f"Posted {url}. Resp: " + resp_text[:80])
def login(self, username, password):
"""Login to BGA provided the username/password. The session will
now have cookies to use for privileged actions."""
url = self.base_url + "/account/account/login.html"
params = {
"email": username,
"password": password,
"rememberme": "on",
"redirect": "",
"csrf_token": self.csrf_token,
"form_id": "loginform",
"dojo.preventCache": str(int(time.time())),
}
logger.debug("LOGIN: " + url + "\nEMAIL: " + params["email"] + "\ncsrf_token:" + self.csrf_token)
self.post(url, params)
return self.verify_privileged()
def logout(self):
"""Logout of current session."""
url = self.base_url + "/account/account/logout.html"
params = {"dojo.preventCache": str(int(time.time()))}
url += "?" + urllib.parse.urlencode(params)
self.fetch(url)
def quit_table(self):
""" Quit the table if the player is currently at one"""
url = self.base_url + "/player"
resp = self.fetch(url)
# Some version of "You are playing" or "Playing now at:"
matches = re.search(r"[Pp]laying[^<]*<a href=\"\/table\?table=(\d+)", resp)
if matches is not None:
table_id = matches[1]
logger.debug("Quitting table" + str(table_id))
quit_url = self.base_url + "/table/table/quitgame.html"
params = {
"table": table_id,
"neutralized": "true",
"s": "table_quitgame",
"dojo.preventCache": str(int(time.time())),
}
quit_url += "?" + urllib.parse.urlencode(params)
self.fetch(quit_url)
def quit_playing_with_friends(self):
"""There is a BGA feature called "playing with friends". Remove friends from the session"""
quit_url = self.base_url + "/group/group/removeAllFromGameSession.html"
params = {"dojo.preventCache": str(int(time.time()))}
quit_url += "?" + urllib.parse.urlencode(params)
self.fetch(quit_url)
def create_table(self, game_name_part):
"""Create a table and return its url. 201,0 is to set to normal mode.
Partial game names are ok, like race for raceforthegalaxy.
Returns (table id (int), error string (str))"""
# Try to close any logged-in session gracefully
lower_game_name = re.sub(r"[^a-z0-9]", "", game_name_part.lower())
self.quit_table()
self.quit_playing_with_friends()
games, err_msg = get_game_list()
if len(err_msg) > 0:
return -1, err_msg
lower_games = {}
for game in games:
lower_name = re.sub(r"[^a-z0-9]", "", game.lower())
lower_games[lower_name] = games[game]
# If name is unique like "race" for "raceforthegalaxy", use that
games_found = []
game_name = ""
for game_i in list(lower_games.keys()):
if game_i == lower_game_name: # if there's an exact match, take it!
game_name = lower_game_name
elif game_i.startswith(lower_game_name):
games_found.append(game_i)
if len(game_name) == 0:
if len(games_found) == 0:
err = (
f"`{lower_game_name}` is not available on BGA. Check your spelling "
f"(capitalization and special characters do not matter)."
)
return -1, err
elif len(games_found) > 1:
err = f"`{lower_game_name}` matches [{','.join(games_found)}]. Use more letters to match."
return -1, err
game_name = games_found[0]
game_id = lower_games[game_name]
url = self.base_url + "/table/table/createnew.html"
params = {
"game": game_id,
"forceManual": "true",
"is_meeting": "false",
"dojo.preventCache": str(int(time.time())),
}
url += "?" + urllib.parse.urlencode(params)
resp = self.fetch(url)
try:
resp_json = json.loads(resp)
except json.decoder.JSONDecodeError:
logger.error("Unable to decode response json:" + resp)
return -1, "Unable to parse JSON from Board Game Arena."
if resp_json["status"] == "0":
err = resp_json["error"]
if err.startswith("You have a game in progress"):
matches = re.match(r"(^[\w !]*)[^\/]*([^\"]*)", err)
err = matches[1] + "Quit this game first (1 realtime game at a time): " + self.base_url + matches[2]
return -1, err
table_id = resp_json["data"]["table"]
return table_id, ""
def set_table_options(self, options, table_id):
url_data = self.parse_options(options, table_id)
if isinstance(url_data, str): # In this case it's an error
return url_data
logger.debug("Got url data :" + str(url_data))
for url_datum in url_data:
self.set_option(table_id, url_datum["path"], url_datum["params"])
def set_option(self, table_id, path, params):
"""Change the game options for the specified."""
url = self.base_url + path
params.update({"table": table_id, "dojo.preventCache": str(int(time.time()))})
url += "?" + urllib.parse.urlencode(params)
self.fetch(url)
def parse_options(self, options, table_id):
"""Create url data that can be parsed as urls"""
# Set defaults if they're not present
defaults = {
"mode": "normal",
"presentation": "Made by discord BGA bot (github.com/pocc/bga_discord)",
}
# options will overwrite defaults if they are there
defaults.update(options)
updated_options = defaults
url_data = []
for option in updated_options:
value = updated_options[option]
option_data = {}
logger.debug(f"Reading option `{option}` with key `{value}`")
if option == "mode":
option_data["path"] = "/table/table/changeoption.html"
mode_name = updated_options[option]
if mode_name not in list(MODE_TYPES.keys()):
return f"Valid modes are training and normal. You entered {mode_name}."
mode_id = MODE_TYPES[mode_name]
option_data["params"] = {"id": 201, "value": mode_id}
elif option == "speed":
option_data["path"] = "/table/table/changeoption.html"
speed_name = updated_options[option]
if speed_name not in list(SPEED_TYPES.keys()):
return f"{speed_name} is not a valid speed. Check !bga options."
speed_id = SPEED_TYPES[speed_name]
option_data["params"] = {"id": 200, "value": speed_id}
elif option == "minrep":
option_data["path"] = "/table/table/changeTableAccessReputation.html"
if value not in list(KARMA_TYPES.keys()):
return f"Invalid minimum karma {value}. Valid values are 0, 50, 65, 75, 85."
option_data["params"] = {"karma": KARMA_TYPES[value]}
elif option == "presentation":
# No error checking is necessary as every string is valid.
option_data["path"] = "/table/table/setpresentation.html"
option_data["params"] = {"value": updated_options[option]}
elif option == "levels":
if "-" not in value:
return "levels requires a dash between levels like `good-strong`."
[min_level, max_level] = value.lower().split("-")
if min_level not in LEVEL_VALUES:
return f"Min level {min_level} is not a valid level ({','.join(LEVEL_VALUES)})"
if max_level not in LEVEL_VALUES:
return f"Max level {max_level} is not a valid level ({','.join(LEVEL_VALUES)})"
level_enum = {LEVEL_VALUES[i]: i for i in range(len(LEVEL_VALUES))}
min_level_num = level_enum[min_level]
max_level_num = level_enum[max_level]
level_keys = {}
for i in range(7):
if min_level_num <= i <= max_level_num:
level_keys["level" + str(i)] = "true"
else:
level_keys["level" + str(i)] = "false"
option_data["path"] = "/table/table/changeTableAccessLevel.html"
option_data["params"] = level_keys
elif option == "players":
# Change minimum and maximum number of players
option_data["path"] = "/table/table/changeWantedPlayers.html"
[minp, maxp] = updated_options[option].split("-")
option_data["params"] = {"minp": minp, "maxp": maxp}
elif option == "restrictgroup":
option_data["path"] = "/table/table/restrictToGroup.html"
group_options = self.get_group_options(table_id)
group_id = -1
for group_o in group_options:
if group_o[1].startswith(value):
group_id = group_o[0]
if group_id != -1:
option_data["params"] = {"group": group_id}
else:
groups_str = "[`" + "`,`".join([g[1] for g in group_options if g[1] != "-"]) + "`]"
return f"Unable to find group {value}. You are a member of groups {groups_str}."
elif option == "lang":
option_data["path"] = "/table/table/restrictToLanguage.html"
option_data["params"] = {"lang": updated_options[option]}
elif option.isdigit():
# If this is an HTML option, set it as such
option_data["path"] = "/table/table/changeoption.html"
option_data["params"] = {"id": option, "value": updated_options[option]}
else:
return f"Option {option} not a valid option."
url_data.append(option_data)
return url_data
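    # Illustrative sketch (added for clarity, not from the original code): for
    # options like {"mode": "training", "speed": "fast"}, parse_options is expected
    # to return url_data shaped roughly as
    #   [{"path": "/table/table/changeoption.html", "params": {"id": 201, "value": 1}},
    #    {"path": "/table/table/changeoption.html", "params": {"id": 200, "value": 0}},
    #    ...]
    # plus an entry for the default "presentation" text; set_option() later turns
    # each entry into a GET request against the table.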
def get_group_id(self, group_name):
"""For BGA groups of people."""
uri_vars = {"q": group_name, "start": 0, "count": "Infinity"}
group_uri = urllib.parse.urlencode(uri_vars)
full_url = self.base_url + f"/group/group/findgroup.html?{group_uri}"
result_str = self.fetch(full_url)
result = json.loads(result_str)
group_id = result["items"][0]["id"] # Choose ID of first result
logger.debug(f"Found {group_id} for group {group_name}")
return group_id
def create_table_url(self, table_id):
"""Given the table id, make the table url."""
return self.base_url + "/table?table=" + str(table_id)
def verify_privileged(self):
"""Verify that the user is logged in by accessing a url they should have access to."""
community_text = self.fetch(self.base_url + "/community")
return "You must be logged in to see this page." not in community_text
def get_group_options(self, table_id):
"""The friend group id is unique to every user. Search the table HTML for it."""
table_url = self.base_url + "/table?table=" + str(table_id)
html_text = self.fetch(table_url)
restrict_group_select = re.search(r'<select id="restrictToGroup">([\s\S]*?)<\/select>', html_text)[0]
options = re.findall(r'"(\d*)">([^<]*)', restrict_group_select)
return options
def get_player_id(self, player):
"""Given the name of a player, get their player id."""
url = self.base_url + "/player/player/findplayer.html"
params = {"q": player, "start": 0, "count": "Infinity"}
url += "?" + urllib.parse.urlencode(params)
resp = self.fetch(url)
resp_json = json.loads(resp)
if len(resp_json["items"]) == 0:
return -1
return resp_json["items"][0]["id"]
def get_tables(self, player_id):
"""Get all of the tables that a player is playing at. Tables are returned as json objects."""
url = self.base_url + "/tablemanager/tablemanager/tableinfos.html"
params = {"playerfilter": player_id, "dojo.preventCache": str(int(time.time()))}
url += "?" + urllib.parse.urlencode(params)
resp = self.fetch(url)
resp_json = json.loads(resp)
return resp_json["data"]["tables"]
def get_table_metadata(self, table_data):
"""Get the numbure of moves and progress of the game as strings"""
table_id = table_data["id"]
game_server = table_data["gameserver"]
game_name = table_data["game_name"]
table_url = f"{self.base_url}/{game_server}/{game_name}?table={table_id}"
resp = self.fetch(table_url)
        game_progress_match = re.search(r'updateGameProgression":(\d+)}', resp)
if game_progress_match:
game_progress = game_progress_match[1]
else:
game_progress = ""
num_moves_match = re.search('move_nbr":"([^"]*)"', resp)
if num_moves_match:
num_moves = num_moves_match[1]
else:
num_moves = ""
current_player_match = re.search('active_player":"([^"]*)"', resp)
if current_player_match:
current_player = current_player_match[1]
else:
current_player = ""
return game_progress, num_moves, current_player, table_url
def close_connection(self):
"""Close the connection. aiohttp complains otherwise."""
self.session.close()
|
import ruamel.yaml
def load_yml_from_file(file_path):
yaml = ruamel.yaml.YAML()
    data = None
    with open(file_path) as file:
        data = yaml.load(file)
    return data
def save_yml_to_file(data, file_path):
yaml = ruamel.yaml.YAML()
with open(file_path, "w") as file:
yaml.dump(data, file)
return True
def yml_clear_list(the_list):
new_list = ruamel.yaml.comments.CommentedSeq(the_list)
new_list.clear()
new_list._yaml_comment = the_list.ca
return new_list
def yml_fix_list_comments(the_list):
if len(list(the_list.ca.items.keys())):
new_comment_key = list(the_list.ca.items.keys())[0]
the_list.ca.items[int(len(the_list) - 1)] = the_list.ca.items.pop(new_comment_key)
return the_list
def yml_add_to_list(the_list, new_val):
the_list.append(new_val)
yml_fix_list_comments(the_list)
return the_list
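# Usage sketch (added for illustration): "config.yml" and the "servers" key are
# hypothetical, not part of this module; the helpers above are meant to keep YAML
# comments intact across a load/modify/save round trip.
if __name__ == "__main__":
    data = load_yml_from_file("config.yml")
    servers = data["servers"]                       # a CommentedSeq when it was a list
    yml_add_to_list(servers, "new-server.example")  # append while keeping the trailing comment
    save_yml_to_file(data, "config.yml")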
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Augments json files with table linearization used by baselines.
Note that this code is merely meant to be starting point for research and
there may be much better table representations for this task.
"""
import copy
import json
from absl import app
from absl import flags
from language.totto.baseline_preprocessing import preprocess_utils
import six
flags.DEFINE_string("input_path", None, "Input json file.")
flags.DEFINE_string("output_path", None, "Output directory.")
flags.DEFINE_integer("examples_to_visualize", 100,
"Number of examples to visualize.")
FLAGS = flags.FLAGS
def _generate_processed_examples(input_path):
"""Generate TF examples."""
processed_json_examples = []
with open(input_path, "r", encoding="utf-8") as input_file:
for line in input_file:
if len(processed_json_examples) % 100 == 0:
print("Num examples processed: %d" % len(processed_json_examples))
line = six.ensure_text(line, "utf-8")
json_example = json.loads(line)
table = json_example["table"]
table_page_title = json_example["table_page_title"]
table_section_title = json_example["table_section_title"]
cell_indices = json_example["highlighted_cells"]
subtable = (
preprocess_utils.get_highlighted_subtable(
table=table,
cell_indices=cell_indices,
with_heuristic_headers=True))
# Table strings without page and section title.
full_table_str = preprocess_utils.linearize_full_table(
table=table,
cell_indices=cell_indices,
table_page_title=None,
table_section_title=None)
subtable_str = (
preprocess_utils.linearize_subtable(
subtable=subtable,
table_page_title=None,
table_section_title=None))
full_table_metadata_str = (
preprocess_utils.linearize_full_table(
table=table,
cell_indices=cell_indices,
table_page_title=table_page_title,
table_section_title=table_section_title))
subtable_metadata_str = (
preprocess_utils.linearize_subtable(
subtable=subtable,
table_page_title=table_page_title,
table_section_title=table_section_title))
processed_json_example = copy.deepcopy(json_example)
processed_json_example["full_table_str"] = full_table_str
processed_json_example["subtable_str"] = subtable_str
processed_json_example[
"full_table_metadata_str"] = full_table_metadata_str
processed_json_example["subtable_metadata_str"] = subtable_metadata_str
processed_json_examples.append(processed_json_example)
print("Num examples processed: %d" % len(processed_json_examples))
return processed_json_examples
def main(_):
input_path = FLAGS.input_path
output_path = FLAGS.output_path
processed_json_examples = _generate_processed_examples(input_path)
with open(output_path, "w", encoding="utf-8") as f:
for json_example in processed_json_examples:
f.write(json.dumps(json_example) + "\n")
if __name__ == "__main__":
app.run(main)
|
#
# Time complexity:
# O(lines*columns) (worst case, where all the neighbours have the same color)
# O(1) (best case, where no neighbour has the same color)
#
# Space complexity:
#   O(lines*columns) (worst case: the stack and the visited set can hold every cell)
#
def flood_fill(screen, lines, columns, line, column, color):
def inbound(l, c):
return (l >= 0 and l < lines) and (c >= 0 and c < columns)
def key(l, c):
return "{},{}".format(l, c)
stack = [[line, column]]
visited = set()
while stack:
l, c = stack.pop()
# Mark the cell as visited
visited.add(key(l, c))
        # Schedule a visit to all non-diagonal neighbours
        # that weren't visited yet and have the same color
neighbours = [
[l-1, c ],
[l+1, c ],
[l , c-1],
[l , c+1]
]
for nl, nc in neighbours:
if inbound(nl, nc) and key(nl, nc) not in visited and screen[nl][nc] == screen[l][c]:
stack.append([nl, nc])
# Paint the current cell
screen[l][c] = color
return screen
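# Usage sketch (added for illustration): repaint the connected region of 2s that
# contains cell (2, 1) with the color 9.
if __name__ == "__main__":
    screen = [
        [1, 1, 2],
        [1, 2, 2],
        [2, 2, 2],
    ]
    flood_fill(screen, lines=3, columns=3, line=2, column=1, color=9)
    print(screen)  # expected: [[1, 1, 9], [1, 9, 9], [9, 9, 9]]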
|
n = int(input('Enter any number: '))
if n % 2 == 0:
    print('The number {} is \033[32meven!\033[m'.format(n))
else:
    print('The number {} is \033[31modd!\033[m'.format(n))
|
# -*- coding: utf-8 -*-
import struct
def write_int8(octet: bytes, pos: int, value: int):
octet = bytearray(octet)
octet[pos] = value
return bytes(octet)
def write_int16(octet: bytes, pos: int, value: int, big_endian: bool = False):
octet = bytearray(octet)
if big_endian:
octet[pos] = value >> 8
octet[pos + 1] = value & 0x00FF
else:
octet[pos + 1] = value >> 8
octet[pos] = value & 0x00FF
return bytes(octet)
def write_int24(octet: bytes, pos: int, value: int, big_endian: bool = False):
octet = bytearray(octet)
if big_endian:
octet[pos] = value >> 16
octet[pos + 1] = value >> 8 & 0x0000FF
octet[pos + 2] = value & 0x0000FF
else:
octet[pos + 2] = value >> 16
octet[pos + 1] = value >> 8 & 0x0000FF
octet[pos] = value & 0x0000FF
return bytes(octet)
def write_int32(octet: bytes, pos: int, value: int, big_endian: bool = False):
octet = bytearray(octet)
if big_endian:
octet[pos] = value >> 24
octet[pos + 1] = value >> 16 & 0x0000FF
octet[pos + 2] = value >> 8 & 0x0000FF
octet[pos + 3] = value & 0x000000FF
else:
octet[pos + 3] = value >> 24
octet[pos + 2] = value >> 16 & 0x0000FF
octet[pos + 1] = value >> 8 & 0x0000FF
octet[pos] = value & 0x000000FF
return bytes(octet)
def write_str(octet: bytes, pos: int, length: int, value: str, big_endian: bool = False):
    octet = bytearray(octet)
    # a str must be converted to bytes before its items can be written into a bytearray
    data = value.encode() if isinstance(value, str) else value
    if big_endian:
        for i in range(length):
            octet[pos + length - i - 1] = data[i]
    else:
        for i in range(length):
            octet[pos + i] = data[i]
    return bytes(octet)
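# Usage sketch (added for illustration): the same 16-bit value written in both byte orders.
if __name__ == "__main__":
    buf = bytes(4)                                       # four zero bytes
    little = write_int16(buf, 0, 0x1234)                 # little-endian (default)
    big = write_int16(buf, 0, 0x1234, big_endian=True)   # most significant byte first
    print(little.hex())  # expected: 34120000
    print(big.hex())     # expected: 12340000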
|
import argparse
import logging
import os
import numpy as np
import torch
import utils
import model.net as net
from model.data_loader import DataLoader
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default='experiments/base_model',
help="Directory containing params.json")
parser.add_argument('--restore_file', default='best', help="name of the file in --model_dir \
containing weights to load")
def evaluate(model, loss_fn, data_iterator, metrics, params, num_steps):
"""Evaluate the model on 'num_steps' batches"""
model.eval() # set model to evaluation mode
summ = [] # summary for current eval loop
for _ in range(num_steps):
data_batch, labels_batch = next(data_iterator)
output_batch = model(data_batch)
loss = loss_fn(output_batch, labels_batch)
output_batch = output_batch.data.cpu().numpy()
labels_batch = labels_batch.data.cpu().numpy()
summary_batch = {metric: metrics[metric](
output_batch, labels_batch) for metric in metrics}
summary_batch['loss'] = loss.item()
summ.append(summary_batch)
# compute mean of all metrics in summary
metrics_mean = {metric: np.mean([x[metric]
for x in summ]) for metric in summ[0]}
metrics_string = " ; ".join("{}: {:05.3f}".format(k, v)
for k, v in metrics_mean.items())
logging.info('- Eval metrics: ' + metrics_string)
return metrics_mean
if __name__ == '__main__':
"""Evaluate the model on the test set"""
# Load the parameters
args = parser.parse_args()
json_path = os.path.join(args.model_dir, 'params.json')
assert os.path.isfile(
json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
# use GPU if available
    params.cuda = torch.cuda.is_available()  # use GPU if available
# Set the random seed for reproducible experiments
torch.manual_seed(230)
if params.cuda:
torch.cuda.manual_seed(230)
# Get the logger
utils.set_logger(os.path.join(args.model_dir, 'evaluate.log'))
# Create the input data pipeline
logging.info("Creating the dataset...")
# load data
data_loader = DataLoader('data/', params)
data = data_loader.load_data(['test'], 'data/')
test_data = data['test']
# specify the test set size
params.test_size = test_data['size']
test_data_iterator = data_loader.data_iterator(test_data, params)
logging.info("- done.")
# Define the model
model = net.Net(params).cuda() if params.cuda else net.Net(params)
loss_fn = net.loss_fn
metrics = net.metrics
logging.info("Starting evaluation")
# Reload weights from the saved file
utils.load_checkpoint(os.path.join(
args.model_dir, args.restore_file + '.pth.tar'), model)
# Evaluate
num_steps = (params.test_size + 1) // params.batch_size
test_metrics = evaluate(
model, loss_fn, test_data_iterator, metrics, params, num_steps)
save_path = os.path.join(
args.model_dir, "metrics_test_{}.json".format(args.restore_file))
utils.save_dict_to_json(test_metrics, save_path)
|
# -*- coding: iso-8859-1 -*-
from app import db
class User(db.Model):
    __tablename__ = 'alumnos'  # MUST BE LOWERCASE!!! unbelievable...
CODIGO = db.Column('CODIGO', db.Integer, primary_key=True)
NOMBRE = db.Column('NOMBRE', db.String(150))
APELLIDO = db.Column('APELLIDO', db.String(100))
EMAIL = db.Column('EMAIL', db.String(100))
NRODOC = db.Column('NRODOC', db.String(15))
CLAVE = db.Column('CLAVE', db.String(15))
avisos = db.relationship('AvisosAlumnos', backref='user', lazy='dynamic')
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return str(self.CODIGO) # python 3
def __repr__(self):
return '<Alumno %r>' % (self.NOMBRE)
# examples of queries I can run
# http://packages.python.org/Flask-SQLAlchemy/index.html
class AvisosAlumnos(db.Model):
__tablename__ = 'avisosalumnos'
CODIGO = db.Column('CODIGO', db.Integer, primary_key = True)
FECHAALTA = db.Column('FECHAALTA', db.DateTime)
TEXTO = db.Column('TEXTO', db.LargeBinary)
CODALUMNO = db.Column('CODALUMNO', db.Integer, db.ForeignKey('alumnos.CODIGO'))
def __repr__(self):
return '<AvisoAlumno %r>' % (self.TEXTO) |
"""
Tests for defer() and only().
"""
from django.db import models
from django.db.models.query_utils import DeferredAttribute
class Secondary(models.Model):
first = models.CharField(max_length=50)
second = models.CharField(max_length=50)
class Primary(models.Model):
name = models.CharField(max_length=50)
value = models.CharField(max_length=50)
related = models.ForeignKey(Secondary)
def __unicode__(self):
return self.name
class Child(Primary):
pass
class BigChild(Primary):
other = models.CharField(max_length=50)
def count_delayed_fields(obj, debug=False):
"""
Returns the number of delayed attributes on the given model instance.
"""
count = 0
for field in obj._meta.fields:
if isinstance(obj.__class__.__dict__.get(field.attname),
DeferredAttribute):
if debug:
print field.name, field.attname
count += 1
return count
__test__ = {"API_TEST": """
To all outward appearances, instances with deferred fields look the same as
normal instances when we examine attribute values. Therefore we test for the
number of deferred fields on returned instances (by poking at the internals),
as a way to observe what is going on.
>>> s1 = Secondary.objects.create(first="x1", second="y1")
>>> p1 = Primary.objects.create(name="p1", value="xx", related=s1)
>>> qs = Primary.objects.all()
>>> count_delayed_fields(qs.defer('name')[0])
1
>>> count_delayed_fields(qs.only('name')[0])
2
>>> count_delayed_fields(qs.defer('related__first')[0])
0
>>> obj = qs.select_related().only('related__first')[0]
>>> count_delayed_fields(obj)
2
>>> obj.related_id == s1.pk
True
>>> count_delayed_fields(qs.defer('name').extra(select={'a': 1})[0])
1
>>> count_delayed_fields(qs.extra(select={'a': 1}).defer('name')[0])
1
>>> count_delayed_fields(qs.defer('name').defer('value')[0])
2
>>> count_delayed_fields(qs.only('name').only('value')[0])
2
>>> count_delayed_fields(qs.only('name').defer('value')[0])
2
>>> count_delayed_fields(qs.only('name', 'value').defer('value')[0])
2
>>> count_delayed_fields(qs.defer('name').only('value')[0])
2
>>> obj = qs.only()[0]
>>> count_delayed_fields(qs.defer(None)[0])
0
>>> count_delayed_fields(qs.only('name').defer(None)[0])
0
Using values() won't defer anything (you get the full list of dictionaries
back), but it still works.
>>> qs.defer('name').values()[0] == {'id': p1.id, 'name': u'p1', 'value': 'xx', 'related_id': s1.id}
True
>>> qs.only('name').values()[0] == {'id': p1.id, 'name': u'p1', 'value': 'xx', 'related_id': s1.id}
True
Using defer() and only() with get() is also valid.
>>> count_delayed_fields(qs.defer('name').get(pk=p1.pk))
1
>>> count_delayed_fields(qs.only('name').get(pk=p1.pk))
2
# KNOWN NOT TO WORK: >>> count_delayed_fields(qs.only('name').select_related('related')[0])
# KNOWN NOT TO WORK >>> count_delayed_fields(qs.defer('related').select_related('related')[0])
# Saving models with deferred fields is possible (but inefficient, since every
# field has to be retrieved first).
>>> obj = Primary.objects.defer("value").get(name="p1")
>>> obj.name = "a new name"
>>> obj.save()
>>> Primary.objects.all()
[<Primary: a new name>]
# Regression for #10572 - A subclass with no extra fields can defer fields from the base class
>>> _ = Child.objects.create(name="c1", value="foo", related=s1)
# You can defer a field on a baseclass when the subclass has no fields
>>> obj = Child.objects.defer("value").get(name="c1")
>>> count_delayed_fields(obj)
1
>>> obj.name
u"c1"
>>> obj.value
u"foo"
>>> obj.name = "c2"
>>> obj.save()
# You can retrieve a single column on a base class with no fields
>>> obj = Child.objects.only("name").get(name="c2")
>>> count_delayed_fields(obj)
3
>>> obj.name
u"c2"
>>> obj.value
u"foo"
>>> obj.name = "cc"
>>> obj.save()
>>> _ = BigChild.objects.create(name="b1", value="foo", related=s1, other="bar")
# You can defer a field on a baseclass
>>> obj = BigChild.objects.defer("value").get(name="b1")
>>> count_delayed_fields(obj)
1
>>> obj.name
u"b1"
>>> obj.value
u"foo"
>>> obj.other
u"bar"
>>> obj.name = "b2"
>>> obj.save()
# You can defer a field on a subclass
>>> obj = BigChild.objects.defer("other").get(name="b2")
>>> count_delayed_fields(obj)
1
>>> obj.name
u"b2"
>>> obj.value
u"foo"
>>> obj.other
u"bar"
>>> obj.name = "b3"
>>> obj.save()
# You can retrieve a single field on a baseclass
>>> obj = BigChild.objects.only("name").get(name="b3")
>>> count_delayed_fields(obj)
4
>>> obj.name
u"b3"
>>> obj.value
u"foo"
>>> obj.other
u"bar"
>>> obj.name = "b4"
>>> obj.save()
# You can retrieve a single field on a baseclass
>>> obj = BigChild.objects.only("other").get(name="b4")
>>> count_delayed_fields(obj)
4
>>> obj.name
u"b4"
>>> obj.value
u"foo"
>>> obj.other
u"bar"
>>> obj.name = "bb"
>>> obj.save()
"""}
|
from wetterdienst.dwd.forecasts.api import DWDMosmixSites, DWDMosmixData
from wetterdienst.dwd.forecasts.metadata import (
DWDForecastDate,
DWDForecastParameter,
DWDMosmixType,
)
|
with open('inputs/input2.txt') as fin:
raw = fin.read()
def parse(raw):
start = [(x[:3], int(x[4:])) for x in (raw.split('\n'))]
return start
a = parse(raw)
def part_1(arr):
indices = set()
acc = 0
i = 0
while i < len(arr):
pair = arr[i]
if i in indices:
break
indices.add(i)
if pair[0] == 'acc':
acc += pair[1]
i += 1
elif pair[0] == 'jmp':
i += pair[1]
else:
i += 1
return acc
def not_infinite(arr):
indices = set()
acc = 0
i = 0
while True:
if i in indices:
return 0
indices.add(i)
if i == len(arr):
return acc
pair = arr[i]
if pair[0] == 'acc':
acc += pair[1]
i += 1
elif pair[0] == 'jmp':
i += pair[1]
else:
i += 1
def part_2(arr):
for i, x in enumerate(arr):
if x[0] == 'jmp':
test = arr[:i] + [('nop', x[1])] + arr[i + 1:]
if c := not_infinite(test):
return c
if x[0] == 'nop':
test = arr[:i] + [('jmp', x[1])] + arr[(i + 1):]
if c := not_infinite(test):
return c
print(part_1(a))
print(part_2(a))
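# Worked example (added for illustration): a tiny hand-written program; the real
# puzzle input above still comes from inputs/input2.txt.
toy = parse("nop +0\nacc +1\njmp +4\nacc +3\njmp -3\nacc -99\nacc +1\njmp -4\nacc +6")
print(part_1(toy))  # 5: accumulator value when an instruction is about to run a second time
print(part_2(toy))  # 8: accumulator after swapping the jmp at index 7 for a nop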
|
from .utils import Atom, Residue, ActiveSite
from .io import read_active_sites
import numpy as np
from random import seed
from itertools import combinations as combo
from math import log
###########################
# Clustering By Partition #
###########################
def cluster_by_partitioning(active_sites,num_clusters=7, max_iters=10000, dist_thresh=0.001):
"""
Cluster a given set of ActiveSite instances using a partitioning method.
Input: a list of ActiveSite instances
(OPTIONAL): number of clusters, maximum iterations, and distance threshold
Output: a clustering of ActiveSite instances
(this is really a list of clusters, each of which is list of
ActiveSite instances)
"""
labels = k_means(active_sites,num_clusters,max_iters,dist_thresh)
clustering = []
for clust in range(num_clusters):
clustering.append([active_sites[i] for i in range(len(labels)) if labels[i]==clust])
return clustering
def k_means(active_sites,num_clusters,max_iters,dist_thresh,printMe=False):
"""
K-Means clustering
    Input: List of active site instances, number of clusters, number of iters and threshold
Output: List of labels and cluster centers as numpy arrays
"""
seed()
# edge cases
num_clusters = round(num_clusters)
max_iters = round(max_iters)
if printMe:
print('------')
if num_clusters>len(active_sites) or num_clusters<1:
if printMe:
print("Invalid number of clusters: Default to 7")
num_clusters = 7
else:
if printMe:
print('Number of Clusters:',num_clusters)
# initialize centroids randomly by choosing X random points
iter = 0
inds = np.random.choice(len(active_sites),num_clusters,replace=False)
cluster_centers = np.array([active_sites[i].get_norm_metrics() for i in inds])
# init trackers (10 most recent ones)
prev_centers = np.zeros(cluster_centers.shape)
# begin algorithm
if printMe:
print('Maximum Iterations:',max_iters)
print('Distance Threshold:',dist_thresh)
print('------')
while not iter > max_iters and not distance_cutoff(
prev_centers,cluster_centers,dist_thresh,printMe
):
prev_centers = cluster_centers
iter += 1
if iter%1000==0:
print("Iteration no.", iter)
#assign all objects to a cluster, then reevaluate cluster centers
labels = assign_to_clusters(active_sites,cluster_centers)
cluster_centers = update_cluster_locs(active_sites, labels, num_clusters)
if printMe:
print('Total iterations:',iter,'\n')
return labels
def distance_cutoff(prev,current,thresh,printMe):
"""
Input: numpy arrays of previous/current centroid locations, and threshold
    Output: boolean, True when the centroids moved less than the threshold on average (i.e. converged)
"""
sum = 0
for i in range(prev.shape[0]):
sum += np.linalg.norm(prev[i,:] - current[i,:])
sum /= prev.shape[0]
if sum < thresh and printMe:
print("Threshold reached, mean center distance travelled this step is ", sum)
return sum < thresh
def assign_to_clusters(active_sites,cluster_centers):
"""
Input: List of ActiveSite instances, and 2D numpy array of cluster centers
Output: array of labels, value is the row (cluster) that Site was assigned to.
"""
labels = np.zeros([len(active_sites)])
#for each active site
for i in range(len(active_sites)):
site_metrics = active_sites[i].get_norm_metrics() #pull its metrics
cluster_dist = np.sqrt(((cluster_centers - site_metrics)**2).sum(axis=1)) #get distances
labels[i] = np.argmin(cluster_dist) #label according to closest one
return labels
def update_cluster_locs(active_sites, labels, num_clusters):
"""
Input: List of ActiveSite instances and labels, and the number of clusters
Output: Numpy array of new cluster centers
"""
new_cluster_centers = np.zeros(shape=(num_clusters,len(active_sites[0].get_norm_metrics())))
# for each cluster, get all assigned sites and average their metrics
for clust in range(num_clusters):
site_inds = [ind for ind,val in enumerate(labels) if val==clust]
all_clust_metrics = np.array([active_sites[i].get_norm_metrics() for i in site_inds])
new_center = all_clust_metrics.mean(axis=0)
new_cluster_centers[clust,:] = new_center
return new_cluster_centers
###########################
# Hierarchical Clustering #
###########################
def cluster_hierarchically(active_sites,num_clusters=7):
"""
Cluster the given set of ActiveSite instances using a hierarchical algorithm.
Input: a list of ActiveSite instances
(OPTIONAL): number of clusters (default 7)
Output: a list of clusterings
(each clustering is a list of lists of ActiveSite instances)
"""
labels = centroid_linkage(active_sites,num_clusters)
clustering = []
for clust in np.unique(labels):
clustering.append([active_sites[ind] for ind,val in enumerate(labels.tolist() )if val==clust])
return clustering
def centroid_linkage(active_sites,min_clusters,printMe=False):
"""
Centroid Linkage clustering
This implementation builds until it hits min_clusters. Dendrograms not supported.
Inputs: List of ActiveSite instances and number of clusters
Outputs: List of labels and cluster centers
"""
seed()
# edge cases
min_clusters = round(min_clusters)
if printMe:
print('------')
if min_clusters>len(active_sites) or min_clusters<1:
if printMe:
print("Invalid number of clusters: Default to 7")
min_clusters = 7
else:
if printMe:
print('Target Number of Clusters:',min_clusters)
# initialize variables
num_clusters = len(active_sites)
labels = np.arange(len(active_sites))
np.random.shuffle(labels)
# begin algorithm
while num_clusters > min_clusters:
#calculate the centroid of each cluster and find the smallest distance
clusters = shortest_centroid_dist(active_sites,labels)
#merge the two clusters
labels = merge(clusters,labels)
num_clusters -= 1
if num_clusters%10==0:
print('Number of Clusters:',num_clusters)
return labels
def shortest_centroid_dist(active_sites,labels):
"""
Input: List of active_sites and labels
Output: List of two cluster labels that are closest
"""
# find unique values in labels and their indices
u, u_ind = np.unique(labels,return_inverse=True)
# go through all unique pairwise combinations of labels
# and test the distances between centroids
shortest = 1000000000
closest_clusters = [0,0]
for i,j in combo(u,2):
# get metrics from all sites for each cluster pair
inds_i = [ind for ind,val in enumerate(labels.tolist()) if val==i]
metrics_i = np.array([active_sites[k].get_norm_metrics() for k in inds_i])
inds_j = [ind for ind,val in enumerate(labels.tolist()) if val==j]
metrics_j = np.array([active_sites[m].get_norm_metrics() for m in inds_j])
# measure distance between centroids and update if necessary
dist = np.linalg.norm(metrics_j.mean(axis=0)-metrics_i.mean(axis=0))
if dist < shortest:
shortest = dist
closest_clusters = [i,j]
return closest_clusters
def merge(clusters, labels):
"""
Input: List of two cluster labels and list of all labels
Output: List of labels, with specified ones merged
"""
#arbitrarily merge first INTO second (so that first label no longer exists)
labels[labels==clusters[0]] = clusters[1]
return labels
###########################
# Eval Clustering Quality #
###########################
def quality(clustering):
"""
Measures quality of clustering quantitatively
Input: An embedded list of clustered ActiveSite instances (generated by clustering alg)
    Output: The value Q, the mean pairwise intra-cluster distance averaged over clusters (lower is better)
"""
Q = 0
# loop through clusters
for n in range(len(clustering)):
cluster_mean = 0
num_pairs = 0
# loop through pairs per cluster
for i,j in combo(clustering[n],2):
num_pairs += 1
cluster_mean += np.linalg.norm(i.get_norm_metrics()-j.get_norm_metrics())
# normalize to remain unbiased by cluster size
if num_pairs==0:
continue
else:
cluster_mean /= num_pairs
Q += cluster_mean
Q /= len(clustering)
return Q
def compare(clust_a,clust_b):
"""
    Compares two clusterings. Positive = clustering B is better than A
Input: Two clusterings from above clustering algorithms
Output: log-ratio comparison of the quality of the two
"""
return log(quality(clust_a)/quality(clust_b))
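# Worked example of the convention (added for illustration): quality() is a mean
# pairwise intra-cluster distance, so smaller means tighter clusters. compare(a, b)
# returns log(quality(a) / quality(b)); e.g. quality(a) = 2.0 and quality(b) = 1.0
# gives log(2) ~= 0.69 > 0, i.e. clustering b is the tighter (better) one.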
|
import sys
import json
# see http://initd.org/psycopg/docs/usage.html
import psycopg2
# from psycopg2.extras import RealDictCursor
# see https://stackoverflow.com/questions/10252247/how-do-i-get-a-list-of-column-names-from-a-psycopg2-cursor
# about half way down
import psycopg2.extras
# see https://stackoverflow.com/questions/50174683/how-to-load-data-into-pandas-from-a-large-database
# even though I am not worried about the large aspect for now
import pandas as pd
# see pandas 1.0.3 api reference guid:
# https://pandas.pydata.org/pandas-docs/stable/reference/index.html
# older pandas: see https://pandas.pydata.org/pandas-docs/version/0.13/api.html, search for sql
import pandas.io.sql as psql
# see https://www.tutorialspoint.com/sqlalchemy/sqlalchemy_introduction.htm
# from sqlalchemy import create_engine
import numpy as np
from psycopg2.extensions import register_adapter, AsIs
register_adapter(np.int64, AsIs)
#############################################
#
#
#
#############################################
def connect_postgres(dbname, user, password, host, port):
try:
        print('\nAttempting to connect to a postgres database using psycopg2')
conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host, port=port)
cursor = conn.cursor()
print('Successfully connected to the postgres database, and created a cursor for it')
db = {'conn': conn, 'cursor': cursor}
# see https://www.psycopg.org/docs/extras.html
db['cursor_json'] = db['conn'].cursor(cursor_factory=psycopg2.extras.RealDictCursor)
# use the next one if you want records without the columns names
# db['cursor_json'] = db['conn'].cursor(cursor_factory=psycopg2.extras.DictCursor)
return db
except Exception as e:
print ("\nI am unable to connect to the database")
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
print('\nExiting program because failed to connect to opus database\n')
sys.exit()
def create_schema(db, schema_name):
print('\nEntering create_schema function, with schema name ' + schema_name)
q = "CREATE SCHEMA IF NOT EXISTS " + schema_name
try:
db['cursor'].execute(q)
db['conn'].commit()
print('\nExecuted the CREATE SCHEMA IF NOT EXISTS command, and committed')
except Exception as e:
print ("\nUnable to execute the CREATE SCHEMA IF NOT EXISTS command")
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
def set_search_path(db, path):
try:
db['cursor'].execute("set search_path to " + path)
db['conn'].commit()
print('\nSet search_path to "' + path + '" and committed')
except Exception as e:
print ("\nUnable to set search_path to '" + path + "', perhaps because that doesn't exist as a schema")
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
def execute_db_update(db, update):
try:
db['cursor'].execute(update)
db['conn'].commit()
print('\nSuccessfully executed the db update and committed')
except Exception as e:
print ("\nUnable to execute the db update")
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
def execute_db_query(db, query):
try:
db['cursor'].execute(query)
db['conn'].commit()
except Exception as e:
print ("\nUnable to execute the given query")
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
def delete_all_from_table(db, table):
q = '''
DELETE
FROM table
'''
q = q.replace('table', table)
execute_db_update(db, q)
'''
def add_indexes_to_mimiciii_tables(db):
print('\nEntered add_indexes_to_mimiciii_tables')
q = {}
q['chartevents__itemid'] = """
create index chartevents__itemid
on chartevents(itemid)
"""
q['chartevents__hadm_id'] = """
create index chartevents__hadm_id
on chartevents(hadm_id)
"""
q['chartevents__subject_id'] = """
create index chartevents__subject_id
on chartevents(subject_id)
"""
q['inputevents_mv__subj_item_starttime'] = """
create index inputevents_mv__subj_item_starttime
on inputevents_mv(subject_id, itemid, starttime)
"""
q['labevents__subject_id'] = """
create index labevents__subject_id
on labevents(subject_id)
"""
q['d_items__category'] = """
create index d_items__category
on d_items(category)
"""
for key in q:
try:
db['cursor'].execute(q[key])
db['conn'].commit()
print(' Successfully created index ' + key)
except: #Exception as e:
db['conn'].rollback()
print(' Failed to create index ' + key + ', probably because it already exists')
"""
# to use this part, also adjust the "except" line 3 lines above
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
"""
print('Added indexes for mimiciii tables (if needed), including a commit')
'''
def close_postgres(db):
try:
db['cursor'].close()
db['conn'].close()
print('\nHave closed the cursor and connection to the database')
except:
print('\nInput to the close_postgres function call is not holding a postgres connection and cursor')
# following about the third comment down in
# https://stackoverflow.com/questions/23103962/how-to-write-dataframe-to-postgres-table
# Assumes that:
# table_name has been defined (and typically, is empty)
# table_name column names are same as df (but I think can be in different order)
# table_name column data types match with df column data types
# if table_name does not include a schema name, then already a "set search_path to ...' has been invoked
# db is a structure with 'conn' and 'cursor', created using connect_postgres() above
def load_df_into_table_with_same_columns(df, db, table_name):
    print('\nEntering routine that will load a dataframe into the postgres table ' + table_name)
if len(df) > 0:
df_columns = list(df)
# create (col1,col2,...)
columns = ",".join(df_columns)
# create VALUES('%s', '%s",...) one '%s' per column
values = "VALUES({})".format(",".join(["%s" for _ in df_columns]))
#create INSERT INTO table (columns) VALUES('%s',...)
insert_stmt = "INSERT INTO {} ({}) {}".format(table_name,columns,values)
try:
psycopg2.extras.execute_batch(db['cursor'], insert_stmt, df.values)
db['conn'].commit()
print('Succeeded with the insertion of dataframe records into postgres table')
except Exception as e: # if you don't want the exception comment, then drop "Exception as e"
db['conn'].rollback()
print(' Failed to load dataframe into the table with name "' + table_name + '"')
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
return
def import_query_into_df(query, db):
try:
df = pd.read_sql_query(query, db['conn'])
print('Succeeded in pulling query output into a dataframe')
return df
except Exception as e: # if you don't want the exception comment, then drop "Exception as e"
db['conn'].rollback()
print(' Failed to pull query output into a dataframe')
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
return
def export_table_to_csv(table, db, dirName, timestamp):
    print('\nEntering function to export table "' + table + '" to csv file')
q = "select * from " + table
outputquery = "COPY ({0}) TO STDOUT WITH CSV HEADER".format(q)
# timestamp = datetime.now().strftime('%Y-%m-%d--%H-%M')
# dirName = OPUS_DATA_OUTPUTS_DIR
fileName = timestamp + '_' + table + '.csv'
with open(dirName + fileName, 'w') as f:
db['cursor'].copy_expert(outputquery, f)
f.close()
print(' Wrote csv file ' + dirName + fileName )
# util_general.print_current_time()
def export_query_to_csv(db, q, timestamp, dirName, filenameroot):
print('\nEntering function to write output of a query into csv file')
outputquery = "COPY ({0}) TO STDOUT WITH CSV HEADER".format(q)
# timestamp = datetime.now().strftime('%Y-%m-%d--%H-%M')
# dirName = OPUS_DATA_OUTPUTS_DIR
fileName = timestamp + '__' + filenameroot + '.csv'
try:
with open(dirName + fileName, 'w') as f:
db['cursor'].copy_expert(outputquery, f)
f.close()
print('\nSuccessfully ran query and wrote output into csv file: \n' + dirName + fileName )
# util_general.print_current_time()
except Exception as e:
print ("\nException in function export_query_to_csv ")
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
print("\nNow closing the database connection")
close_postgres(db)
def export_query_to_json(db, q):
try:
db['cursor_json'].execute(q)
# the "default=str" takes care of date objects from Postgres, and maps them to string
# so that JSON can serialize it
return json.dumps(db['cursor_json'].fetchall(), indent=2, default=str)
except Exception as e:
print ("\nException in function export_query_to_json ")
err_msg = ''
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
err_msg = err_msg + str(e.message)
else:
print(e)
err_msg = err_msg + str(e)
print('\nerr_msg is: ' + err_msg)
print("\nNow closing the database connection")
close_postgres(db)
return {"ERROR" : "Failed to write query into json, with exception message: '" \
+ err_msg + "'"}
# print('\nExiting program because failed to connect to opus database\n')
# sys.exit()
# illustration of using sqlalchemy
def testdb(db_eng):
print('\nEntered the function testdb')
q1 = "set search_path to opus"
q2 = "CREATE TABLE IF NOT EXISTS films (title text, director text, year text)"
q3 = """
INSERT INTO opus.films (title, director, year)
VALUES ('Doctor Strange', 'Scott Derrickson', '2016')
"""
with db_eng.connect() as conn:
conn.execute(q1)
conn.execute(q2)
conn.execute(q3)
def return_row_after_insert(db, query):
row = {}
try:
db['cursor'].execute(query)
row = db['cursor'].fetchone()
db['conn'].commit()
print('Succeeded with the insertion of dataframe records into postgres table')
except Exception as e: # if you don't want the exception comment, then drop "Exception as e"
db['conn'].rollback()
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
return row
def execute_db_bulk_update(db, query, rows):
try:
psycopg2.extras.execute_batch(db['cursor'], query, rows)
db['conn'].commit()
print('\nSuccessfully executed the db update and committed')
except Exception as e:
print ("\nUnable to execute the db update")
print(' The exception error message is as follows:')
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
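# Usage sketch (added for illustration): the connection parameters, schema and
# query below are hypothetical.
if __name__ == '__main__':
    db = connect_postgres(dbname='opus', user='postgres', password='secret',
                          host='localhost', port=5432)
    set_search_path(db, 'opus')
    df = import_query_into_df('select * from films', db)
    print(df.head())
    close_postgres(db)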
|
from dataclasses import make_dataclass
from datetime import date, timedelta
import logging
import os
from settings.constants import VITIGEOSS_CONFIG_FILE
from pandas.core.frame import DataFrame
import json
import torch
from torch._C import device, dtype
import torch.nn.functional as F
from phenoai.models import ModelsEnum
import pandas as pd
from settings.instance import settings
import numpy as np
logger = logging.getLogger()
class AIManager:
def __init__(self) -> None:
with open(VITIGEOSS_CONFIG_FILE) as f:
self.config = json.loads(f.read())
self.model = None
        self.last_prediction = None
def load_model(self, model_enum: ModelsEnum, place: str, variety: str):
self.model, self.model_parameters = ModelsEnum.make(model_enum, place=place, variety=variety)
path = self.model.get_weights_path(root='/mnt/model_weights', place=place, variety=variety)
if os.path.exists(path):
self.model.load_state_dict(torch.load(path, map_location=torch.device('cpu')), strict=True)
else:
raise FileNotFoundError(f'Model weights file {path} for model {model_enum.name} not found!')
def run_inference(self, input_df: DataFrame, device='cpu'):
self.model = self.model.to(device)
self.model.eval()
X = input_df.loc[:, self.model.input_features].to_numpy()
train_data_min = np.array(self.model_parameters['train_data_min'])
train_data_max = np.array(self.model_parameters['train_data_max'])
X_std = (X - train_data_min) / (train_data_max - train_data_min) # Min-Max normalization
X_scaled = X_std * (self.model_parameters['feature_range'][1] - (self.model_parameters['feature_range'][0])) + (self.model_parameters['feature_range'][0]) # Scaling
src = torch.from_numpy(X_scaled).float()
src = src[~torch.any(src.isnan(), dim=1)] # Filter out NaN values
        self.last_prediction = self.model.run_inference(src.unsqueeze(0), device)
def get_inference_result(self, year: int):
        prediction_df = pd.DataFrame(self.get_phase_change_dates(self.last_prediction, year=year), columns=['predicted date'])
comparison_df = pd.concat([prediction_df], axis=1)
comparison_df.index = pd.Index(self.model.output_features) # Assign phenological phases to df phases (e.g. BudBreak, FruitSet, ...)
return comparison_df
    # mark the phase change at the first occurrence of a value above the threshold for that phase
@staticmethod
def get_phase_change_dates(phases_array, year, threshold=0.5):
# adjust initial predictions to avoid phase changes in the first days of the year (due to model convergence period)
phases_array[:30,:] = 0
phase_change_dates = []
base_date = date(year, 1, 1)
indices_of_phase_change = []
for phase in range(phases_array.shape[1]):
try:
indices_of_phase_change.append(np.where(phases_array[:, phase] > threshold)[0][0])
except Exception:
indices_of_phase_change.append(0)
for index in indices_of_phase_change:
delta = timedelta(days=int(index))
phase_change_dates.append(base_date + delta)
return phase_change_dates
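# Worked example for get_phase_change_dates (added for illustration): with a toy
# array whose single phase first exceeds the 0.5 threshold at day index 100,
#   phases = np.vstack([np.zeros((100, 1)), np.ones((265, 1))])
#   AIManager.get_phase_change_dates(phases, year=2021)
# returns [date(2021, 4, 11)], i.e. January 1st 2021 plus 100 days.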
def ai_manager():
aim = AIManager()
try:
yield aim
finally:
del(aim) |
import tensorflow as tf
input_batch = tf.constant([
[ # First Input
[[0.0], [1.0]],
[[2.0], [3.0]]
],
[ # Second Input
[[2.0], [4.0]],
[[6.0], [8.0]]
]
])
kernel = tf.constant([
[
[[1.0, 2.0]]
]
])
conv2d = tf.nn.conv2d(input_batch, kernel, strides=[1, 1, 1, 1], padding='SAME')
with tf.Session() as sess:
print(sess.run(tf.shape(input_batch)))
# [2 2 2 1]
print(sess.run(tf.shape(kernel)))
# [1 1 1 2]
print(sess.run(conv2d))
# [
# [[[0. 0.]
# [1. 2.]]
#
# [[2. 4.]
# [3. 6.]]]
#
#
# [[[2. 4.]
# [4. 8.]]
#
# [[6. 12.]
# [8.
# 16.]]]]
print(sess.run(tf.shape(conv2d)))
# [2 2 2 2]
"""输出是另一个1input_batch同秩的张量,但是其最内层维度与卷积核相同
输出层与输入层在最内的像素点级别有差异"""
print(sess.run(input_batch)[0][1][1]) # 表示输入图中右下角的像素点
print(sess.run(conv2d)[0][1][1]) # 表示输出图中右下角的像素点
# [ 3.]
# [3. 6.]
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Tuple
import torch
from pytorch3d.transforms import Transform3d
def camera_to_eye_at_up(
world_to_view_transform: Transform3d,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Given a world to view transform, return the eye, at and up vectors which
represent its position.
For example, if cam is a camera object, then after running
.. code-block::
from cameras import look_at_view_transform
eye, at, up = camera_to_eye_at_up(cam.get_world_to_view_transform())
R, T = look_at_view_transform(eye=eye, at=at, up=up)
any other camera created from R and T will have the same world to view
transform as cam.
Also, given a camera position R and T, then after running:
.. code-block::
from cameras import get_world_to_view_transform, look_at_view_transform
eye, at, up = camera_to_eye_at_up(get_world_to_view_transform(R=R, T=T))
R2, T2 = look_at_view_transform(eye=eye, at=at, up=up)
R2 will equal R and T2 will equal T.
Args:
world_to_view_transform: Transform3d representing the extrinsic
transformation of N cameras.
Returns:
eye: FloatTensor of shape [N, 3] representing the camera centers in world space.
at: FloatTensor of shape [N, 3] representing points in world space directly in
front of the cameras e.g. the positions of objects to be viewed by the
cameras.
up: FloatTensor of shape [N, 3] representing vectors in world space which
when projected on to the camera plane point upwards.
"""
cam_trans = world_to_view_transform.inverse()
# In the PyTorch3D right handed coordinate system, the camera in view space
# is always at the origin looking along the +z axis.
# The up vector is not a position so cannot be transformed with
# transform_points. However the position eye+up above the camera
# (whose position vector in the camera coordinate frame is an up vector)
# can be transformed with transform_points.
eye_at_up_view = torch.tensor(
[[0, 0, 0], [0, 0, 1], [0, 1, 0]], dtype=torch.float32, device=cam_trans.device
)
eye_at_up_world = cam_trans.transform_points(eye_at_up_view).reshape(-1, 3, 3)
eye, at, up_plus_eye = eye_at_up_world.unbind(1)
up = up_plus_eye - eye
return eye, at, up
|
from django.shortcuts import render, redirect
from .models import Room
from .forms import RoomForm
# Create your views here.
def home(request):
rooms = Room.objects.all()
context = {'rooms':rooms} #we put our parameters into a dictionary
return render(request,'base/home.html', context)
def room(request,pk):
    room = Room.objects.get(id=pk)
context = {'room':room}
return render(request,'base/room.html',context)
def createRoom(request):
form = RoomForm()
if request.method=="POST":
form = RoomForm(request.POST)
if form.is_valid():
form.save() #it will automatically save the model in DB
return redirect('home') #we use name value of url here
context = {'form':form}
return render(request, 'base/room_form.html',context)
def updateRoom(request, pk):
room = Room.objects.get(id=pk)
form = RoomForm(instance=room) #pre filled with the room that given by id
if request.method=="POST":
form = RoomForm(request.POST,instance=room)
        if form.is_valid():
form.save()
return redirect("home")
context = {'form': form}
return render(request, 'base/room_form.html',context)
def deleteRoom(request,pk):
room = Room.objects.get(id=pk)
if request.method=="POST":
room.delete()
return redirect('home')
return render(request, "base/delete.html",{'obj':room}) |
import ctypes
import time
import os
import pybgfx
class App(object):
def __init__(self):
pass
def init(self):
pass
def shutdown(self):
pass
def update(self, dt):
pass
def run(self):
glfw_dll_path = os.path.dirname(__file__) + "\\glfw3"
        glfw = ctypes.CDLL(glfw_dll_path)
        # Declare pointer-sized return/argument types so the window handle is
        # not truncated to a 32-bit int (the ctypes default) on 64-bit Python.
        glfw.glfwCreateWindow.restype = ctypes.c_void_p
        glfw.glfwMakeContextCurrent.argtypes = [ctypes.c_void_p]
        glfw.glfwWindowShouldClose.argtypes = [ctypes.c_void_p]
        glfw.glfwGetWin32Window.restype = ctypes.c_void_p
        glfw.glfwGetWin32Window.argtypes = [ctypes.c_void_p]
        glfw.glfwInit()
        window = glfw.glfwCreateWindow(
            self.width, self.height, self.title, 0, 0)
        glfw.glfwMakeContextCurrent(window)
        handle = glfw.glfwGetWin32Window(window)
pybgfx.bgfx.set_platform_data(handle)
self.init()
last_time = None
while not glfw.glfwWindowShouldClose(window):
glfw.glfwPollEvents()
now = time.time()
            if last_time is None:
last_time = now
frame_time = now - last_time
last_time = now
self.update(frame_time)
self.shutdown()
glfw.glfwTerminate()
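# A minimal usage sketch (illustrative, not part of the original module): a
# subclass overrides the lifecycle hooks and relies on the default
# width/height/title attributes set in __init__ above.
class DemoApp(App):
    def init(self):
        print("bgfx platform data set, ready to render")

    def update(self, dt):
        # per-frame work would go here; dt is the elapsed frame time in seconds
        pass

if __name__ == "__main__":
    DemoApp().run()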
|
import numpy as np
from gym import spaces as gym_spaces
from . import Space
class DictSpace(Space):
def __init__(self, space_dict):
super().__init__()
self._space_dict = space_dict
# calculate dimension of flat space associated to this space
self._flat_dim = self._get_flat_dim_uncached()
def contains(self, value):
if not isinstance(value, dict):
return False
for key, c in value.items():
if not key in self._space_dict or not self._space_dict[key].contains(c):
return False
return True
@property
def length(self):
return len(self._space_dict)
def keys(self):
return self._space_dict.keys()
def get_flat_space(self, keys = None):
if keys is None:
keys = self._space_dict.keys()
# NOTE: Need sorting in order to avoid different orders of keys.
keys = sorted(keys)
spaces = [self._space_dict[key].get_flat_space() for key in keys]
return spaces[0].concatenate(*spaces)
def _get_flat_dim_uncached(self, keys = None):
if keys is None:
keys = self._space_dict.keys()
keys = sorted(keys)
subspace_dims = [self._space_dict[key].get_flat_dim() for key in keys]
return np.sum(subspace_dims)
def get_flat_dim(self, keys = None):
"""Get dimension of flat space associated to this space."""
        # use the cached value in this case. NOTE: This assumes the space is not modified after creation!
if keys is None:
return self._flat_dim
else:
return self._get_flat_dim_uncached(keys)
def flatten_value(self, value, keys = None):
if keys is None:
keys = self._space_dict.keys()
keys = sorted(keys)
flat_sub_values = [self._space_dict[key].flatten_value(value[key]) for key in keys]
flat_value = np.concatenate(flat_sub_values)
return flat_value
def unflatten_value(self, flat_value, keys = None):
"""Maps a vector from the space returned by flatten_value back to this space.
Assumes that flat_value contains only contributions from the subspaces with keys
listed in keys. Assumes that space (and subspaces) have not been modified since
creation."""
if keys is None:
keys = self._space_dict.keys()
keys = sorted(keys)
# dimensions of subspaces
dims = [self._space_dict[key].get_flat_dim() for key in keys]
indices = [0]
for dim in dims:
indices.append(indices[-1] + dim)
value = {key: self._space_dict[key].unflatten_value(flat_value[indices[i]:indices[i + 1]]) for i, key in enumerate(keys)}
return value
def sample(self):
return {key: space.sample() for key, space in self._space_dict.items()}
def get_gym_space(self):
dict_of_gym_spaces = {key: space.get_gym_space() for key, space in self._space_dict.items()}
return gym_spaces.Dict(dict_of_gym_spaces)
def __eq__(self, other):
if not (isinstance(other, DictSpace) and
self.keys() == other.keys()):
return False
for c1, c2 in zip(self._space_dict.values(), other._space_dict.values()):
            if c1 != c2:
return False
return True
@staticmethod
def concatenate(*args):
        # copy so the first argument's space dict is not mutated in place
        space_dict = dict(args[0]._space_dict)
        for arg in args[1:]:
            space_dict.update(arg._space_dict)
        return DictSpace(space_dict)
@staticmethod
def get_from_gym_space(gym_space):
from .utils import space_from_gym_space
assert isinstance(gym_space, gym_spaces.Dict)
return DictSpace({key: space_from_gym_space(gs) for key, gs in gym_space.spaces.items()})
def __str__(self):
subspaces_string = ""
for key, s in self._space_dict.items():
if subspaces_string != "":
subspaces_string += ", "
subspaces_string += "{}: {}".format(key, s)
return "DictSpace({{ {} }})".format(subspaces_string)
def __getitem__(self, key):
return self._space_dict[key]
def get_subspace(self, keys):
return DictSpace({key: space for key, space in self._space_dict.items() if key in keys})
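# Hypothetical usage sketch (commented out; the key names and Box bounds are
# illustrative, and it assumes .utils.space_from_gym_space handles Box spaces):
#
#   from gym import spaces as gym_spaces
#   gspace = gym_spaces.Dict({
#       "position": gym_spaces.Box(low=-1.0, high=1.0, shape=(3,)),
#       "velocity": gym_spaces.Box(low=-1.0, high=1.0, shape=(3,)),
#   })
#   space = DictSpace.get_from_gym_space(gspace)
#   sample = space.sample()                 # dict keyed by "position"/"velocity"
#   flat = space.flatten_value(sample)      # 6-dim vector (keys are sorted)
#   restored = space.unflatten_value(flat)  # back to a per-key dict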
|
from picalculator import PiCalculator
def test():
pc = PiCalculator(10, 'T')
tasks = pc.submit_tasks()
for task in tasks:
task.run()
print(sum(task.result for task in tasks) / pc.n_cpu)
pc.close()
|
from torch.utils.data import Dataset
from kogpt2.utils import download, tokenizer, get_tokenizer
from gluonnlp.data import SentencepieceTokenizer
import gluonnlp
import numpy as np
import pandas as pd
def sentencePieceTokenizer():
tok_path = get_tokenizer()
sentencepieceTokenizer = SentencepieceTokenizer(tok_path)
return sentencepieceTokenizer
def koGPT2Vocab():
cachedir = "~/kogpt2/"
# download vocab
vocab_info = tokenizer
vocab_path = download(
vocab_info["url"], vocab_info["fname"], vocab_info["chksum"], cachedir=cachedir
)
koGPT2_vocab = gluonnlp.vocab.BERTVocab.from_sentencepiece(
vocab_path,
mask_token=None,
sep_token=None,
cls_token=None,
unknown_token="<unk>",
padding_token="<pad>",
bos_token="<s>",
eos_token="</s>",
)
return koGPT2_vocab
def toString(list):
if not list:
return ""
result = ""
for i in list:
result = result + i
return result
class storyDataset(Dataset):
"""script dataset"""
def __init__(self, file_path, vocab, tokenizer):
self.file_path = file_path
self.sentence_list = []
self.vocab = vocab
self.tokenizer = tokenizer
df = pd.read_csv(self.file_path)
for line in df["content"]:
tokenized_line = tokenizer(str(line))
print(tokenized_line[-1])
index_of_words = (
[vocab[vocab.bos_token],]
+ vocab[tokenized_line]
+ [vocab[vocab.eos_token]]
)
self.sentence_list.append(index_of_words)
print("sentence list length :", len(self.sentence_list))
def __len__(self):
return len(self.sentence_list)
def __getitem__(self, index):
return self.sentence_list[index]
class synoDataset(Dataset):
"""synopsis dataset"""
def __init__(self, file_path, vocab, tokenizer):
self.file_path = file_path
self.sentence_list = []
self.vocab = vocab
self.tokenizer = tokenizer
df = pd.read_csv(self.file_path)
df["genre"] = df["genre"].str.strip("[]").str.split(",")
# df['genre'] = df['genre'].fillna('none')
### gen_to_idx, genre_to_vocab 설정
gen_to_vocab = {}
genres = [
"SF",
"TV영화",
"공포",
"느와르",
"다큐멘터리",
"드라마",
"멜로",
"로맨스",
"모험",
"무협",
"뮤지컬",
"미스터리",
"범죄",
"서부",
"서스펜스",
"스릴러",
"애니메이션",
"액션",
"멜로/로맨스",
"가족",
"서사",
"전쟁",
"코미디",
"판타지",
]
print(f"We have {len(genres)} genres")
gen_to_idx = {}
for idx, gen in enumerate(genres):
gen_to_idx[gen] = idx + 6
idx_to_gen = {v: k for k, v in gen_to_idx.items()}
for idx, gen in idx_to_gen.items():
gen_to_vocab[gen] = vocab.idx_to_token[idx]
count = 0
err = 0
for idx in range(len(df)):
line = df.loc[idx, "content"]
genres = df.loc[idx, "genre"]
tokenized_line = tokenizer(str(line))
if genres == "'none'":
print(genres)
index_of_words = (
[vocab[vocab.bos_token],]
+ vocab[tokenized_line]
+ [vocab[vocab.eos_token]]
)
else:
tmp = []
for gen in genres:
try:
tmp.append(gen_to_vocab[gen.strip("' '")])
except Exception as e:
pass
if len(tmp) > 0:
count += 1
else:
err += 1
index_of_words = (
[vocab[vocab.bos_token],]
+ vocab[tmp]
+ vocab[tokenized_line]
+ [vocab[vocab.eos_token]]
)
self.sentence_list.append(index_of_words)
print(f"average length of data : {sum(df['content'].str.len()) / len(df)}")
print("total data :", len(self.sentence_list))
print("=== test genres ===")
print(f"we got {count} synos which have genres.")
print(f"we lose {err} synos because their genres are not included.")
print(f"match full == count + err {len(self.sentence_list) == count+err}")
def __len__(self):
return len(self.sentence_list)
def __getitem__(self, index):
return self.sentence_list[index]
if __name__ == "__main__":
import torch
from gluonnlp.data import SentencepieceTokenizer
from kogpt2.utils import get_tokenizer
from dataset import synoDataset
from kogpt2.pytorch_kogpt2 import get_pytorch_kogpt2_model
from time import time
tok_path = get_tokenizer()
_, vocab = get_pytorch_kogpt2_model()
tok = SentencepieceTokenizer(tok_path, num_best=0, alpha=0)
start = time()
print("Dataset Loading... ", end=" ")
dataset = synoDataset("./data/korean_naver_3.csv", vocab, tok)
end = time()
print(f"{start - end}")
|
# Time: push: O(n), pop: O(1), top: O(1)
# Space: O(n)
#
# Implement the following operations of a stack using queues.
#
# push(x) -- Push element x onto stack.
# pop() -- Removes the element on top of the stack.
# top() -- Get the top element.
# empty() -- Return whether the stack is empty.
# Notes:
# You must use only standard operations of a queue -- which
# means only push to back, peek/pop from front, size, and is
# empty operations are valid.
# Depending on your language, queue may not be supported natively.
# You may simulate a queue by using a list or deque (double-ended
# queue), as long as you use only standard operations of a queue.
# You may assume that all operations are valid (for example, no pop
# or top operations will be called on an empty stack).
import collections
class Queue:
def __init__(self):
self.data = collections.deque()
def push(self, x):
self.data.append(x)
def peek(self):
return self.data[0]
def pop(self):
return self.data.popleft()
def size(self):
return len(self.data)
def empty(self):
return len(self.data) == 0
class Stack:
# initialize your data structure here.
def __init__(self):
self.q_ = Queue()
# @param x, an integer
# @return nothing
def push(self, x):
self.q_.push(x)
        for _ in range(self.q_.size() - 1):
self.q_.push(self.q_.pop())
# @return nothing
def pop(self):
self.q_.pop()
# @return an integer
def top(self):
return self.q_.peek()
    # @return a boolean
def empty(self):
return self.q_.empty()
# Time: push: O(1), pop: O(n), top: O(1)
# Space: O(n)
class Stack2:
# initialize your data structure here.
def __init__(self):
self.q_ = Queue()
self.top_ = None
# @param x, an integer
# @return nothing
def push(self, x):
self.q_.push(x)
self.top_ = x
# @return nothing
def pop(self):
        for _ in range(self.q_.size() - 1):
self.top_ = self.q_.pop()
self.q_.push(self.top_)
self.q_.pop()
# @return an integer
def top(self):
return self.top_
    # @return a boolean
def empty(self):
return self.q_.empty()
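# Small usage sketch of the two implementations above (not part of the original
# solution); both should behave identically from the caller's point of view.
if __name__ == "__main__":
    for cls in (Stack, Stack2):
        s = cls()
        s.push(1)
        s.push(2)
        s.push(3)
        assert s.top() == 3
        s.pop()
        assert s.top() == 2
        assert not s.empty()
    print("ok")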
|
cont = 0
cont_m = 0
cont_f20 = 0
cont_maioridade = 0
while True:
    sexo = ' '
    while(sexo not in 'MF'):
        sexo = str(input('Male or Female [M/F]? ')).upper().strip()
    idade = int(input(f'Age of person #{cont + 1}: '))
    cont = cont + 1
    if(sexo == 'M'):
        cont_m = cont_m + 1
    elif(sexo == 'F' and idade < 20):
        cont_f20 = cont_f20 + 1
    if(idade >= 18):
        cont_maioridade = cont_maioridade + 1
    resposta = ' '
    while(resposta not in 'YN'):
        resposta = str(input('Register another person [Y/N]? ')).upper().strip()
    if(resposta == 'N'):
        break
print(f'Total number of people aged 18 or older: {cont_maioridade}')
print(f'Total number of men registered: {cont_m}')
print(f'Total number of women under 20 years old: {cont_f20}')
print('----------------')
print('END OF PROGRAM')
print('----------------') |
#-*- coding: UTF-8 -*-
from OpenGL.GL import *
import numpy as np
class Material(object):
def __init__(self, ambient, diffuse, specular, shininess):
self.__ambient = np.array(ambient, dtype = np.float32)
self.__diffuse = np.array(diffuse, dtype = np.float32)
self.__specular = np.array(specular, dtype = np.float32)
self.__shininess = np.array([shininess], dtype = np.float32)
def getAmbient(self):
return self.__ambient
def getDiffuse(self):
return self.__diffuse
def getSpecular(self):
return self.__specular
def getShininess(self):
return self.__shininess
def toOpenGL(self, program):
glUniform3fv(glGetUniformLocation(program, 'material.ambient'), 1, self.__ambient)
glUniform3fv(glGetUniformLocation(program, 'material.diffuse'), 1, self.__diffuse)
glUniform3fv(glGetUniformLocation(program, 'material.specular'), 1, self.__specular)
        glUniform1fv(glGetUniformLocation(program, 'material.shininess'), 1, self.__shininess)
Material.Emerald = Material( (0.633 , 0.727811 , 0.633 ), (0.07568 , 0.61424 , 0.07568 ), (0.0215 , 0.1745 , 0.0215 ), 0.6 )
Material.Jade = Material( (0.316228 , 0.316228 , 0.316228 ), (0.54 , 0.89 , 0.63 ), (0.135 , 0.2225 , 0.1575 ), 0.1 )
Material.Obsidian = Material( (0.332741 , 0.328634 , 0.346435 ), (0.18275 , 0.17 , 0.22525 ), (0.05375 , 0.05 , 0.06625 ), 0.3 )
Material.Pearl = Material( (0.296648 , 0.296648 , 0.296648 ), (1 , 0.829 , 0.829 ), (0.25 , 0.20725 , 0.20725 ), 0.088 )
Material.Ruby = Material( (0.727811 , 0.626959 , 0.626959 ), (0.61424 , 0.04136 , 0.04136 ), (0.1745 , 0.01175 , 0.01175 ), 0.6 )
Material.Turquoise = Material( (0.297254 , 0.30829 , 0.306678 ), (0.396 , 0.74151 , 0.69102 ), (0.1 , 0.18725 , 0.1745 ), 0.1 )
Material.Brass = Material( (0.992157 , 0.941176 , 0.807843 ), (0.780392 , 0.568627 , 0.113725 ), (0.329412 , 0.223529 , 0.027451 ), 0.21794872)
Material.Bonze = Material( (0.393548 , 0.271906 , 0.166721 ), (0.714 , 0.4284 , 0.18144 ), (0.2125 , 0.1275 , 0.054 ), 0.2 )
Material.Chrome = Material( (0.774597 , 0.774597 , 0.774597 ), (0.4 , 0.4 , 0.4 ), (0.25 , 0.25 , 0.25 ), 0.6 )
Material.Copper = Material( (0.256777 , 0.137622 , 0.086014 ), (0.7038 , 0.27048 , 0.0828 ), (0.19125 , 0.0735 , 0.0225 ), 0.1 )
Material.Gold = Material( (0.628281 , 0.555802 , 0.366065 ), (0.75164 , 0.60648 , 0.22648 ), (0.24725 , 0.1995 , 0.0745 ), 0.4 )
Material.Silver = Material( (0.508273 , 0.508273 , 0.508273 ), (0.50754 , 0.50754 , 0.50754 ), (0.19225 , 0.19225 , 0.19225 ), 0.4 )
Material.BlackPlastic = Material( (0.50 , 0.50 , 0.50 ), (0.01 , 0.01 , 0.01 ), (0.0 , 0.0 , 0.0 ), 0.25 )
Material.CyanPlastic = Material( (0.50196078, 0.50196078, 0.50196078), (0.0 , 0.50980392, 0.50980392), (0.0 , 0.1 , 0.06 ), 0.25 )
Material.GreenPlastic = Material( (0.45 , 0.55 , 0.45 ), (0.1 , 0.35 , 0.1 ), (0.0 , 0.0 , 0.0 ), 0.25 )
Material.RedPlastic = Material( (0.7 , 0.6 , 0.6 ), (0.5 , 0.0 , 0.0 ), (0.0 , 0.0 , 0.0 ), 0.25 )
Material.WhitePlastic = Material( (0.70 , 0.70 , 0.70 ), (0.55 , 0.55 , 0.55 ), (0.0 , 0.0 , 0.0 ), 0.25 )
Material.YellowPlastic = Material( (0.60 , 0.60 , 0.50 ), (0.5 , 0.5 , 0.0 ), (0.0 , 0.0 , 0.0 ), 0.25 )
Material.BlackRubber = Material( (0.4 , 0.4 , 0.4 ), (0.01 , 0.01 , 0.01 ), (0.02 , 0.02 , 0.02 ), 0.078125 )
Material.CyanRubber = Material( (0.04 , 0.7 , 0.7 ), (0.4 , 0.5 , 0.5 ), (0.0 , 0.05 , 0.05 ), 0.078125 )
Material.GreenRubber = Material( (0.04 , 0.7 , 0.04 ), (0.4 , 0.5 , 0.4 ), (0.0 , 0.05 , 0.0 ), 0.078125 )
Material.RedRubber = Material( (0.7 , 0.04 , 0.04 ), (0.5 , 0.4 , 0.4 ), (0.05 , 0.0 , 0.0 ), 0.078125 )
Material.WhiteRubber = Material( (0.7 , 0.7 , 0.7 ), (0.5 , 0.5 , 0.5 ), (0.05 , 0.05 , 0.05 ), 0.078125 )
Material.YellowRubber = Material( (0.7 , 0.7 , 0.04 ), (0.5 , 0.5 , 0.4 ), (0.05 , 0.05 , 0.0 ), 0.078125 )
Material.ALL = [Material.Emerald, Material.Jade, Material.Obsidian, Material.Pearl, Material.Ruby, Material.Turquoise, Material.Brass, Material.Bonze, Material.Chrome, Material.Copper, Material.Gold, Material.Silver, Material.BlackPlastic, Material.CyanPlastic, Material.GreenPlastic, Material.RedPlastic, Material.WhitePlastic, Material.YellowPlastic, Material.BlackRubber, Material.CyanRubber, Material.GreenRubber, Material.RedRubber, Material.WhiteRubber, Material.YellowRubber]
# Source: http://devernay.free.fr/cours/opengl/materials.html |
print(True and True) |
import datetime
from django.test import TestCase
from freezegun import freeze_time
from symposion.proposals.tests.factories import (
ProposalBaseFactory,
ProposalKindFactory,
ProposalSectionFactory,
)
from symposion.conference.tests.factories import SectionFactory
class ProposalsTest(TestCase):
def test_can_edit(self):
conf_section = SectionFactory(slug="widgets")
prop_section = ProposalSectionFactory(section=conf_section)
prop_kind = ProposalKindFactory(section=conf_section, slug="widget")
prop = ProposalBaseFactory(kind=prop_kind)
# Normal Operation
prop_section.closed = False
prop_section.save()
self.assertTrue(prop.can_edit())
# Start/End Dates
prop_section.start = datetime.datetime(2017, 9, 20)
prop_section.end = datetime.datetime(2017, 9, 29)
prop_section.save()
with freeze_time("2017-09-19"):
self.assertFalse(prop.can_edit())
with freeze_time("2017-09-25"):
self.assertTrue(prop.can_edit())
with freeze_time("2017-09-30"):
self.assertFalse(prop.can_edit())
# Explicit Close
with freeze_time("2017-09-25"):
prop_section.closed = True
prop_section.save()
self.assertFalse(prop.can_edit())
|
import csv
from pathlib import Path
root = "data/"
def parse_file(filename):
"""Returns content of file as a list or dictionary
:param filename: filename (including file extension)
    :return: a list containing the contents of each line if filename ends in .txt, otherwise a dictionary corresponding to the content if filename ends in .csv
"""
filepath = root + filename
try:
if filename.endswith('.txt'):
with open(filepath) as file:
return file.read().splitlines()
elif filename.endswith('.csv'):
            with open(filepath) as file:
                reader = csv.reader(file)
                result = {}
                for row in reader:
                    key = row[0]
                    result[key] = row[1]
                return result
    except FileNotFoundError:
        # create the missing file with placeholder content, then re-parse it
        append_string_to_textfile(filename, "Placeholder")
        return parse_file(filename)
def search_file(filename, term):
"""searches content of a .txt file for a term
:param filename: filename (including file extension)
:param term: term to search for
    :return: True if the term is found in the file, otherwise False
"""
try:
filepath = root + filename
with open(filepath) as file:
return term in file.read().splitlines()
except FileNotFoundError:
with open(filepath,"w+") as file:
return (search_file(filename, term))
def append_string_to_textfile(filename, string):
"""appends a string to the end of the .txt file on a new line
:param filename: full filename including extension
:param term: string to append
"""
filepath = root + filename
with open(filepath, 'a+') as file:
file.write(string + "\n")
def append_dict_to_csv_file(filename, dictionary):
filepath = root + filename
if Path(filepath).is_file():
with open(filepath, 'a') as file:
fieldnames = list(dictionary.keys())
dict_writer = csv.DictWriter(file, fieldnames = fieldnames)
dict_writer.writerow(dictionary)
else:
with open(filepath, 'w', newline='') as file:
fieldnames = list(dictionary.keys())
writer = csv.DictWriter(file, delimiter=',', lineterminator='\n',fieldnames=fieldnames)
            writer.writeheader()
return (append_dict_to_csv_file(filename, dictionary))
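# Example usage sketch (commented out; assumes a writable data/ directory
# relative to the working directory, and the filenames below are illustrative):
#
#   append_string_to_textfile("users.txt", "alice")
#   print(search_file("users.txt", "alice"))   # True
#   print(parse_file("users.txt"))             # ['alice', ...]
#   append_dict_to_csv_file("scores.csv", {"name": "alice", "score": "10"})
#   print(parse_file("scores.csv"))            # {'name': 'score', 'alice': '10'}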
|
default_app_config = 'events.apps.EventsAppConfig'
|
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.model_selection import train_test_split  # cross_validation module was removed in newer scikit-learn
from tensorflow.contrib import learn
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
test_size=0.2,
random_state=42)
val_monitor = learn.monitors.ValidationMonitor(X_val, y_val,
early_stopping_rounds=200)
# classifier with early stopping on training data
classifier1 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
model_dir='/tmp/iris_model/')
classifier1.fit(X_train, y_train, steps=2000)
score1 = metrics.accuracy_score(y_test, classifier1.predict(X_test))
# classifier with early stopping on validation data
classifier2 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
model_dir='/tmp/iris_model_val/')
classifier2.fit(X_train, y_train, val_monitor, steps=2000)
score2 = metrics.accuracy_score(y_test, classifier2.predict(X_test))
# In many applications, the score is improved by using early stopping
print(score2 > score1)
|
import sympy as sym
from sympy.abc import s, t, x, y, z
import numpy as np
from sympy.integrals import inverse_laplace_transform
import matplotlib.pyplot as plt
# Define inputs
# First step (up) starts at 1 sec
U1 = 2 / s * sym.exp(-s)
# Ramp (down) starts at 3 sec
U2 = -1 / s ** 2 * sym.exp(-3 * s)
# Ramp completes at 5 sec
U3 = 1 / s ** 2 * sym.exp(-5 * s)
# Transfer function
G = 5 * (s + 1) / (s + 3) ** 2
# Calculate responses
Y1 = G * U1
Y2 = G * U2
Y3 = G * U3
# Inverse Laplace Transform
u1 = inverse_laplace_transform(U1, s, t)
u2 = inverse_laplace_transform(U2, s, t)
u3 = inverse_laplace_transform(U3, s, t)
y1 = inverse_laplace_transform(Y1, s, t)
y2 = inverse_laplace_transform(Y2, s, t)
y3 = inverse_laplace_transform(Y3, s, t)
print('y1')
print(y1)
# generate data for plot
tm = np.linspace(0, 8, 100)
us = np.zeros(len(tm))
ys = np.zeros(len(tm))
# substitute numeric values for u and y
for u in [u1, u2, u3]:
for i in range(len(tm)):
us[i] += u.subs(t, tm[i])
for y in [y1, y2, y3]:
for i in range(len(tm)):
ys[i] += y.subs(t, tm[i])
# plot results
plt.figure()
plt.plot(tm, us, label='u(t)')
plt.plot(tm, ys, label='y(t)')
plt.legend()
plt.xlabel('Time')
plt.show()
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class BgpEvpnSpmsiV4(Base):
"""
The BgpEvpnSpmsiV4 class encapsulates a required bgpEvpnSpmsiV4 resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'bgpEvpnSpmsiV4'
_SDM_ATT_MAP = {
'Active': 'active',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'EnableSpmsiTraffic': 'enableSpmsiTraffic',
'GroupAddress': 'groupAddress',
'GroupAddressCountSPMSI': 'groupAddressCountSPMSI',
'GroupAddressStep': 'groupAddressStep',
'Mode': 'mode',
'MulticastTunnelType': 'multicastTunnelType',
'Name': 'name',
'SPmsiTunnelCount': 'sPmsiTunnelCount',
'SenderAddress': 'senderAddress',
'SenderAddressStep': 'senderAddressStep',
'SourceAddressCountSPMSI': 'sourceAddressCountSPMSI',
'SourceGroupMappingSPMSI': 'sourceGroupMappingSPMSI',
'StartGroupAddressSPMSI': 'startGroupAddressSPMSI',
'StartSourceAddressIpv4': 'startSourceAddressIpv4',
'UpstreamAssignedLabel': 'upstreamAssignedLabel',
'UpstreamAssignedLabelStep': 'upstreamAssignedLabelStep',
'UseUpstreamAssignedLabel': 'useUpstreamAssignedLabel',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(BgpEvpnSpmsiV4, self).__init__(parent, list_op)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableSpmsiTraffic(self):
# type: () -> bool
"""
Returns
-------
- bool: Enable SPMSI Traffic
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableSpmsiTraffic'])
@EnableSpmsiTraffic.setter
def EnableSpmsiTraffic(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['EnableSpmsiTraffic'], value)
@property
def GroupAddress(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Group Address
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GroupAddress']))
@property
def GroupAddressCountSPMSI(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): C-Group Address Count
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GroupAddressCountSPMSI']))
@property
def GroupAddressStep(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Group Address Step
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GroupAddressStep']))
@property
def Mode(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Mode
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Mode']))
@property
def MulticastTunnelType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Multicast Tunnel Type
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MulticastTunnelType']))
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def SPmsiTunnelCount(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): S-PMSI Tunnel Count
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SPmsiTunnelCount']))
@property
def SenderAddress(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Sender Address/P-Root Node Address
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SenderAddress']))
@property
def SenderAddressStep(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Sender Address/P-Root Node Address Step
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SenderAddressStep']))
@property
def SourceAddressCountSPMSI(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): C-Source Address Count
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SourceAddressCountSPMSI']))
@property
def SourceGroupMappingSPMSI(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Source Group Mapping
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SourceGroupMappingSPMSI']))
@property
def StartGroupAddressSPMSI(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Start C-Group Address
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StartGroupAddressSPMSI']))
@property
def StartSourceAddressIpv4(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Start C-Source Address
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StartSourceAddressIpv4']))
@property
def UpstreamAssignedLabel(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Upstream Assigned Label
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UpstreamAssignedLabel']))
@property
def UpstreamAssignedLabelStep(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Upstream Assigned Label Step
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UpstreamAssignedLabelStep']))
@property
def UseUpstreamAssignedLabel(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Use Upstream Assigned Label
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UseUpstreamAssignedLabel']))
def update(self, EnableSpmsiTraffic=None, Name=None):
# type: (bool, str) -> BgpEvpnSpmsiV4
"""Updates bgpEvpnSpmsiV4 resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- EnableSpmsiTraffic (bool): Enable SPMSI Traffic
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def get_device_ids(self, PortNames=None, Active=None, GroupAddress=None, GroupAddressCountSPMSI=None, GroupAddressStep=None, Mode=None, MulticastTunnelType=None, SPmsiTunnelCount=None, SenderAddress=None, SenderAddressStep=None, SourceAddressCountSPMSI=None, SourceGroupMappingSPMSI=None, StartGroupAddressSPMSI=None, StartSourceAddressIpv4=None, UpstreamAssignedLabel=None, UpstreamAssignedLabelStep=None, UseUpstreamAssignedLabel=None):
"""Base class infrastructure that gets a list of bgpEvpnSpmsiV4 device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- GroupAddress (str): optional regex of groupAddress
- GroupAddressCountSPMSI (str): optional regex of groupAddressCountSPMSI
- GroupAddressStep (str): optional regex of groupAddressStep
- Mode (str): optional regex of mode
- MulticastTunnelType (str): optional regex of multicastTunnelType
- SPmsiTunnelCount (str): optional regex of sPmsiTunnelCount
- SenderAddress (str): optional regex of senderAddress
- SenderAddressStep (str): optional regex of senderAddressStep
- SourceAddressCountSPMSI (str): optional regex of sourceAddressCountSPMSI
- SourceGroupMappingSPMSI (str): optional regex of sourceGroupMappingSPMSI
- StartGroupAddressSPMSI (str): optional regex of startGroupAddressSPMSI
- StartSourceAddressIpv4 (str): optional regex of startSourceAddressIpv4
- UpstreamAssignedLabel (str): optional regex of upstreamAssignedLabel
- UpstreamAssignedLabelStep (str): optional regex of upstreamAssignedLabelStep
- UseUpstreamAssignedLabel (str): optional regex of useUpstreamAssignedLabel
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
|
import json
import sys
# from pyyaml package
import yaml
_dsl_schema_cache = None
_dsl_validator_cache = None
_table_admin_privs = ["SELECT", "INSERT", "DELETE", "OWNERSHIP"]
_table_public_privs = ["SELECT"]
def dsl_json_schema():
global _dsl_schema_cache
if _dsl_schema_cache is not None:
return _dsl_schema_cache
try:
import importlib.resources as pkg_resources
except ImportError:
# try 3.7 backport as a fallback
import importlib_resources as pkg_resources
from . import jsonschema
with pkg_resources.open_text(jsonschema, "dsl-schema.json") as schemafile:
_dsl_schema_cache = json.load(schemafile)
return _dsl_schema_cache
def dsl_json_validator():
global _dsl_validator_cache
if _dsl_validator_cache is not None:
return _dsl_validator_cache
import jsonschema
_dsl_validator_cache = jsonschema.Draft7Validator(dsl_json_schema())
    # future work: if we ever desire to support default values, use the following
# to extend validation with filling in defaults from the json-schema spec
# https://github.com/Julian/jsonschema/issues/4#issuecomment-4396738
return _dsl_validator_cache
def _acl_groups(jobj: dict, k="admin") -> list:
return [e["group"] for e in jobj[k] if ("group" in e)]
def _acl_users(jobj: dict, k="admin") -> list:
return [e["user"] for e in jobj[k] if ("user" in e)]
# helper: dict union (dicts have no union operator before python 3.9)
def _union(d1: dict, d2: dict) -> dict:
"""equivalent to (d1 | d2) in py >= 3.9"""
u = d1.copy()
u.update(d2)
return u
# helper: list concatenation without mutating the inputs
def _concat(l1: list, l2: list) -> list:
c = l1.copy()
c.extend(l2)
return c
def dsl_to_rules(dsl: dict, validate=True) -> dict: # noqa: C901
"""
Transform DSL json structure to trino 'rules.json' structure
Currently the expected format of 'dsl' parameter is only defined via the
example dsl files in the examples directory, for example here:
https://github.com/os-climate/osc-trino-acl-dsl/blob/main/examples/dsl-example-1.json
This function returns a 'dict' structure that can be written using 'json.dump' to produce
a 'rules.json' file ingestable by trino.
"""
if validate:
# validate the dsl json structure against the DSL json-schema
dsl_json_validator().validate(dsl)
# mypy type checking may prefer these be typed but needs py3.9 to
# support clean list[dict] type annotation
catalog_rules = []
schema_rules = []
table_rules = []
# rules configuring admin acl go first to ensure they override anything else
ugs = _acl_groups(dsl)
if len(ugs) > 0:
# if any group entries were present, insert corresponding admin rules
catalog_rules.append({"group": "|".join(ugs), "allow": "all"})
schema_rules.append({"group": "|".join(ugs), "owner": True})
table_rules.append({"group": "|".join(ugs), "privileges": _table_admin_privs})
ugs = _acl_users(dsl)
if len(ugs) > 0:
# if any user entries were present, insert corresponding admin rules
catalog_rules.append({"user": "|".join(ugs), "allow": "all"})
schema_rules.append({"user": "|".join(ugs), "owner": True})
table_rules.append({"user": "|".join(ugs), "privileges": _table_admin_privs})
# any schema or table admins require "allow":"all" on the associated catalog
# so it is most effective to just accumulate these and add corresponding rules
# in the catalog section
# https://trino.io/docs/current/security/file-system-access-control.html#catalog-schema-and-table-access
# note there is not a similar issue for table -> schema ownerships
uallow: dict = {}
# the semantic definition for schema admin is that it includes
# admin over any table in that schema, so these rules need to appear before other table
# related rules
for spec in dsl["schemas"]:
cst = {"catalog": spec["catalog"], "schema": spec["schema"]}
# configure group(s) with ownership of this schema
ugs = _acl_groups(spec)
if len(ugs) > 0:
schema_rules.append(_union(cst, {"group": "|".join(ugs), "owner": True}))
# ensure that schema admins also have full table-level privs inside their schema
table_rules.append(_union(cst, {"group": "|".join(ugs), "privileges": _table_admin_privs}))
# add corresponding rules for any user patterns
ugs = _acl_users(spec)
if len(ugs) > 0:
schema_rules.append(_union(cst, {"user": "|".join(ugs), "owner": True}))
table_rules.append(_union(cst, {"user": "|".join(ugs), "privileges": _table_admin_privs}))
uallow[spec["catalog"]] = _concat(uallow.get(spec["catalog"], []), spec["admin"])
# table rules go here
for spec in dsl["tables"]:
cst = {"catalog": spec["catalog"], "schema": spec["schema"], "table": spec["table"]}
# "admin" is optional for any individual table because schema admins
# are also table admins for any table in the schema
if "admin" in spec:
# table admin group rules go first to override others
rule = _union(cst, {"privileges": _table_admin_privs})
ugs = _acl_groups(spec)
if len(ugs) > 0:
table_rules.append(_union({"group": "|".join(ugs)}, rule))
ugs = _acl_users(spec)
if len(ugs) > 0:
table_rules.append(_union({"user": "|".join(ugs)}, rule))
uallow[spec["catalog"]] = _concat(uallow.get(spec["catalog"], []), spec["admin"])
# construct acl rules if any are configured
uhide = set()
ufilter = set()
if "acl" in spec:
for acl in spec["acl"]:
rule = _union(cst, {"privileges": _table_public_privs})
if "hide" in acl:
uhide.update(set(acl["hide"]))
rule.update({"columns": [{"name": col, "allow": False} for col in acl["hide"]]})
if "filter" in acl:
ufilter.update(set(acl["filter"]))
rule.update({"filter": " and ".join([f"({f})" for f in acl["filter"]])})
ugs = _acl_groups(acl, k="id")
if len(ugs) > 0:
table_rules.append(_union({"group": "|".join(ugs)}, rule))
ugs = _acl_users(acl, k="id")
if len(ugs) > 0:
table_rules.append(_union({"user": "|".join(ugs)}, rule))
# table default policy goes last
# spec['public'] can be either boolean or an object, and it
# registers as True if it is an object or boolean value True
public = spec["public"]
pub = ((type(public) == bool) and public) or (type(public) == dict)
rule = _union(cst, {"privileges": (_table_public_privs if pub else [])})
if type(public) == dict:
# if 'public' was specified as an object with settings, then
# include these in the union of all hidden columns and filters
if "hide" in public:
uhide.update(set(public["hide"]))
if "filter" in public:
ufilter.update(set(public["filter"]))
if pub:
# if table is set to general public access, then include
# all hidden columns and row filters in the acl list, so that
# public cannot see anything hidden by any other row/col ACL rule
if len(uhide) > 0:
rule.update({"columns": [{"name": col, "allow": False} for col in sorted(list(uhide))]})
if len(ufilter) > 0:
rule.update({"filter": " and ".join([f"({f})" for f in sorted(list(ufilter))])})
table_rules.append(rule)
# default schema rules for tables are lower priority than specific table rules
for spec in dsl["schemas"]:
cst = {"catalog": spec["catalog"], "schema": spec["schema"]}
# set the default public privs inside this schema
table_rules.append(_union(cst, {"privileges": _table_public_privs if spec["public"] else []}))
# next are catalog rules
for spec in dsl["catalogs"]:
rule = {"catalog": spec["catalog"], "allow": "all"}
# configure group(s) with read+write access to this catalog
# I have concerns about how using "|" style regex is going to scale if number
# of schemas and tables grows large, so I am going to encode these as individual rules
ugs = sorted(list(set([e["group"] for e in uallow.get(spec["catalog"], []) if "group" in e])))
for ug in ugs:
catalog_rules.append(_union({"group": ug}, rule))
ugs = sorted(list(set([e["user"] for e in uallow.get(spec["catalog"], []) if "user" in e])))
for ug in ugs:
catalog_rules.append(_union({"user": ug}, rule))
# catalog rules for tables section are lower priority than schema rules above
table_rules.append({"catalog": spec["catalog"], "privileges": _table_public_privs if spec["public"] else []})
# global default rules go last
table_rules.append(
{
# default table privs can be 'read-only' (i.e. select) or 'no privileges'
"privileges": (_table_public_privs if dsl["public"] else [])
}
)
schema_rules.append(
{
# defaulting all schemas to owner is not safe
# schemas should be assigned ownership on an explicit basis
"owner": False
}
)
catalog_rules.append(
{
# allows basic 'show schemas' and 'show tables' operations for everyone
"allow": "read-only"
}
)
# assemble the final json structure and return it
rules = {"catalogs": catalog_rules, "schemas": schema_rules, "tables": table_rules}
return rules
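# For orientation, a minimal DSL document along the lines expected by
# dsl_to_rules might look like the sketch below (field names are inferred from
# the code paths above; see the linked examples directory for authoritative
# samples):
#
#   {
#     "public": false,
#     "admin": [{"group": "data-admins"}],
#     "catalogs": [{"catalog": "hive", "public": true}],
#     "schemas": [{"catalog": "hive", "schema": "team1",
#                  "admin": [{"group": "team1-admins"}], "public": false}],
#     "tables": [{"catalog": "hive", "schema": "team1", "table": "t1",
#                 "public": true}]
#   }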
def main():
dsl_fname = sys.argv[1]
with open(dsl_fname, "r") as dsl_file:
if dsl_fname.endswith(".json"):
dsl = json.load(dsl_file)
elif dsl_fname.endswith(".yaml"):
dsl = yaml.safe_load(dsl_file)
else:
raise ValueError(f"Filename {dsl_fname} had unrecognized suffix")
rules = dsl_to_rules(dsl, validate=True)
with sys.stdout as rules_file:
json.dump(rules, rules_file, indent=4)
rules_file.write("\n")
if __name__ == "__main__":
main()
|
# Unicode symbols in UTF-8
# DOWNWARDS ARROW
DOWN_ARROW = b"\xe2\x86\x93"
# BLACK DOWN-POINTING TRIANGLE
DOWN_TRIANGLE = b"\xe2\x96\xbc"
|
# Generated by Django 3.1.5 on 2021-01-22 22:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("course", "0023_auto_20210115_1332"),
("quiz", "0002_auto_20210120_0738"),
]
operations = [
migrations.AlterField(
model_name="quiz",
name="chapter",
field=models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="quiz",
to="course.chapter",
),
),
]
|
#!/usr/bin/env python3
import os
import yaml
import click
import logging
import pandas as pd
logging.basicConfig(level=logging.INFO)
@click.group()
def cli():
pass
@cli.command()
@click.option('-i', '--input', type=click.Path(exists=True),
help="""
path to a TSV to be summarised.
""")
def sort(input):
try:
df = pd.read_csv(input, sep="\t")
df['?s']=df['?s'].str.replace('<http://purl.obolibrary.org/obo/','')
df['?s']=df['?s'].str.replace('<http://www.geneontology.org/formats/oboInOwl#','oio:')
df['?s']=df['?s'].str.replace('<http://www.w3.org/2000/01/rdf-schema#','rdfs:')
df['?s']=df['?s'].str.replace('_',':')
df['?s']=df['?s'].str.replace('>','')
df['?p']=df['?p'].str.replace('<http://purl.obolibrary.org/obo/','')
df['?p']=df['?p'].str.replace('<http://www.w3.org/2002/07/owl#','owl:')
df['?p']=df['?p'].str.replace('<http://purl.org/dc/terms/','dc:')
df['?p']=df['?p'].str.replace('<http://www.geneontology.org/formats/oboInOwl#','oio:')
df['?p']=df['?p'].str.replace('<http://www.w3.org/2000/01/rdf-schema#','rdfs:')
df['?p']=df['?p'].str.replace('_',':')
df['?p']=df['?p'].str.replace('>','')
df['o']=df['?s'].str.replace('[:][0-9]+','',regex=True)
print(df['o'].value_counts())
df.to_csv(input, sep='\t', index=False)
except Exception as e:
print(f"{input} could not be loaded")
print(e)
if __name__ == '__main__':
cli()
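# Example invocation from a shell (the script filename is illustrative):
#   python summarise_tsv.py sort -i input.tsv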
|
import requests
import threading
from config import GITTER_WEBHOOK, SLACK_WEBHOOK
def notify_all(label, counter):
threads = []
gitter = threading.Thread(target=_gitter, args=(label, counter))
threads.append(gitter)
slack = threading.Thread(target=_slack, args=(label, counter))
threads.append(slack)
for thread in threads:
        thread.daemon = True  # setDaemon() is deprecated in Python 3.10+
thread.start()
def _gitter(label, counter):
if GITTER_WEBHOOK is None:
return
payload = {'message': '*{}* migration counter: *{}*'.format(label, counter)}
requests.post(GITTER_WEBHOOK, data=payload)
def _slack(label, counter):
if SLACK_WEBHOOK is None:
return
payload = {'text': '_*{}* migration counter: *{}*_'.format(label, counter)}
requests.post(SLACK_WEBHOOK, json=payload)
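# Minimal usage sketch (assumes GITTER_WEBHOOK / SLACK_WEBHOOK are set in
# config.py; the label and counter values below are illustrative):
if __name__ == "__main__":
    notify_all("users", 42)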
|
from kivy.garden.matplotlib.backend_kivyagg import (
FigureCanvasKivyAgg,
NavigationToolbar2Kivy,
)
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from matplotlib.figure import Figure
import numpy as np
def on_canvas_click(event):
if event.inaxes:
event.inaxes.plot([event.xdata], [event.ydata], marker="o", color="r")
event.canvas.draw()
# Create main application.
class HelloApp(App):
def build(self):
data = np.random.random((10, 10))
# Create the widgets. We need a vertical box to arrange the navigation toolbar
hbox = BoxLayout()
vbox = BoxLayout(orientation="vertical")
label = Label(text="Click on the plot as many times as you want!")
# Create the figure.
fig = Figure()
axes = fig.add_subplot()
axes.imshow(data)
canvas = FigureCanvasKivyAgg(fig)
nav = NavigationToolbar2Kivy(canvas)
# Add them to a container.
vbox.add_widget(canvas)
vbox.add_widget(nav.actionbar)
hbox.add_widget(label)
hbox.add_widget(vbox)
# Add the callback of the canvas.
canvas.mpl_connect("button_press_event", on_canvas_click)
canvas.draw()
# Return the top container. This can be any widget
return hbox
# Run the main window loop, which starts the program.
if __name__ == "__main__":
from kivy.config import Config
# We don't want a fullscreen App here.
Config.set("graphics", "fullscreen", "0")
HelloApp().run()
|
import random
from pygamelib.assets import graphics
from pygamelib.gfx import core
from pygamelib import board_items, constants, actuators, base
from blessed import Terminal
from game import media
import time
from copy import deepcopy
terminal = Terminal()
class Cell(board_items.BoardItemComplexComponent):
def __init__(self, **kwargs):
if "type" not in kwargs:
kwargs["type"] = "cell"
board_items.BoardItemComplexComponent.__init__(self, **kwargs)
if "multi_color" in kwargs and type(kwargs["multi_color"]) is bool:
self.multi_color = kwargs["multi_color"]
else:
self.multi_color = random.choice([True, False])
self.colors = list()
self.sprixel = core.Sprixel(" ")
if "color1" in kwargs and isinstance(kwargs["color1"], media.Color):
self.colors.append(kwargs["color1"])
else:
self.colors.append(media.Color.random())
self.sprixel.bg_color = terminal.on_color_rgb(
self.colors[0].r, self.colors[0].g, self.colors[0].b
)
self.__blocks = None
if self.multi_color:
# self.__blocks = list()
# for n in dir(graphics.Blocks):
# if n.startswith("__"):
# continue
# self.__blocks.append(graphics.Blocks.__dict__[n])
if "color2" in kwargs and isinstance(kwargs["color2"], media.Color):
self.colors.append(kwargs["color2"])
else:
                self.colors.append(media.Color.random())
# self.sprixel.model = random.choice(self.__blocks) * 2
self.sprixel.model = self.get_random_block()
self.sprixel.fg_color = terminal.color_rgb(
self.colors[1].r, self.colors[1].g, self.colors[1].b
)
def set_color(self, color, idx=0):
if isinstance(color, media.Color):
self.colors[idx] = color
def get_random_block(self):
if self.__blocks is None:
self.__blocks = list()
for n in dir(graphics.Blocks):
if n.startswith("__"):
continue
self.__blocks.append(graphics.Blocks.__dict__[n])
return random.choice(self.__blocks) * 2
class Organism(board_items.ComplexNPC):
mutation_rate = 0.1
base_lifespan = 5
base_lifespan_variation = 0.25
def __init__(self, **kwargs):
self.cells = list()
if "cells" in kwargs and type(kwargs["cells"]) is list:
self.cells = kwargs["cells"]
spr = list()
if type(self.cells[0]) is list:
for r in self.cells:
nl = list()
for c in r:
nl.append(c.sprixel)
spr.append(nl)
kwargs["sprite"] = core.Sprite(sprixels=spr)
elif isinstance(self.cells[0], Cell):
spr = list()
for c in self.cells:
spr.append(c.sprixel)
kwargs["sprite"] = core.Sprite(sprixels=[spr])
if "type" not in kwargs:
kwargs["type"] = "organism"
super().__init__(**kwargs)
self.gen = 0
if "gen" in kwargs and type(kwargs["gen"]) is int:
self.gen = kwargs["gen"]
self.lifespan = random.randint(
round(
Organism.base_lifespan
- Organism.base_lifespan * Organism.base_lifespan_variation
),
round(
Organism.base_lifespan
+ Organism.base_lifespan * Organism.base_lifespan_variation
),
)
if "lifespan" in kwargs and type(kwargs["lifespan"]) is int:
self.lifespan = kwargs["lifespan"]
self.initial_lifespan = self.lifespan
self.note = None
if "note" in kwargs and isinstance(kwargs["note"], media.Note):
self.note = kwargs["note"]
self.chord = None
if "chord" in kwargs and isinstance(kwargs["chord"], media.Chord):
self.chord = kwargs["chord"]
self.starting_position = [0, 0]
if "starting_position" in kwargs and type(kwargs["starting_position"]) is int:
self.starting_position = kwargs["starting_position"]
# target is an item
self.target = None
if "target" in kwargs and type(kwargs["target"]) is int:
self.target = kwargs["target"]
self.timestamp = time.time()
def reproduce(self, other=None):
# We can all agree that it's a little sad
if other is None:
if self.lifespan >= 2:
self.lifespan = int(self.lifespan / 2)
o = deepcopy(self)
o.initial_lifespan = o.lifespan
return o
return None
else:
new_width = random.randint(
min([self.width, other.width]), max([self.width, other.width]),
)
new_height = random.randint(
min([self.height, other.height]), max([self.height, other.height]),
)
# size: [width, height]
# unicellular organisms have a chance to become multicellular
# We'll treat each cell as a "gene"
c = media.Color(0, 0, 0)
mix = random.random()
mc = False
if mix > 0.4 and mix <= 0.6 and not self.cells[0].multi_color:
mc = random.choice([True, False])
if mc:
c1 = media.Color(
self.cells[0].colors[0].r,
self.cells[0].colors[0].g,
self.cells[0].colors[0].b,
)
c2 = media.Color(
other.cells[0].colors[0].r,
other.cells[0].colors[0].g,
other.cells[0].colors[0].b,
)
new = Organism(
size=[new_width, new_height],
cells=[Cell(multi_color=mc, color1=c1, color2=c2)],
)
return new
else:
for elt in ["r", "g", "b"]:
# Organisms can take either the genome of parent 1 or parent 2 or a
# blending of them. They have 40% chance to take a gradient from one or
# the other parent and 60% chance to be a mix (20% perfect mix, 20%
# closer to parent 1 and 20% closer to parent 2).
mc = False
if mix <= 0.2:
c.__setattr__(
elt, self.cells[0].colors[0].__getattribute__(elt)
)
elif mix <= 0.4:
c.__setattr__(
elt, other.cells[0].colors[0].__getattribute__(elt)
)
else:
distance = 1
factor = 1
if mix <= 0.6:
# perfect mix
distance = 2
elif mix <= 0.8:
# closer to self
distance = 3
else:
# closer to other
factor = 2
distance = 3
c.__setattr__(
elt,
round(
(
self.cells[0].colors[0].__getattribute__(elt)
+ other.cells[0].colors[0].__getattribute__(elt)
)
* (factor / distance)
),
)
new = Organism(
size=[new_width, new_height],
cells=[Cell(multi_color=False, color1=c)],
)
new.actuator = actuators.RandomActuator(
moveset=self.actuator.moveset[
0 : round(len(self.actuator.moveset) / 2)
]
+ other.actuator.moveset[round(len(other.actuator.moveset) / 2) :]
)
# if self.chord is not None or other.chord is not None:
# if self.chord is not None:
# new.chord = media.Chord(self.chord.name)
# else:
# new.chord = media.Chord(other.chord.name)
# elif self.note is not None or other.note is not None:
# if self.note is not None:
# new.note = media.Note(self.note.name)
# else:
# new.note = media.Note(other.note.name)
return new
def mutate(self):
if random.random() <= Organism.mutation_rate:
ch = random.choice([1, 2, 3, 4])
if ch == 1:
# color
mut = random.choice([True, False])
if mut:
if self.cells[0].multi_color:
self.cells[0].colors[
random.randrange(0, len(self.cells[0].colors))
] = media.Color.random()
else:
self.cells[0].multi_color = True
self.cells[0].sprixel.model = self.cells[0].get_random_block()
c = media.Color.random()
if len(self.cells[0].colors) > 1:
self.cells[0].colors[1] = c
else:
self.cells[0].colors.append(c)
elif ch == 2:
pass
# note/chord
# if self.note is None:
# self.note = media.Note.random()
# else:
# self.chord = media.Chord.random()
elif ch == 3:
# moveset
directions = [
constants.UP,
constants.DOWN,
constants.LEFT,
constants.RIGHT,
constants.DLDOWN,
constants.DLUP,
constants.DRDOWN,
constants.DRUP,
]
dirs = list()
for _ in range(0, random.randint(0, 5)):
dirs.append(random.choice(directions))
self.actuator.moveset = self.actuator.moveset + dirs
elif ch == 4:
icurrent = self.initial_lifespan
self.initial_lifespan += random.randint(
round(self.initial_lifespan * Organism.base_lifespan_variation),
round(self.initial_lifespan * Organism.base_lifespan_variation) * 2,
)
self.lifespan += self.initial_lifespan - icurrent
def fitness(self):
mc = 0
if self.cells[0].multi_color:
mc = 1
music = 0
if self.note is not None:
music = 50
if self.chord is not None:
music = 150
score = 0
# return (
# len(self.actuator.moveset) * 5
# + (
# base.Math.distance(
# self.starting_position[0],
# self.starting_position[1],
# self.target.pos[0],
# self.target.pos[1],
# )
# - self.distance_to(self.target)
# )
# * 100
# + 50 * mc
# + music
# + self.initial_lifespan * 20
# )
score += len(self.actuator.moveset) * 5
if self.target is not None:
score += (
base.Math.distance(
self.starting_position[0],
self.starting_position[1],
self.target.pos[0],
self.target.pos[1],
)
- self.distance_to(self.target)
) * 100
score += 50 * mc
score += music
score += self.initial_lifespan * 20
return score
def birth(self):
if self.chord is not None:
self.chord.play()
elif self.note is not None:
self.note.play()
def death(self):
pass
class GeneticMaterial(board_items.GenericActionableStructure):
genetic_material_model = (
graphics.GeometricShapes.CIRCLE_WITH_LOWER_HALF_BLACK
+ graphics.GeometricShapes.CIRCLE_WITH_UPPER_HALF_BLACK
)
def __init__(self, **kwargs):
kwargs["perm"] = constants.NPC_AUTHORIZED
if "type" not in kwargs:
kwargs["type"] = "genetic_material"
super().__init__(**kwargs)
self.set_overlappable(True)
self.set_restorable(False)
self.color = media.Color.random()
if "color" in kwargs and isinstance(kwargs["color"], media.Color):
self.color = kwargs["color"]
self.note = media.Note.random()
if "note" in kwargs and isinstance(kwargs["note"], media.Note):
self.note = kwargs["note"]
self.chord = media.Chord.random()
if "chord" in kwargs and isinstance(kwargs["chord"], media.Chord):
self.chord = kwargs["chord"]
self.directions = list()
if "directions" in kwargs and type(kwargs["directions"]) is list:
self.directions = kwargs["directions"]
else:
directions = [
constants.UP,
constants.DOWN,
constants.LEFT,
constants.RIGHT,
constants.DLDOWN,
constants.DLUP,
constants.DRDOWN,
constants.DRUP,
]
for _ in range(0, random.randint(0, 5)):
self.directions.append(random.choice(directions))
if "sprixel" in kwargs:
self.sprixel = kwargs["sprixel"]
else:
self.sprixel = core.Sprixel(
GeneticMaterial.genetic_material_model,
fg_color=terminal.color_rgb(self.color.r, self.color.g, self.color.b),
is_bg_transparent=True,
)
@classmethod
def random(cls):
return GeneticMaterial()
|
import logging
import os
import torch

# from genut.models.seq2seq_vae import
from archive.genut import load_prev_state
from archive.genut import RNNLM
from archive.genut.util.argparser import ArgParser
from archive.genut import Tester
from archive.genut import LMTrainer
# NOTE: load_dataset, load_pretrain_word_embedding, TEST_FLAG and TRAIN_FLAG used
# below are assumed to come from the archive.genut package as well.
if __name__ == "__main__":
ap = ArgParser()
opt = ap.parser.parse_args()
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
# Register for logger
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.INFO)
if opt.dbg is not True:
fileHandler = logging.FileHandler("logger.log")
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
rootLogger.addHandler(consoleHandler)
logging.info('Go!')
if opt.use_cuda and not torch.cuda.is_available():
logging.error('GPU NOT avail.')
elif not torch.cuda.is_available():
logging.warning('GPU NOT avail.')
opt, data_patch = load_dataset(opt)
logging.info(opt)
pretrain_embedding = load_pretrain_word_embedding(opt)
# model = Seq2seq(opt, pretrain_embedding)
model = RNNLM(opt, pretrain_embedding)
if opt.use_cuda:
model = model.cuda()
if opt.load_dir is not None and opt.load_file is not None:
# model.enc = load_prev_state(opt.load_dir + '/' + opt.load_file + '_enc', model.enc)
model.dec = load_prev_state(opt.load_dir + '/' + opt.load_file + '_dec', model.dec)
model.emb = load_prev_state(opt.load_dir + '/' + opt.load_file + '_emb', model.emb)
print("Model Initialized.")
if opt.mode == TEST_FLAG:
model.eval()
lm_test = Tester(opt, model,
data=data_patch,
write_file='_'.join([opt.load_file, 'result']))
ppl = lm_test.test_iters()
logging.info("Evaluation PPL: %f" % ppl)
elif opt.mode == TRAIN_FLAG:
model.train()
s2s_train = LMTrainer(opt, model, data_patch)
try:
s2s_train.train_iters()
except KeyboardInterrupt:
logging.info("Training Interrupted.")
else:
raise RuntimeError
|
from keras.models import Sequential
from keras.layers import Dense, Activation
## https://keras.io/zh/getting-started/sequential-model-guide/
"""
基本的过程
1. 输入数据的尺寸 Dense
2. 配置学习过程 compile 优化器,损失函数,评估标准
3. 加数据 data 训练 fit
"""
# model = Sequential([
# Dense(32, input_shape=(784,)),
# Activation('relu'),
# Dense(10),
# Activation('softmax'),
# ])
# Specify the input data shape
model = Sequential()
model.add(Dense(32, input_dim=784))
model.add(Activation('relu'))
# Configure the learning process
# Multi-class classification problem
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Binary classification problem
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# Mean squared error regression problem
model.compile(optimizer='rmsprop',
loss='mse')
# Custom evaluation metric
import keras.backend as K
def mean_pred(y_true, y_pred):
return K.mean(y_pred)
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', mean_pred])
# Single-input model with 2 classes (binary classification):
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# Generate dummy data
import numpy as np
data = np.random.random((1000, 100))
labels = np.random.randint(2, size=(1000, 1))
# Train the model, iterating over the data in batches of 32 samples
model.fit(data, labels, epochs=10, batch_size=32)
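# A hedged follow-up sketch (not from the original guide): evaluate the fitted
# binary-classification model above and run inference on a few samples of the
# same dummy data.
loss, acc = model.evaluate(data, labels, batch_size=32)
predictions = model.predict(data[:5])
print('loss: %.4f, accuracy: %.4f, predictions shape: %s' % (loss, acc, predictions.shape))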
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 9 21:13:29 2017
@author: Luis Ariel Valbuena
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as pl
import matplotlib.colors as mplc
import numpy as np
#from svmutil import *
import math
import HW2 as hw2
def markovProcess(P,list_sigma,list_mu,N):
p = np.cumsum(P,axis=1)
    zact = int(np.ceil(np.random.uniform() * len(list_mu)) - 1)  # initial state index in {0, ..., len(list_mu)-1}
z = []
for ii in range(N):
a = np.random.uniform()
        auxMatrix = p[zact, :] > a
        b = 0
        temp = auxMatrix.tolist()
listaBooleans = []
for element in temp[0]:
if (element == True):
listaBooleans.append(b)
b = b + 1
if listaBooleans:
zact = np.min(listaBooleans)
z.append(zact)
x = np.zeros(len(z))
distribution = np.random.normal(size = len(z))
c = 0
ret_z = []
for zz in z:
x[c] = distribution[c]*list_sigma[zz] + list_mu[zz]
c = c + 1
ret_z.append(zz+1)
return (x,z)
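# Illustrative call (a sketch, not part of the original script): with a 2-state chain,
#     x, z = markovProcess(np.matrix([[0.9, 0.1], [0.2, 0.8]]), [0.5, 0.5], [0.0, 3.0], 50)
# draws hidden states z from the transition matrix and emits x_t ~ N(mu_{z_t}, sigma_{z_t}^2).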
def estimateTransitionProbabilities(z):
#with indexing starting from 0
N_states = np.max(z) + 1
P_est = np.matrix(np.zeros((N_states,N_states)))
N = len(z)
for ii in range(N-1):
P_est[z[ii],z[ii+1]] = P_est[z[ii],z[ii+1]] + 1
DimX, DimY = P_est.shape
for ii in range(DimX):
summation = np.sum(P_est[ii,:])
P_est[ii,:] = (1.0/summation)*P_est[ii,:]
return P_est
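# Illustrative call (a sketch, not part of the original script): for z = [0, 0, 1, 2, 1, 0],
# estimateTransitionProbabilities(z) counts the transitions 0->0, 0->1, 1->2, 2->1, 1->0
# and row-normalizes the resulting 3x3 count matrix.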
#def calculate_alpha(X,P,List_mu,List_CV):
# alpha = []
# dimX = P.shape[0]
# for n in range(len(X)):
# if(n == 0):
# temp = []
# for jj in range(dimX):
# temp.append(1.0/dimX)
# alpha.append(temp)
# else:
# auxalpha = []
# for jj in range(dimX):
# P_xt_zt_j = hw2.calculate_probability_MultiGaussian([X[n]],List_mu[jj],List_CV[jj])
# temp = 0
# for ii in range(dimX):
# temp = temp + (P[ii,jj]*alpha[n-1][ii])
# auxalpha.append(temp*P_xt_zt_j)
# ttemp = []
# for ee in auxalpha:
# ttemp.append(ee/np.sum(auxalpha))
# alpha.append(ttemp)
#
# return alpha
def calculate_beta(X,P,List_mu,List_CV):
beta = []
for ee in range(len(X)):
beta.append([])
dimX = P.shape[0]
for n in range(len(X)-1,-1,-1):
if( n == len(X)-1):
temp = []
for jj in range(dimX):
temp.append(1.0/dimX)
beta[n] = temp
else:
auxbeta = []
for ii in range(dimX):
temp = 0
for jj in range(dimX):
P_xt_zt_j = hw2.calculate_probability_MultiGaussian([X[n+1]],List_mu[jj],List_CV[jj])
temp = temp + (P_xt_zt_j*P[ii,jj]*beta[n+1][jj])#temp = temp + (P_xt_zt_j*P[ii,jj]*beta[n+1][ii])
auxbeta.append(temp)
ttemp = []
for ee in auxbeta:
ttemp.append(ee/np.sum(auxbeta))
beta[n] = ttemp
return beta
def calculate_alpha_HP(X,P,List_mu,List_CV):
alpha = []
dimX = P.shape[0]
for n in range(len(X)):
if(n == 0):
temp = []
for jj in range(dimX):
temp.append(1.0/dimX)
alpha.append(temp)
else:
vectorPsi = np.matrix(np.zeros((dimX,1)))
Alpha_n_minus_1 = np.matrix(np.zeros((dimX,1)))
for ii in range(dimX):
vectorPsi[ii,0] = hw2.calculate_probability_MultiGaussian([X[n]],List_mu[ii],List_CV[ii])
Alpha_n_minus_1[ii,0] = alpha[n-1][ii]
matrixTemp = (P.T)*Alpha_n_minus_1
Alpha_n = HadamardProduct(vectorPsi,matrixTemp)
Alpha_n = Alpha_n/np.sum(Alpha_n)
temp = Alpha_n.tolist()
ttemp = []
for ii in range(dimX):
ttemp.append(temp[ii][0])
alpha.append(ttemp)
return alpha
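# calculate_alpha_HP implements the normalized forward recursion
#     alpha_n ∝ psi(x_n) ⊙ (P^T · alpha_{n-1}),
# while calculate_beta_HP below implements the backward recursion
#     beta_n ∝ P · (psi(x_{n+1}) ⊙ beta_{n+1}),
# each renormalized to sum to one at every step.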
def calculate_beta_HP(X,P,List_mu,List_CV):
beta = []
for ee in range(len(X)):
beta.append([])
dimX = P.shape[0]
for n in range(len(X)-1,-1,-1):
if( n == len(X)-1):
temp = []
for jj in range(dimX):
temp.append(1.0/dimX)
beta[n] = temp
else:
vectorPsi = np.matrix(np.zeros((dimX,1)))
Beta_n_plus_1 = np.matrix(np.zeros((dimX,1)))
for ii in range(dimX):
vectorPsi[ii,0] = hw2.calculate_probability_MultiGaussian([X[n+1]],List_mu[ii],List_CV[ii])
Beta_n_plus_1[ii,0] = beta[n+1][ii]
matrixTemp = HadamardProduct(vectorPsi,Beta_n_plus_1)
Beta_n = P*matrixTemp
Beta_n = Beta_n/np.sum(Beta_n)
temp = Beta_n.tolist()
ttemp = []
for ii in range(dimX):
ttemp.append(temp[ii][0])
beta[n] = ttemp
return beta
def HadamardProduct(matrix1,matrix2):
DimX,DimY = matrix1.shape
result = np.matrix(np.zeros((DimX,DimY)))
for ii in range(DimX):
for jj in range(DimY):
result[ii,jj] = matrix1[ii,jj]*matrix2[ii,jj]
return result
def calculate_Forwards_Backwards(alpha,beta):
gamma = []
for vect_alpha, vect_beta in zip(alpha,beta):
temp = []
for alpha_i,beta_i in zip(vect_alpha,vect_beta):
temp.append(alpha_i*beta_i)
ttemp = []
for ee in temp:
ttemp.append(ee/np.sum(temp))
gamma.append(ttemp)
return gamma
def calculate_Matrix_xi_t(X,alpha,beta,P,List_mu,List_CV,n):
dimX = P.shape[0]
xi_t_tplus1 = np.matrix(np.zeros((dimX,dimX)))
for i in range(dimX):
for j in range(dimX):
alpha_t_i = alpha[n][i]
beta_tplus1_j = beta[n+1][j]
P_ij = P[i,j]
P_xt_zt_j = hw2.calculate_probability_MultiGaussian([X[n+1]],List_mu[j],List_CV[j])
xi_t_tplus1[i,j] = alpha_t_i*beta_tplus1_j*P_ij*P_xt_zt_j
xi_t_tplus1[i,:] = xi_t_tplus1[i,:]/np.sum(xi_t_tplus1[i,:])
return xi_t_tplus1
def calculate_Matrix_xi_t_HP(X,alpha,beta,P,List_mu,List_CV,n):
dimX = P.shape[0]
xi_t_tplus1 = np.matrix(np.zeros((dimX,dimX)))
alpha_n = np.matrix(alpha[n])
beta_n_plus_1 = np.matrix(beta[n+1])
vectorPsi_nplus_1 = np.matrix(np.zeros((dimX,1)))
for ii in range(dimX):
vectorPsi_nplus_1[ii,0] = hw2.calculate_probability_MultiGaussian([X[n+1]],List_mu[ii],List_CV[ii])
matrix1 = HadamardProduct(beta_n_plus_1.T,vectorPsi_nplus_1)
matrix2 = alpha_n.T*matrix1.T
matrix3 = HadamardProduct(P,matrix2)
# for ii in range(dimX):
# xi_t_tplus1[ii,:] = matrix3[ii,:]/np.sum(matrix3[ii,:])
xi_t_tplus1 = matrix3
return xi_t_tplus1
def get_Train_Test_Data(X,threshold):
Temp1 = []
Temp2 = []
for elem in X.tolist():
if (np.random.uniform()<= threshold):
Temp1.append(np.matrix([[elem]]))
else:
Temp2.append(np.matrix([[elem]]))
if(len(Temp1)>=len(Temp2)):
XTrain = list(Temp1)
XTest = list(Temp2)
else:
XTrain = list(Temp2)
XTest = list(Temp1)
return (XTrain,XTest)
def EM_GMM(XTrain,XTest,Lista_mu0,Lista_sigma0,v_pi0,N_States):
LogLikelihood_1 = -9999999999
iteration = 1
Lista_mu = list(Lista_mu0)
Lista_sigma = list(Lista_sigma0)
v_pi = list(v_pi0)
while(True):
print "\n\tIteration GMM " + str(iteration)
resultUpdate = hw2.updateParameters(v_pi, XTrain,Lista_mu,Lista_sigma,N_States)
v_pi = resultUpdate[0]
Lista_mu = resultUpdate[1]
Lista_sigma = []
for CVm in resultUpdate[2]:
Lista_sigma.append(np.matrix(CVm))
LogLikelihood = hw2.calculate_LogLikelihood(Lista_mu,Lista_sigma,v_pi,XTest)
iteration = iteration + 1
if(LogLikelihood > LogLikelihood_1)and(iteration <= 20):
LogLikelihood_1 = LogLikelihood
else:
break
print "\nmus"
print Lista_mu
return (Lista_mu,Lista_sigma,v_pi)
def estimate_P(X,arrayEvolAlpha,arrayEvolBeta,N_States):
Evolution_Ximatrices = []
estimation_P =np.matrix(np.zeros((N_States,N_States)))
denominator = np.matrix(np.zeros((N_States,1)))
summation_on_Time = []
for ii in range(len(X)):
observed_values = X[ii]
EvolutionAlpha = arrayEvolAlpha[ii]
EvolutionBeta = arrayEvolBeta[ii]
Xi_matrices = []
temp = np.matrix(np.zeros((N_States,N_States)))
        # NOTE: N, P, Lista_mu and Lista_sigma below are read from module scope,
        # so estimate_P only works as called from the __main__ block.
        for tt in range(N-1):
xi_t_tplus1 = calculate_Matrix_xi_t(observed_values,EvolutionAlpha,EvolutionBeta,P,Lista_mu,Lista_sigma,tt)
# xi_t_tplus1 = calculate_Matrix_xi_t_HP(observed_values,EvolutionAlpha,EvolutionBeta,P,Lista_mu,Lista_sigma,tt)
Xi_matrices.append(xi_t_tplus1)
temp = temp + xi_t_tplus1
# estimation_P = estimation_P + Xi_matrices[tt]
# denominator = denominator + np.matrix(EvolutionForwBack[tt]).T
summation_on_Time.append(temp)
Evolution_Ximatrices.append(Xi_matrices)
summationTotal = np.matrix(np.zeros((N_States,N_States)))
for Xi_i in summation_on_Time:
summationTotal = summationTotal + Xi_i
denominator = summationTotal[:,0]
for kk in range(1,N_States):
denominator = denominator + summationTotal[:,kk]
for kk in range(N_States):
estimation_P[kk,:] = summationTotal[kk,:]/denominator[kk,0]
return estimation_P
def viterbi(jj,v_pi,List_mu,List_CV,X,P,N):
# print "jj " + str(kk) + ", n " + str(n) + "len X "+str(len(X))+"\n"
result = []
for n in range(N):
if(n == 0):
result.append(v_pi[jj]*hw2.calculate_probability_MultiGaussian([X[n]],List_mu[jj],List_CV[jj]))
else:
LLista = []
for ii in range(P.shape[0]):
temp = hw2.calculate_probability_MultiGaussian([X[n]],List_mu[jj],List_CV[jj])
LLista.append(P[ii,jj]*temp*result[n-1])
result.append(np.max(LLista))
return result
def generateGraphVerification(states_z,EvolutionParameter,title):
parameter_1 = []
parameter_2 = []
parameter_3 = []
for parameter_n in EvolutionParameter:
parameter_1.append(parameter_n[0])
parameter_2.append(parameter_n[1])
parameter_3.append(parameter_n[2])
pl.figure()
N = len(states_z)
# AspectRatio = 2.5
pl.subplot(411)
min_x = 0
max_x = N +1
min_y = np.min(real_states) - 0.5
max_y = np.max(real_states) + 1.5
axesList = [min_x, max_x, min_y, max_y]
markerline, stemlines, baseline = pl.stem(range(1,len(states_z)+1), [zz+1 for zz in states_z] , '-')
pl.setp(baseline, 'color', 'r', 'linewidth', 2)
pl.grid()
# ax = pl.gca()
pl.axis(axesList)
# ax.set_aspect(AspectRatio)
pl.yticks(np.arange(0, 4, 1.0))
pl.xticks(np.arange(0, N + 10, 10))
pl.title("Observed variable Z")
# AspectRatio = 100
pl.subplot(412)
min_x = 0
max_x = N + 1
min_y = -0.05
max_y = 1.2
axesList = [min_x, max_x, min_y, max_y]
markerline, stemlines, baseline = pl.stem(range(1,len(parameter_1)+1), parameter_1, '-')
pl.setp(baseline, 'color', 'r', 'linewidth', 2)
pl.grid()
# ax = pl.gca()
pl.axis(axesList)
# ax.set_aspect(AspectRatio)
pl.xticks(np.arange(0, N + 10, 10))
pl.title(title + "_{1}")
pl.subplot(413)
markerline, stemlines, baseline = pl.stem(range(1,len(parameter_2)+1), parameter_2, '-')
pl.setp(baseline, 'color', 'r', 'linewidth', 2)
pl.grid()
# ax = pl.gca()
pl.axis(axesList)
# ax.set_aspect(AspectRatio)
pl.xticks(np.arange(0, N + 10, 10))
pl.title(title + "_{2}")
pl.subplot(414)
markerline, stemlines, baseline = pl.stem(range(1,len(parameter_3)+1), parameter_3, '-')
pl.setp(baseline, 'color', 'r', 'linewidth', 2)
pl.grid()
# ax = pl.gca()
pl.axis(axesList)
# ax.set_aspect(AspectRatio)
pl.xticks(np.arange(0, N + 10, 10))
pl.title(title + "_{3}")
pl.subplots_adjust(top=0.94, bottom=0.08, left=0.05, right=0.95, hspace=1.15, wspace=0.35)
pl.savefig('Drawings/'+'real States vs '+title+'.eps')
pl.show()
if __name__ == "__main__":
    #Parameters for the artificially generated data
P_generation = np.matrix([[0.8, 0.1, 0.1],[0.2, 0.5, 0.3],[0.3, 0.1, 0.6]])
list_mu = [1, 2, 3]
list_sigma = [1/3.0, 1/3.0, 1/3.0]
N = 100
NRealizations = 80
AspectRatio = 12
X = []
Z = []
for ii in range(NRealizations):
observed_values , real_states = markovProcess(P_generation,list_sigma,list_mu,N)
X.append(observed_values)
Z.append(real_states)
targetIndex = 54
obs_vals = X[targetIndex]
real_states = Z[targetIndex]
#Drawing generated artificial data
pl.figure()
min_x = -1
max_x = N + 1
min_y = np.min(obs_vals) - 0.5
max_y = np.max(obs_vals) + 0.5
axesList = [min_x, max_x, min_y, max_y]
markerline, stemlines, baseline = pl.stem(range(1,len(obs_vals)+1), obs_vals , '-')
pl.setp(baseline, 'color', 'r', 'linewidth', 2)
pl.grid()
ax = pl.gca()
pl.axis(axesList)
ax.set_aspect(AspectRatio)
pl.title("Observed variable X")
pl.show()
    #Drawing the original states
pl.figure()
min_x = -1
max_x = N + 1
min_y = np.min(real_states) - 0.5
max_y = np.max(real_states) + 1.5
axesList = [min_x, max_x, min_y, max_y]
markerline, stemlines, baseline = pl.stem(range(1,len(real_states)+1), [zz+1 for zz in real_states] , '-')
pl.setp(baseline, 'color', 'r', 'linewidth', 2)
pl.grid()
ax = pl.gca()
pl.axis(axesList)
ax.set_aspect(AspectRatio)
pl.title("Observed variable Z")
pl.show()
# print "P_generation"
# print P_generation
# print "P__est"
# print P__est
#
# print "Different"
# print P_generation - P__est
# pl.figure()
# pl.hist(observed_values,bins='auto')
# pl.grid()
#Initialization
N_States = 3
    #Transition-probability estimation required for item 2.
P__est = np.matrix(np.zeros((N_States,N_States)))
for chain_z in Z:
P__est = P__est + estimateTransitionProbabilities(chain_z)
P__est = P__est/len(Z)
# alpha = []
# beta = []
# for ii in range(N_States):
# alpha.append(1.0/N_States)
# beta.append(1.0)
P = np.matrix(np.random.uniform(size=(N_States,N_States)))
for ii in range(N_States):
P[ii,:] = P[ii,:]/np.sum(P[ii,:])
#generate trial and test
# cc = 1
# XTrain = []
# XTest = []
# for elem in observed_values_1.tolist():
# if (cc % 2 == 1):
# XTrain.append(np.matrix([[elem]]))
# else:
# XTest.append(np.matrix([[elem]]))
arrayLista_mu = []
arrayLista_sigma = []
arrayv_pi = []
Lista_mu = [np.matrix([[1.5]]), np.matrix([[2.2]]), np.matrix([[3.8]])]
Lista_sigma = [np.matrix([[1.0]]), np.matrix([[1.0]]), np.matrix([[1.0]])]
v_pi = [1.0/len(Lista_mu), 1.0/len(Lista_mu), 1.0/len(Lista_mu)]
concatenatedData = []
for arrayy in X:
concatenatedData = np.concatenate((concatenatedData, arrayy))
XTrain, XTest = get_Train_Test_Data(concatenatedData,0.85)
Lista_mu,Lista_sigma,v_pi = EM_GMM(XTrain,XTest,Lista_mu,Lista_sigma,v_pi,N_States)
arrayLista_mu.append(Lista_mu)
arrayLista_sigma.append(Lista_sigma)
arrayv_pi.append(v_pi)
estimation_P = np.matrix(P)
Traj_Lista_mu = []
Traj_Lista_sigma = []
Traj_Lista_v_pi = []
Traj_estimationP = []
Traj_Lista_mu.append(Lista_mu)
Traj_Lista_sigma.append(Lista_sigma)
Traj_Lista_v_pi.append(v_pi)
#%%
for iterator in range(15):
print "\nMain iterator " + str(iterator)
arrayEvolAlpha = []
arrayEvolBeta = []
arrayEvolForwBack = []
for ii in range(len(X)):
observed_values = X[ii]
#E Step
#Testing alpha, beta, forwards-backwards
# EvolutionAlpha = calculate_alpha(observed_values,estimation_P,Lista_mu,Lista_sigma)
EvolutionAlpha = calculate_alpha_HP(observed_values,estimation_P,Lista_mu,Lista_sigma)
EvolutionBeta = calculate_beta(observed_values,estimation_P,Lista_mu,Lista_sigma)
# EvolutionBeta = calculate_beta_HP(observed_values,estimation_P,Lista_mu,Lista_sigma)
EvolutionForwBack = calculate_Forwards_Backwards(EvolutionAlpha,EvolutionBeta)
arrayEvolAlpha.append(EvolutionAlpha)
arrayEvolBeta.append(EvolutionBeta)
arrayEvolForwBack.append(EvolutionForwBack)
if(ii == targetIndex):
generateGraphVerification(real_states,EvolutionAlpha,'Alpha_iteration '+str(iterator))
generateGraphVerification(real_states,EvolutionBeta,'Beta_iteration '+str(iterator))
generateGraphVerification(real_states,EvolutionForwBack,'Gamma_iteration '+str(iterator))
#M Step
#Estimating matrix P
estimation_P = estimate_P(X,arrayEvolAlpha,arrayEvolBeta,N_States)
E_N_1k = np.matrix(np.zeros((N_States,1)))
for array_gamma1 in arrayEvolForwBack[0]:
for kk in range(N_States):
E_N_1k[kk,0] = E_N_1k[kk,0] + array_gamma1[kk]
# E_N_1k = E_N_1k/NRealizations
E_N_1k = E_N_1k/N
v_pi = []
for nn in range(N_States):
v_pi.append(E_N_1k[nn,0])
E_N_j = np.matrix(np.zeros((N_States,1)))
for ii in range(NRealizations):
for t in range(N):
for kk in range(N_States):
E_N_j[kk,0] = E_N_j[kk,0] + arrayEvolForwBack[ii][t][kk]
E_bar_x_k = np.matrix(np.zeros((N_States,1)))
E_bar_xx_k_T = np.matrix(np.zeros((N_States,1)))
for ii in range(NRealizations):
for t in range(N):
for kk in range(N_States):
temp = arrayEvolForwBack[ii][t][kk]*X[ii][t]
E_bar_x_k[kk,0] = E_bar_x_k[kk,0] + temp
E_bar_xx_k_T[kk,0] = E_bar_xx_k_T[kk,0] + X[ii][t]*temp
Lista_mu = []
Lista_sigma = []
for kk in range(N_States):
Lista_mu.append( np.matrix([[ E_bar_x_k[kk,0]/E_N_j[kk,0] ]]) )
Lista_sigma.append( np.matrix( (1/E_N_j[kk,0])*(E_bar_xx_k_T[kk,0] - (E_N_j[kk,0]*Lista_mu[-1]*Lista_mu[-1]) ) ) )
print "\nestimation_P"
print estimation_P
print "\nLista mu"
print Lista_mu
print "\nLista sigma"
print Lista_sigma
print "\nv_pi"
print v_pi
print "\nP_generation"
print P_generation
Traj_Lista_mu.append(Lista_mu)
Traj_Lista_sigma.append(Lista_sigma)
Traj_Lista_v_pi.append(v_pi)
Traj_estimationP.append(estimation_P)
print "\nFinal\n"
print "Traj Lista_mu"
for arrray in Traj_Lista_mu:
print arrray
print "\nTraj Lista_sigma"
for arrray in Traj_Lista_sigma:
print arrray
print "\nTraj Lista_v_pi"
print Traj_Lista_v_pi
print "\nTraj_estimationP"
for matt in Traj_estimationP:
print "\n"
print matt
#%% (jj,v_pi,List_mu,List_CV,X,P,n)
Linea1 = []
Linea2 = []
Linea3 = []
realizationSeq = X[targetIndex]
Linea1.append(viterbi(0,v_pi,Lista_mu,Lista_sigma,realizationSeq,P_generation,N))
Linea2.append(viterbi(1,v_pi,Lista_mu,Lista_sigma,realizationSeq,P_generation,N))
Linea3.append(viterbi(2,v_pi,Lista_mu,Lista_sigma,realizationSeq,P_generation,N))
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
import redis
from django.conf import settings
class Migration(DataMigration):
def forwards(self, orm):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
keys = r.keys("*:*:????????????????????????????????????????")
print " ---> %s keys" % len(keys)
for key in keys:
print "Deleting %s" % key
r.delete(key)
def backwards(self, orm):
"Write your backwards methods here."
models = {
}
complete_apps = ['social']
symmetrical = True
|
import discord
import asyncio
from discord.ext import commands
# A class that supports navigating pages of embeds.
class Pages:
"""
A paginator that allows using Discord reactions to switch pages in an ordered manner.
By default, there are 5 functionalities:
- `fast_previous`: Return to page 0.
- `previous`: Return to previous page if possible.
- `forward`: Move 1 page forward if possible.
- `fast_forward`: Move to the last page.
    - `terminate`: Exit the paginator.
    You can customize the emojis for each functionality in the constructor.
    To remove a functionality, override `_emoji_list` and `_on_reaction()`.
"""
def __init__(self, init_page = 0, **kwargs):
"""
Construct a paginator.
Parameters:
- `init_page`: The starting page index. Default is `0`.
        - `fast_previous`, `previous`, `forward`, `fast_forward`, `terminate`: The respective emoji for each functionality. These are all optional.
"""
self.__page_list__ = []
self._current_page = init_page
self._FAST_PREVIOUS = kwargs.get("fast_previous") if kwargs.get("fast_previous") is not None else '⏮️'
self._PREVIOUS = kwargs.get("previous") if kwargs.get("previous") is not None else '◀️'
self._FORWARD = kwargs.get("forward") if kwargs.get("forward") is not None else '▶️'
self._FAST_FORWARD = kwargs.get("fast_forward") if kwargs.get("fast_forward") is not None else '⏭️'
self._TERMINATE = kwargs.get("terminate") if kwargs.get("terminate") is not None else '⏹️'
self._emoji_list = [self._FAST_PREVIOUS, self._PREVIOUS, self._FORWARD, self._FAST_FORWARD, self._TERMINATE]
def add_page(self, page : discord.Embed):
"""
Add a page into the paginator.
Parameter:
- `page`: A `discord.Embed`.
Exception:
- `TypeError`: When `page` is not `discord.Embed`.
"""
if isinstance(page, discord.Embed):
self.__page_list__.append(page)
else:
raise TypeError("'page' must be discord.Embed.")
async def start(self, ctx : commands.Context, message : discord.Message = None, channel : discord.TextChannel = None, interupt = False):
"""
This function is a coroutine.
A function use to start the paginator.
Parameter:
- `ctx`: The context.
- `message`: The message to bind to. If none provided, a new message will be sent. Otherwise, it'll edit the message.
- `channel`: The channel you want the pages to be sent. If none provided, it'll use `ctx.channel`.
        - `interupt`: `False` if you don't want other users to interact with the paginator, `True` otherwise. Default value is `False`.
Exception:
- `AttributeError`: This exception is raised when the parameter(s) is wrong type.
- `discord.Forbidden`: When the bot doesn't have permissions to send messages/add reactions/read message history.
"""
bot = ctx.bot
channel = ctx.channel if channel is None else channel
author = ctx.author
if len(self.__page_list__) == 0:
return
if len(self.__page_list__) == 1:
            await channel.send(embed = self.__page_list__[0])
return
for num in range(0, len(self.__page_list__)):
self.__page_list__[num].set_footer(text = "Page %d/%d" % (num + 1, len(self.__page_list__)))
if message is None:
message = await channel.send(embed = self.__page_list__[self._current_page])
else:
await message.edit(embed = self.__page_list__[self._current_page])
for emoji in self._emoji_list:
await message.add_reaction(emoji)
def reaction_check(reaction, user):
return reaction.message.id == message.id and user != message.author
while True:
try:
reaction, user = await bot.wait_for("reaction_add", check = reaction_check, timeout = 120.0)
except asyncio.TimeoutError:
self._current_page = -1
await message.clear_reactions()
await message.add_reaction('🕛')
break
else:
if interupt or (not interupt and user == author):
if reaction.emoji in self._emoji_list:
terminate = await self._on_reaction(message, reaction)
if terminate:
break
await message.edit(embed = self.__page_list__[self._current_page])
await message.remove_reaction(reaction, user)
async def _on_reaction(self, message, reaction) -> bool:
"""
        A method that is called when a reaction is valid (according to `interupt` and the author check).
        Override this if you want to remove a functionality.
Parameter:
        - `message`: The message the paginator belongs to.
- `reaction`: The reaction.
Return:
- `True` if the paginator is reacted with termination, `False` otherwise.
"""
if reaction.emoji == '⏮️':
self._current_page = 0
elif reaction.emoji == '◀️':
if self._current_page != 0:
self._current_page -= 1
elif reaction.emoji == '▶️':
if self._current_page < len(self.__page_list__) - 1:
self._current_page += 1
elif reaction.emoji == '⏭️':
self._current_page = len(self.__page_list__) - 1
elif reaction.emoji == '⏹️':
self._current_page = -1
await self._on_terminate(message)
return True
return False
async def _on_terminate(self, message):
"""
A cleanup function when the paginator is terminated.
Parameter:
        - `message`: The message the paginator belongs to.
"""
await message.edit(content = ":white_check_mark:", embed = None)
await message.clear_reactions()
class MinimalPages(Pages):
"""
A minimal paginator which only has `forward`, `previous`, and `terminate` functionality.
    It also terminates silently.
"""
def __init__(self, init_page = 0, **kwargs):
super().__init__(init_page, **kwargs)
self._emoji_list = [self._PREVIOUS, self._FORWARD, self._TERMINATE]
async def _on_reaction(self, message, reaction) -> bool:
if reaction.emoji == self._PREVIOUS:
if self._current_page != 0:
self._current_page -= 1
elif reaction.emoji == self._FORWARD:
if self._current_page < len(self.__page_list__) - 1:
self._current_page += 1
elif reaction.emoji == self._TERMINATE:
await self._on_terminate(message)
return True
return False
async def _on_terminate(self, message):
pass
def listpage_generator(max_item, item_list, title_formatter, item_formatter):
"""
Return a `Pages()` that split the items in the list into different pages.
Important Parameter:
- `max_item`: The maximum amount of items per page.
- `item_list`: List of items to display.
    - `title_formatter`: A callback that accepts a single item and returns a `discord.Embed`.
    - `item_formatter`: A callback that accepts a `discord.Embed` and a single item. Returns nothing.
"""
page = Pages()
import utilities.facility as Facility
embed = None
for index, item in enumerate(item_list):
if index % max_item == 0:
embed = title_formatter(item)
item_formatter(embed, item)
if index % max_item == max_item - 1:
page.add_page(embed)
embed = None
if embed is not None:
page.add_page(embed)
return page
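# Hedged usage sketch (illustrative only; the command body and embed contents below
# are assumptions, not part of this module's API):
async def _example_pages_usage(ctx: commands.Context):
    pages = Pages()
    for i in range(3):
        pages.add_page(discord.Embed(title="Section %d" % (i + 1), description="..."))
    # Only the invoking user may flip pages because `interupt` defaults to False.
    await pages.start(ctx)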
|
from .MaskEdgeDetector import MaskEdgeDetector
from .TwoLineAverage import TwoLineAverage
from .segment import HoughLines
from .roi import RegionOfInterest
from .clahe import Clahe, ClaheGray
from .cte import CrossTrackError
from .error import Error, CrossTurnError
from .average import MovingAverage
from . import annotation
from . import angle
from . import state
|
# -*- coding: utf-8 -*-
from sqlalchemy.orm import object_session
"""
This module contains the classes for the models.
"""
class BaseModel(object):
"""
BaseModel provides a base object with a set of generic functions
"""
@classmethod
def populate(cls, **kwargs):
"""
Creates an instance of a class and populates it, returning the instance
"""
me = cls()
keys = kwargs.keys()
for key in keys:
me.__setattr__(key, kwargs[key])
return me
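# Hedged usage sketch (comment only; the attribute name below is illustrative):
#     user = Users.populate(email="someone@example.org")
#     str(user)  # -> "someone@example.org"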
class Users(BaseModel):
def __str__(self):
return self.email
def has_access(self, permission):
for role in self.roles:
for perm in role.permissions:
if perm.name == permission:
return True
return False
class MyProxy(BaseModel):
def __str__(self):
return self.myproxy
class Projects(BaseModel):
def __str__(self):
return self.name
class CalculationTipology(BaseModel):
def __str__(self):
return self.name
class Calculations(BaseModel):
def __str__(self):
return self.name
class Jobs(BaseModel):
def __str__(self):
return self.guid
class Role(BaseModel):
def __str__(self):
return self.name
class Permissions(BaseModel):
def __str__(self):
return self.name
class Menu(BaseModel):
def __str__(self):
return self.name
def can_show(self, member):
if member is None:
return False
if len(self.permissions) == 0:
return True;
for permission in self.permissions:
if member.has_access(permission.name):
return True
return False
|
"""
Checks that Pylint does not complain about various
methods on Django model fields.
"""
# pylint: disable=missing-docstring,wrong-import-position
from django.db import models
from django.db.models import ForeignKey, OneToOneField
class Genre(models.Model):
name = models.CharField(max_length=100)
class Author(models.Model):
author_name = models.CharField(max_length=100)
class ISBN(models.Model):
value = models.CharField(max_length=100)
class Book(models.Model):
book_name = models.CharField(max_length=100)
# Check this works with and without `to` keyword
author = models.ForeignKey(to='Author', on_delete=models.CASCADE)
isbn = models.OneToOneField(to=ISBN, on_delete=models.CASCADE)
genre = models.ForeignKey(Genre, on_delete=models.CASCADE)
def get_isbn(self):
return self.isbn.value
def get_author_name(self):
return self.author.author_name
class Fruit(models.Model):
fruit_name = models.CharField(max_length=20)
class Seed(models.Model):
fruit = ForeignKey(to=Fruit, on_delete=models.CASCADE)
def get_fruit_name(self):
return self.fruit.fruit_name
class User(models.Model):
username = models.CharField(max_length=32)
class UserProfile(models.Model):
user = OneToOneField(User, on_delete=models.CASCADE)
def get_username(self):
return self.user.username
class Human(models.Model):
child = ForeignKey('self', on_delete=models.SET_NULL, null=True)
parent = ForeignKey(to='self', on_delete=models.SET_NULL, null=True)
def get_grandchild(self):
return self.child.child
def get_grandparent(self):
return self.parent.parent
class UserPreferences(models.Model):
"""
Used for testing FK which refers to another model by
string, not model class, see
https://github.com/PyCQA/pylint-django/issues/35
"""
user = ForeignKey('User', on_delete=models.CASCADE)
class UserAddress(models.Model):
user = OneToOneField(to='User', on_delete=models.CASCADE)
line_1 = models.CharField(max_length=100)
line_2 = models.CharField(max_length=100)
city = models.CharField(max_length=100)
postal_code = models.CharField(max_length=100)
|
# Generated by Django 2.1.7 on 2019-04-23 21:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fpiweb', '0011_auto_20190415_0430'),
]
operations = [
migrations.AlterField(
model_name='box',
name='exp_month_end',
field=models.IntegerField(blank=True, help_text='Optional ending month range of when the product expires, if filled.', null=True, verbose_name='Expiration End Month (Optional)'),
),
]
|
from OpenGLCffi.GLES3 import params
@params(api='gles3', prms=['pname', 'value'])
def glBlendParameteriNV(pname, value):
pass
@params(api='gles3', prms=[])
def glBlendBarrierNV():
pass
|
frase = str(input('Write a sentence: ')).upper().strip()
print('Analyzing...')
print('The letter A appears {} times in this sentence'.format(frase.count('A')))
print('The first letter A appears at position {}'.format(frase.find('A') + 1))
print('The last letter A appears at position {}'.format(frase.rfind('A') + 1))
|
from django.apps import AppConfig
class BasesConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'bases'
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: spaceone/api/identity/plugin/auth.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='spaceone/api/identity/plugin/auth.proto',
package='spaceone.api.identity.plugin',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\'spaceone/api/identity/plugin/auth.proto\x12\x1cspaceone.api.identity.plugin\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\"7\n\x0bInitRequest\x12(\n\x07options\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"w\n\rVerifyRequest\x12(\n\x07options\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12,\n\x0bsecret_data\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0e\n\x06schema\x18\x03 \x01(\t\"\x97\x01\n\x0b\x46indRequest\x12(\n\x07options\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12,\n\x0bsecret_data\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07user_id\x18\x03 \x01(\t\x12\x0f\n\x07keyword\x18\x04 \x01(\t\x12\x0e\n\x06schema\x18\x05 \x01(\t\"\xa9\x01\n\x0cLoginRequest\x12(\n\x07options\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12,\n\x0bsecret_data\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x31\n\x10user_credentials\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0e\n\x06schema\x18\x04 \x01(\t\"\xd4\x01\n\x08UserInfo\x12\x0f\n\x07user_id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12\x0e\n\x06mobile\x18\x04 \x01(\t\x12\r\n\x05group\x18\x05 \x01(\t\x12;\n\x05state\x18\x06 \x01(\x0e\x32,.spaceone.api.identity.plugin.UserInfo.State\">\n\x05State\x12\x08\n\x04NONE\x10\x00\x12\x0b\n\x07\x45NABLED\x10\x01\x12\x0c\n\x08\x44ISABLED\x10\x02\x12\x10\n\x0cUNIDENTIFIED\x10\x03\"Y\n\tUsersInfo\x12\x37\n\x07results\x18\x01 \x03(\x0b\x32&.spaceone.api.identity.plugin.UserInfo\x12\x13\n\x0btotal_count\x18\x02 \x01(\x05\":\n\x0e\x41uthVerifyInfo\x12(\n\x07options\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"7\n\nPluginInfo\x12)\n\x08metadata\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct2\xf3\x02\n\x04\x41uth\x12]\n\x04init\x12).spaceone.api.identity.plugin.InitRequest\x1a(.spaceone.api.identity.plugin.PluginInfo\"\x00\x12O\n\x06verify\x12+.spaceone.api.identity.plugin.VerifyRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\\\n\x04\x66ind\x12).spaceone.api.identity.plugin.FindRequest\x1a\'.spaceone.api.identity.plugin.UsersInfo\"\x00\x12]\n\x05login\x12*.spaceone.api.identity.plugin.LoginRequest\x1a&.spaceone.api.identity.plugin.UserInfo\"\x00\x62\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_USERINFO_STATE = _descriptor.EnumDescriptor(
name='State',
full_name='spaceone.api.identity.plugin.UserInfo.State',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENABLED', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DISABLED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNIDENTIFIED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=787,
serialized_end=849,
)
_sym_db.RegisterEnumDescriptor(_USERINFO_STATE)
_INITREQUEST = _descriptor.Descriptor(
name='InitRequest',
full_name='spaceone.api.identity.plugin.InitRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='options', full_name='spaceone.api.identity.plugin.InitRequest.options', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=132,
serialized_end=187,
)
_VERIFYREQUEST = _descriptor.Descriptor(
name='VerifyRequest',
full_name='spaceone.api.identity.plugin.VerifyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='options', full_name='spaceone.api.identity.plugin.VerifyRequest.options', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='secret_data', full_name='spaceone.api.identity.plugin.VerifyRequest.secret_data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schema', full_name='spaceone.api.identity.plugin.VerifyRequest.schema', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=189,
serialized_end=308,
)
_FINDREQUEST = _descriptor.Descriptor(
name='FindRequest',
full_name='spaceone.api.identity.plugin.FindRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='options', full_name='spaceone.api.identity.plugin.FindRequest.options', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='secret_data', full_name='spaceone.api.identity.plugin.FindRequest.secret_data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_id', full_name='spaceone.api.identity.plugin.FindRequest.user_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='keyword', full_name='spaceone.api.identity.plugin.FindRequest.keyword', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schema', full_name='spaceone.api.identity.plugin.FindRequest.schema', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=311,
serialized_end=462,
)
_LOGINREQUEST = _descriptor.Descriptor(
name='LoginRequest',
full_name='spaceone.api.identity.plugin.LoginRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='options', full_name='spaceone.api.identity.plugin.LoginRequest.options', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='secret_data', full_name='spaceone.api.identity.plugin.LoginRequest.secret_data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_credentials', full_name='spaceone.api.identity.plugin.LoginRequest.user_credentials', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='schema', full_name='spaceone.api.identity.plugin.LoginRequest.schema', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=465,
serialized_end=634,
)
_USERINFO = _descriptor.Descriptor(
name='UserInfo',
full_name='spaceone.api.identity.plugin.UserInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='user_id', full_name='spaceone.api.identity.plugin.UserInfo.user_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='spaceone.api.identity.plugin.UserInfo.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='email', full_name='spaceone.api.identity.plugin.UserInfo.email', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mobile', full_name='spaceone.api.identity.plugin.UserInfo.mobile', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='group', full_name='spaceone.api.identity.plugin.UserInfo.group', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='spaceone.api.identity.plugin.UserInfo.state', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_USERINFO_STATE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=637,
serialized_end=849,
)
_USERSINFO = _descriptor.Descriptor(
name='UsersInfo',
full_name='spaceone.api.identity.plugin.UsersInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='spaceone.api.identity.plugin.UsersInfo.results', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_count', full_name='spaceone.api.identity.plugin.UsersInfo.total_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=851,
serialized_end=940,
)
_AUTHVERIFYINFO = _descriptor.Descriptor(
name='AuthVerifyInfo',
full_name='spaceone.api.identity.plugin.AuthVerifyInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='options', full_name='spaceone.api.identity.plugin.AuthVerifyInfo.options', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=942,
serialized_end=1000,
)
_PLUGININFO = _descriptor.Descriptor(
name='PluginInfo',
full_name='spaceone.api.identity.plugin.PluginInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='metadata', full_name='spaceone.api.identity.plugin.PluginInfo.metadata', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1002,
serialized_end=1057,
)
_INITREQUEST.fields_by_name['options'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_VERIFYREQUEST.fields_by_name['options'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_VERIFYREQUEST.fields_by_name['secret_data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_FINDREQUEST.fields_by_name['options'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_FINDREQUEST.fields_by_name['secret_data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_LOGINREQUEST.fields_by_name['options'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_LOGINREQUEST.fields_by_name['secret_data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_LOGINREQUEST.fields_by_name['user_credentials'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_USERINFO.fields_by_name['state'].enum_type = _USERINFO_STATE
_USERINFO_STATE.containing_type = _USERINFO
_USERSINFO.fields_by_name['results'].message_type = _USERINFO
_AUTHVERIFYINFO.fields_by_name['options'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_PLUGININFO.fields_by_name['metadata'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
DESCRIPTOR.message_types_by_name['InitRequest'] = _INITREQUEST
DESCRIPTOR.message_types_by_name['VerifyRequest'] = _VERIFYREQUEST
DESCRIPTOR.message_types_by_name['FindRequest'] = _FINDREQUEST
DESCRIPTOR.message_types_by_name['LoginRequest'] = _LOGINREQUEST
DESCRIPTOR.message_types_by_name['UserInfo'] = _USERINFO
DESCRIPTOR.message_types_by_name['UsersInfo'] = _USERSINFO
DESCRIPTOR.message_types_by_name['AuthVerifyInfo'] = _AUTHVERIFYINFO
DESCRIPTOR.message_types_by_name['PluginInfo'] = _PLUGININFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InitRequest = _reflection.GeneratedProtocolMessageType('InitRequest', (_message.Message,), {
'DESCRIPTOR' : _INITREQUEST,
'__module__' : 'spaceone.api.identity.plugin.auth_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.identity.plugin.InitRequest)
})
_sym_db.RegisterMessage(InitRequest)
VerifyRequest = _reflection.GeneratedProtocolMessageType('VerifyRequest', (_message.Message,), {
'DESCRIPTOR' : _VERIFYREQUEST,
'__module__' : 'spaceone.api.identity.plugin.auth_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.identity.plugin.VerifyRequest)
})
_sym_db.RegisterMessage(VerifyRequest)
FindRequest = _reflection.GeneratedProtocolMessageType('FindRequest', (_message.Message,), {
'DESCRIPTOR' : _FINDREQUEST,
'__module__' : 'spaceone.api.identity.plugin.auth_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.identity.plugin.FindRequest)
})
_sym_db.RegisterMessage(FindRequest)
LoginRequest = _reflection.GeneratedProtocolMessageType('LoginRequest', (_message.Message,), {
'DESCRIPTOR' : _LOGINREQUEST,
'__module__' : 'spaceone.api.identity.plugin.auth_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.identity.plugin.LoginRequest)
})
_sym_db.RegisterMessage(LoginRequest)
UserInfo = _reflection.GeneratedProtocolMessageType('UserInfo', (_message.Message,), {
'DESCRIPTOR' : _USERINFO,
'__module__' : 'spaceone.api.identity.plugin.auth_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.identity.plugin.UserInfo)
})
_sym_db.RegisterMessage(UserInfo)
UsersInfo = _reflection.GeneratedProtocolMessageType('UsersInfo', (_message.Message,), {
'DESCRIPTOR' : _USERSINFO,
'__module__' : 'spaceone.api.identity.plugin.auth_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.identity.plugin.UsersInfo)
})
_sym_db.RegisterMessage(UsersInfo)
AuthVerifyInfo = _reflection.GeneratedProtocolMessageType('AuthVerifyInfo', (_message.Message,), {
'DESCRIPTOR' : _AUTHVERIFYINFO,
'__module__' : 'spaceone.api.identity.plugin.auth_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.identity.plugin.AuthVerifyInfo)
})
_sym_db.RegisterMessage(AuthVerifyInfo)
PluginInfo = _reflection.GeneratedProtocolMessageType('PluginInfo', (_message.Message,), {
'DESCRIPTOR' : _PLUGININFO,
'__module__' : 'spaceone.api.identity.plugin.auth_pb2'
# @@protoc_insertion_point(class_scope:spaceone.api.identity.plugin.PluginInfo)
})
_sym_db.RegisterMessage(PluginInfo)
_AUTH = _descriptor.ServiceDescriptor(
name='Auth',
full_name='spaceone.api.identity.plugin.Auth',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1060,
serialized_end=1431,
methods=[
_descriptor.MethodDescriptor(
name='init',
full_name='spaceone.api.identity.plugin.Auth.init',
index=0,
containing_service=None,
input_type=_INITREQUEST,
output_type=_PLUGININFO,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='verify',
full_name='spaceone.api.identity.plugin.Auth.verify',
index=1,
containing_service=None,
input_type=_VERIFYREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='find',
full_name='spaceone.api.identity.plugin.Auth.find',
index=2,
containing_service=None,
input_type=_FINDREQUEST,
output_type=_USERSINFO,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='login',
full_name='spaceone.api.identity.plugin.Auth.login',
index=3,
containing_service=None,
input_type=_LOGINREQUEST,
output_type=_USERINFO,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_AUTH)
DESCRIPTOR.services_by_name['Auth'] = _AUTH
# @@protoc_insertion_point(module_scope)
|
x = f"Hello {f' my name is {<caret>'}" |
import numpy as np
import random
import itertools
import Levenshtein
import copy
import pickle
import os
class stack:
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[len(self.items) - 1]
def size(self):
return len(self.items)
class TreeSelector():
def __init__(self, max_num_limbs, use_2dof=False, require_code_graph=False):
self.max_num_limbs = max_num_limbs
self.use_2dof = use_2dof
self.codes_graph = {}
self.codes_by_num_limbs = [[] for i in range(max_num_limbs + 1)]
if self.use_2dof:
self.codes_by_num_limbs[0:2] = [["10"], ["1100", "1200"]]
else:
self.codes_by_num_limbs[0:2] = [["10"], ["1100"]]
if require_code_graph:
# If pickle already exists, use it; if not, create one
if use_2dof:
dof = 2
else:
dof = 1
code_list_name = os.path.join("eagent", "data", f"code_data_{max_num_limbs}_{dof}.pkl")
code_graph_name = os.path.join("eagent", "data", f"code_graph_{max_num_limbs}_{dof}.pkl")
if os.path.exists(code_list_name) and os.path.exists(code_graph_name):
print("load codes")
with open(code_list_name, "rb") as f:
self.codes_by_num_limbs = pickle.load(f)
with open(code_graph_name, "rb") as f:
self.codes_graph = pickle.load(f)
else:
print("prepare codes")
# This method creates self.codes_by_num_limbs and self.codes_graph
self.__prepare_codes(max_num_limbs)
with open(code_list_name, "wb") as f:
pickle.dump(self.codes_by_num_limbs, f)
with open(code_graph_name, "wb") as f:
pickle.dump(self.codes_graph, f)
# for BFS
self.searched_set = set([x for x1 in self.codes_by_num_limbs[0:2] for x in x1])
self.current_search_set = set()
self.next_search_set = set()
self.pivot_edges = []
self.pivot_code = ""
def update_pivot(self, edges):
code = self.edges2code(edges)
if code != self.pivot_code:
self.searched_set.add(code)
self.current_search_set = set(self.codes_graph[code])
self.next_search_set = set()
self.pivot_edges = copy.deepcopy(edges)
self.pivot_code = code
# update_pivot must be called before this method
def select_next_edges(self):
while True:
if len(self.current_search_set) == 0:
self.current_search_set = self.next_search_set
self.next_search_set = set()
code = random.choice(list(self.current_search_set))
self.current_search_set.remove(code)
for ns in self.codes_graph[code]:
self.next_search_set.add(ns)
if code not in self.searched_set:
break
self.searched_set.add(code)
edges = self.__code2edges(code)
        # At this point the edges carry arbitrary node ids.
        # Relabel them to match the pivot as closely as possible
        # by building a map of the required changes.
tree = [[] for i in range(self.max_num_limbs + 1)]
pivot_tree = [[] for i in range(self.max_num_limbs + 1)]
for parent, child, dof in edges:
tree[parent].append([child, dof])
for parent, child, dof in self.pivot_edges:
pivot_tree[parent].append([child, dof])
node_map = self.__compare_trees(pivot_tree, -1, tree, -1, {})
# Apply the map
temp_edges = copy.deepcopy(edges)
num_pending = len([x for x in node_map.values() if x <= -2])
vacant_ids = random.sample(
np.setdiff1d(range(self.max_num_limbs), list(node_map.values())).tolist(),
num_pending
)
edges = []
for temp_parent, temp_child, dof in temp_edges:
parent = node_map[temp_parent]
child = node_map[temp_child]
if parent <= -2:
parent = vacant_ids[-parent - 2]
if child <= -2:
child = vacant_ids[-child - 2]
edges.append([parent, child, dof])
return edges
def select_random_changed_edges(self, old_edges):
parent_nodes = old_edges[:, 0]
child_nodes = old_edges[:, 1]
leaf_nodes = np.setdiff1d(child_nodes, parent_nodes)
cmd_queue = []
for node in child_nodes:
cmd_queue.append([node, "increase"])
for node in leaf_nodes:
cmd_queue.append([node, "decrease"])
while True:
cmd = random.choice(cmd_queue)
if len(old_edges) >= self.max_num_limbs and cmd[1] == "increase":
continue
else:
break
new_edges = []
if cmd[1] == "increase":
vacant_nodes = np.setdiff1d(range(self.max_num_limbs), child_nodes)
new_node_id = random.choice(vacant_nodes)
for i, j, dof in old_edges:
new_edges.append([i, j, dof])
new_edges.append([cmd[0], new_node_id, 1])
elif cmd[1] == "decrease":
for i, j, dof in old_edges:
if j == cmd[0]:
continue
new_edges.append([i, j, dof])
return new_edges
    # Sentinel convention: a value <= -2 in node_map marks a node whose final id will be assigned later.
def __compare_trees(self, base_tree, base_current, new_tree, new_current, node_map):
node_map.update({new_current: base_current})
        # Each entry is [node id, code of the subtree below it, flag that stays True while the node is still unmatched]
new_children = []
for c, dof in new_tree[new_current]:
new_children.append([c, self.__tree2code(new_tree, c, dof), True])
if base_current < -1:
# Since the new_tree is calculating a part of the tree that is no longer in the base_tree,
# we look at the tree while assigning a new negative id
for new_child, _, _ in new_children:
node_map.update(self.__compare_trees(base_tree, base_current - 1, new_tree, new_child, node_map))
else:
base_children = []
for c, dof in base_tree[base_current]:
base_children.append([c, self.__tree2code(base_tree, c, dof), True])
for i in range(len(base_children)):
base_child, base_child_code, _ = base_children[i]
new_child = None
for j in range(len(new_children)):
temp_new_child, temp_new_child_code, b = new_children[j]
if not b:
continue
if temp_new_child_code == base_child_code:
new_child = temp_new_child
break
if new_child is None:
                    # This child node has changed in some way; skip it for now and
                    # revisit it after all the other nodes have been examined.
pass
else:
base_children[i][2] = False
new_children[j][2] = False
# node_map[new_child] = base_child
node_map.update(self.__compare_trees(base_tree, base_child, new_tree, new_child, node_map))
# Finding nodes left over
base_child_rem = [[child, code] for child, code, b in base_children if b]
new_child_rem = [[child, code] for child, code, b in new_children if b]
if len(new_child_rem) == 0:
                # Rigid bodies that remain only in base simply disappear, so they can be ignored
pass
else:
new_child_patterns = itertools.permutations(new_child_rem)
min_distance = 99999
best_new_child_pattern = []
for new_child_p in new_child_patterns:
dist = 0
for i in range(len(new_child_p)):
if i >= len(base_child_rem):
dist += Levenshtein.distance(new_child_p[i][1], "")
else:
dist += Levenshtein.distance(new_child_p[i][1], base_child_rem[i][1])
for i in range(len(new_child_p), len(base_child_rem)):
dist += Levenshtein.distance("", base_child_rem[i][1])
if dist < min_distance:
min_distance = dist
best_new_child_pattern = new_child_p
for i in range(len(best_new_child_pattern)):
new_child = best_new_child_pattern[i][0]
if i >= len(base_child_rem):
                        # Pass a value smaller than anything already in node_map (the first such value is -2)
node_map.update(self.__compare_trees(
base_tree,
min(min(node_map.values()), -1) - 1,
new_tree,
new_child,
node_map
))
else:
node_map.update(self.__compare_trees(
base_tree, base_child_rem[i][0], new_tree, new_child, node_map))
return node_map
def __prepare_codes(self, n=None):
if n is None:
n = self.max_num_limbs
for i in range(n + 1):
print(i)
if i <= 1:
continue
parent_codes = self.codes_by_num_limbs[i - 1]
child_codes = []
for j in range(len(parent_codes)):
if j % 100000 == 0:
print(f"i: {i}, j: {j} / {len(parent_codes)}")
parent_code = parent_codes[j]
neightbor_codes = self.__generate_neighbor_codes(parent_code)
child_codes.extend([s for s in neightbor_codes if len(s) == (i + 1) * 2])
# make a graph
for neightbor_code in neightbor_codes:
if parent_code in self.codes_graph:
if neightbor_code not in self.codes_graph[parent_code]:
self.codes_graph[parent_code].append(neightbor_code)
else:
self.codes_graph[parent_code] = [neightbor_code]
if neightbor_code in self.codes_graph:
if parent_code not in self.codes_graph[neightbor_code]:
self.codes_graph[neightbor_code].append(parent_code)
else:
self.codes_graph[neightbor_code] = [parent_code]
# list(set()) to remove duplicates
child_codes = list(set(child_codes))
self.codes_by_num_limbs[i] = child_codes
def __code2edges(self, code):
node_cnt = -2
edges = []
# [node id, DoF]
s = stack()
for i in range(len(code)):
if code[i] == "1":
node_cnt += 1
s.push([node_cnt, 1])
elif code[i] == "2":
node_cnt += 1
s.push([node_cnt, 2])
elif code[i] == "0":
child, dof = s.pop()
if not s.is_empty():
parent, _ = s.peek()
edges.append([parent, child, dof])
return np.array(edges)
def __tree2code(self, tree, current, dof):
child_codes = []
for child, d in tree[current]:
child_codes.append(self.__tree2code(tree, child, d))
code = str(dof)
for child_code in sorted(child_codes):
code += child_code
code += "0"
return code
def edges2code(self, edges):
tree = [[] for i in range(self.max_num_limbs + 1)]
for parent, child, dof in edges:
tree[parent].append([child, dof])
return self.__tree2code(tree, -1, 1)
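    # A worked example of the encoding (my reading of the two methods above, stated as an
    # assumption rather than the author's documentation): edges2code serializes the tree
    # rooted at the torso (node -1) into a string of DoF digits and closing "0"s, and
    # __code2edges parses such a string back into edges (assigning fresh node ids in
    # traversal order). For a single limb on the torso, edges [[-1, 0, 1]] encode as
    # "1100", and __code2edges("1100") returns [[-1, 0, 1]]. A structure with n limbs
    # therefore has a code of length 2 * (n + 1), which is the length check used in
    # __prepare_codes.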
def __generate_neighbor_codes(self, code):
edges = self.__code2edges(code)
parent_nodes = edges[:, 0]
child_nodes = edges[:, 1]
leaf_nodes = np.setdiff1d(child_nodes, parent_nodes)
vacant_nodes = np.setdiff1d(range(self.max_num_limbs), child_nodes)
neighbor_codes = []
if self.use_2dof:
dofs = [1, 2]
else:
dofs = [1]
for c in child_nodes:
            # If there is at least one vacant node, generate a graph with one more rigid body and store its code
if len(vacant_nodes) > 0:
for dof in dofs:
e = [c, vacant_nodes[0], dof]
added_code = self.edges2code(np.concatenate([edges, [e]]))
neighbor_codes.append(added_code)
# Generate a graph in which one rigid body vanishes and store the code
if c in leaf_nodes:
removed_code = self.edges2code(edges[edges[:, 1] != c])
neighbor_codes.append(removed_code)
        # If there is at least one vacant node, generate a graph with one new rigid body attached to the torso
if len(vacant_nodes) > 0:
for dof in dofs:
e = [-1, vacant_nodes[0], dof]
added_code = self.edges2code(np.concatenate([edges, [e]]))
neighbor_codes.append(added_code)
return list(set(neighbor_codes))
if __name__ == "__main__":
ts = TreeSelector(4, use_2dof=False)
for key in ts.codes_graph:
print("-----")
print(key, ts.codes_graph[key])
# print([len(x) for x in ts.codes_by_num_limbs])
# ts.update_pivot([
# [-1, 0, 1],
# [-1, 1, 1],
# [-1, 2, 1],
# [-1, 3, 1],
# [-1, 4, 1],
# [-1, 5, 1]
# ])
# for _ in range(6):
# for i in range(8):
# edges = ts.select_next_edges()
# print(edges)
# if i == 2:
# new_piv = edges
# ts.update_pivot(new_piv)
|
from .base_installer import FlaskExtInstaller
import os
from ..config import TAB
class FlaskLoginInstaller(FlaskExtInstaller):
package_name = "Flask-Login"
imports = ["from flask_login import LoginManager"]
inits = ["login_manager = LoginManager()"]
attachments = ["login_manager.init_app(app)"]
decorators = [
f"@login_manager.user_loader",
f"def load_user(user_id):",
f"{TAB}# For Flask-Login: This callback is used to reload the user object from the user ID stored in the session.",
f"{TAB}# eg. 'return User.query.get(user_id)'",
f"{TAB}return",
"", # left blank for spacing
]
|
"""empty message
Revision ID: 5ac4e9a16950
Revises: 731438255690
Create Date: 2020-09-27 14:58:05.837249
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5ac4e9a16950'
down_revision = '731438255690'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'users', ['nonce'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'users', type_='unique')
# ### end Alembic commands ###
|
class BaseQueue(object):
"""
Base class for queues
    settings_namespace is a class attribute that is used to look up the parameters
    needed to create a new queue instance from a settings file.
"""
settings_namespace = None
def report(self, name, metric, value, tags, id_):
        raise NotImplementedError()
|
from .smc import SMC
try:
from importlib.metadata import version # type: ignore
except ImportError:
from importlib_metadata import version # type: ignore
# from .version import __version__
__author__ = 'Kevin J. Walchko'
__license__ = 'MIT'
__version__ = version("smc")
|
import tempfile
from multiprocessing import Pool, get_context
from itertools import combinations
from typing import List
from . import discovery
from .clustering_utils import generate_global_ranks, process_columns, ingestion_column_generator, process_emd
from ..base_matcher import BaseMatcher
from ..match import Match
from ...data_sources.base_column import BaseColumn
from ...data_sources.base_table import BaseTable
class DistributionBased(BaseMatcher):
"""
A class that contains the data and methods required for the algorithms proposed in
"Automatic Discovery of Attributes in Relational Databases" from M. Zhang et al. [1]
Attributes
----------
threshold1: float
The threshold for phase 1
threshold2: float
The threshold for phase 2
quantiles: int
the number of quantiles of the histograms
process_num: int
The number of processes to spawn
Methods
-------
find_matches(pool, chunk_size)
        Returns a dictionary with matches and their similarity
rank_output(attribute_clusters)
        Takes the attribute clusters that the algorithm produces and gives a ranked list of matches based on the EMD
between each pair inside an attribute cluster
"""
def __init__(self,
threshold1: float = 0.15,
threshold2: float = 0.15,
quantiles: int = 256,
process_num: int = 1):
"""
Parameters
----------
threshold1: float
The threshold for phase 1
threshold2: float
The threshold for phase 2
quantiles: int
the number of quantiles of the histograms
process_num: int
The number of processes to spawn
"""
self.__quantiles: int = int(quantiles)
self.__threshold1: float = float(threshold1)
self.__threshold2: float = float(threshold2)
self.__process_num: int = int(process_num)
self.__column_names: list = []
self.__target_name: str = ""
def get_matches(self,
source_input: BaseTable,
target_input: BaseTable):
"""
        Overridden function of BaseMatcher that takes the source and target data loaders and the dataset name,
        and outputs a ranked list of column-pair matches.
Returns
-------
dict
A dictionary with matches and their similarity
"""
self.__target_name = target_input.name
all_tables: List[BaseTable] = [source_input, target_input]
with tempfile.TemporaryDirectory() as tmp_folder_path:
data = []
for table in all_tables:
for column in table.get_columns():
data.extend(column.data)
generate_global_ranks(data, tmp_folder_path)
del data
if self.__process_num == 1:
for table in all_tables:
self.__column_names.extend([(table.name, table.unique_identifier,
x.name, x.unique_identifier) for x in table.get_columns()])
columns: List[BaseColumn] = table.get_columns()
for tup in ingestion_column_generator(columns,
table.name,
table.unique_identifier,
self.__quantiles,
tmp_folder_path):
process_columns(tup)
matches = self.__find_matches(tmp_folder_path)
else:
with get_context("spawn").Pool(self.__process_num) as process_pool:
for table in all_tables:
self.__column_names.extend([(table.name, table.unique_identifier,
x.name, x.unique_identifier) for x in table.get_columns()])
columns: List[BaseColumn] = table.get_columns()
process_pool.map(process_columns, ingestion_column_generator(columns,
table.name,
table.unique_identifier,
self.__quantiles,
tmp_folder_path), chunksize=1)
matches = self.__find_matches_parallel(tmp_folder_path, process_pool)
return matches
def __find_matches(self, tmp_folder_path: str):
connected_components = discovery.compute_distribution_clusters(self.__column_names,
self.__threshold1,
tmp_folder_path,
self.__quantiles)
all_attributes = list()
i = 1
for components in connected_components:
if len(components) > 1:
i = i + 1
edges = discovery.compute_attributes(list(components),
self.__threshold2,
tmp_folder_path,
self.__quantiles)
all_attributes.append((list(components), edges))
results = list()
for components, edges in all_attributes:
results.append(discovery.correlation_clustering_pulp(components, edges))
attribute_clusters = discovery.process_correlation_clustering_result(results, self.__column_names)
return self.__rank_output(attribute_clusters, tmp_folder_path)
def __find_matches_parallel(self,
tmp_folder_path: str,
pool: Pool):
"""
"Main" function of [1] that will calculate first the distribution clusters and then the attribute clusters
Parameters
---------
tmp_folder_path: str
The path of the temporary folder that will serve as a cache for the run
pool: multiprocessing.Pool
the process pool that will be used in the algorithms 1, 2 and 3 of [1]
"""
connected_components = discovery.compute_distribution_clusters_parallel(self.__column_names,
self.__threshold1,
pool,
tmp_folder_path,
self.__quantiles)
all_attributes = list()
i = 1
for components in connected_components:
if len(components) > 1:
i = i + 1
edges = discovery.compute_attributes_parallel(list(components),
self.__threshold2,
pool,
tmp_folder_path,
self.__quantiles)
all_attributes.append((list(components), edges))
results = list()
for components, edges in all_attributes:
results.append(discovery.correlation_clustering_pulp(components, edges))
attribute_clusters = discovery.process_correlation_clustering_result(results, self.__column_names)
return self.__rank_output(attribute_clusters, tmp_folder_path)
def __rank_output(self,
attribute_clusters: iter,
tmp_folder_path: str):
"""
        Takes the attribute clusters that the algorithm produces and gives a ranked list of matches based on the EMD
        between each pair inside an attribute cluster. The ranked list will look like:
((table_name1, column_name1), (table_name2, column_name2)): similarity
Parameters
----------
attribute_clusters: list
The attribute clusters
tmp_folder_path: str
The path of the temporary folder that will serve as a cache for the run
Returns
-------
dict
A ranked list that will look like: ((table_name1, column_name1), (table_name2, column_name2)): similarity
"""
matches = {}
for cluster in attribute_clusters:
if len(cluster) > 1:
for combination in combinations(cluster, 2):
table1 = combination[0][0]
table2 = combination[1][0]
if table1 != table2:
k, emd = process_emd(((combination[0], combination[1]),
self.__quantiles,
False,
tmp_folder_path))
sim = 1 / (1 + emd)
tn_i, tguid_i, cn_i, cguid_i = k[0]
tn_j, tguid_j, cn_j, cguid_j = k[1]
if self.__target_name == tn_i:
matches.update(Match(tn_i, cn_i,
tn_j, cn_j,
sim)
.to_dict)
else:
matches.update(Match(tn_j, cn_j,
tn_i, cn_i,
sim)
.to_dict)
return matches
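# A minimal usage sketch (hedged): `source_table` and `target_table` stand in for whatever
# concrete BaseTable implementation this code base provides; they are placeholders here.
#
#   matcher = DistributionBased(threshold1=0.15, threshold2=0.15, quantiles=256, process_num=1)
#   matches = matcher.get_matches(source_table, target_table)
#   # matches maps ((table_name1, column_name1), (table_name2, column_name2)) -> similarity in (0, 1]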
|
import logging
from typing import Any
import rasa.utils.common
from rasa.utils.common import RepeatedLogFilter
def test_repeated_log_filter():
log_filter = RepeatedLogFilter()
record1 = logging.LogRecord(
"rasa", logging.INFO, "/some/path.py", 42, "Super msg: %s", ("yes",), None
)
record1_same = logging.LogRecord(
"rasa", logging.INFO, "/some/path.py", 42, "Super msg: %s", ("yes",), None
)
record2_other_args = logging.LogRecord(
"rasa", logging.INFO, "/some/path.py", 42, "Super msg: %s", ("no",), None
)
record3_other = logging.LogRecord(
"rasa", logging.INFO, "/some/path.py", 42, "Other msg", (), None
)
assert log_filter.filter(record1) is True
assert log_filter.filter(record1_same) is False # same log
assert log_filter.filter(record2_other_args) is True
assert log_filter.filter(record3_other) is True
assert log_filter.filter(record1) is True # same as before, but not repeated
async def test_call_maybe_coroutine_with_async() -> Any:
expected = 5
async def my_function():
return expected
actual = await rasa.utils.common.call_potential_coroutine(my_function())
assert actual == expected
async def test_call_maybe_coroutine_with_sync() -> Any:
expected = 5
def my_function():
return expected
actual = await rasa.utils.common.call_potential_coroutine(my_function())
assert actual == expected
|
import time
__author__ = 'Junior Teudjio'
def timer(function):
def wrapper(*pargs, **kargs):
t0 = time.time()
result = function(*pargs, **kargs)
t1 = time.time()
        print('%s took %s seconds' % (function.__name__, t1 - t0))
return result
return wrapper
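# A minimal usage sketch (not part of the original module; `slow_add` is a made-up
# example): decorating a function makes each call print its wall-clock duration.
@timer
def slow_add(a, b):
    time.sleep(0.25)
    return a + b

if __name__ == '__main__':
    slow_add(1, 2)  # prints something like: slow_add took 0.25... seconds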
|
from typing import List, Optional
import requests
from arguments import Arguments
from installer import Installer
_URL = "http://0.0.0.0:8080"
_URL_TOOLS = "registry/tools"
_URL_DATASETS = "registry/datasets"
_URL_PIPELINE_REG = "registry/pipelines"
_URL_PIPELINE = "pipeline"
def get_tools(name: Optional[str] = None):
if not name or name.strip() == "":
r = requests.get(f"{_URL}/{_URL_TOOLS}")
print(r.json())
else:
r = requests.get(f"{_URL}/{_URL_TOOLS}/{name}")
print(r.json())
def get_datasets(name: Optional[str] = None):
if not name or name.strip() == "":
r = requests.get(f"{_URL}/{_URL_DATASETS}")
print(r.json())
else:
r = requests.get(f"{_URL}/{_URL_DATASETS}/{name}")
print(r.json())
def get_pipelines(name: Optional[str] = None):
if not name or name.strip() == "":
r = requests.get(f"{_URL}/{_URL_PIPELINE}")
_pipelines = [p["name"] for p in r.json()]
print(_pipelines)
else:
r = requests.get(f"{_URL}/{_URL_PIPELINE_REG}/{name}")
print(r.json())
def get_jobs():
r = requests.get(f"{_URL}/jobs")
print(r.json())
def add_tool(name, author, image, data_repo, code_repo, artefact):
json_out = {
"name": name,
"author": author,
"image": image,
"data_repo": data_repo,
"code_repo": code_repo,
"artefact": artefact,
}
r = requests.post(f"{_URL}/{_URL_TOOLS}", json=json_out)
print(r.json())
def add_dataset(name, url):
json_out = {
"name": name,
"body": {
"master": url,
"nodes": [],
},
}
r = requests.post(f"{_URL}/{_URL_DATASETS}", json=json_out)
print(r.json())
def add_repo(name: Optional[str] = None):
if not name or name.strip() == "":
print("Please provide a repository name")
return None
req = dict()
req["name"] = name
r = requests.post(f"{_URL}/bare-repo/init", json=req)
if r.ok:
print("Successfully initialized a local repo")
else:
print("Repo initialization failed")
print(r.json())
def remove_tool(name: Optional[str] = None):
if not name or name.strip() == "":
print("Please provide a tool name")
return None
r = requests.delete(f"{_URL}/{_URL_TOOLS}/{name}")
print(r.json())
def remove_dataset(name: Optional[str] = None):
if not name or name.strip() == "":
print("Please provide a dataset name")
return None
r: requests.Response = requests.delete(f"{_URL}/{_URL_DATASETS}/{name}")
if r.ok:
print(f"Dataset '{name}' successfully deleted.")
else:
print(f"There was an error deleting '{name}'.")
def stop(id):
r = requests.delete(f"{_URL}/jobs/{id}")
print(r.json())
def __build_pipeline_step() -> dict:
step = dict()
step["name"] = promt_for_valid_identifier("Pleasre provide the pipeline step name. ex: step_1\nStep name: ")
step["tool"] = promt_for_non_empty_str(
"Please enter the tool identifier that is used in the step.\nTool identifier: "
)
step["input_dataset"] = input("Please enter the tool input_dataset.\nTool input_dataset: ")
if step["input_dataset"].strip() == "":
step["input_dataset"] = None
step["output_dataset"] = promt_for_non_empty_str("Please enter the tool output_dataset.\nTool output_dataset: ")
step["docker_socket"] = yes_no_promt("Does the tool require a docker socket?")
# TODO: update these with relevant data.
step["cmd"] = []
step["env"] = {}
return step
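# For reference, a step built by __build_pipeline_step has this shape (the values below
# are hypothetical examples, not defaults):
#   {"name": "step_1", "tool": "my_tool", "input_dataset": None,
#    "output_dataset": "out_ds", "docker_socket": False, "cmd": [], "env": {}}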
def yes_no_promt(message: str) -> bool:
while True:
q = input(f"{message} [y/n]: ")
if q == "y":
return True
elif q == "n":
return False
else:
print("\nPlease choose between y - Yes and n - No\n")
def promt_for_valid_identifier(message: str) -> str:
name_valid: bool = False
name_promt: Optional[str] = f"{message}"
while not name_valid:
name: str = input(name_promt)
name = name.strip()
name_valid = name.isidentifier()
if not name_valid:
print(f"\nName '{name}' cannot be used as an identifier. Please enter a valid name\n")
name_promt = message.split("\n")[-1]
return name
def promt_for_non_empty_str(message: str) -> str:
str_valid: bool = False
while not str_valid:
value: str = input(message)
value = value.strip()
str_valid = value != ""
if not str_valid:
print(f"\nPlease enter a non-empty value\n")
return value
def add_pipeline():
pipeline = dict()
pipeline["name"] = promt_for_valid_identifier(
"Please provide a pipeline name. ex: my_awesome_pipeline\nPipeline name: "
)
pipeline["description"] = input(
"Please provide pipeline description ex. Pipeline to evaluate system performance\nPipeline description: "
)
steps: List[dict] = []
print("\nPlease create the pipeline steps\n")
while len(steps) == 0 or yes_no_promt("Add another step?"):
step: dict = __build_pipeline_step()
steps.append(step)
pipeline["steps"] = steps
r: requests.Response = requests.post(f"{_URL}/pipeline/init", json=pipeline)
if r.ok:
print("Pipeline created successfully")
print(r.json())
else:
resp = r.json()
print("Pipeline creation failed")
print(resp["errors"])
def run_pipeline(name: Optional[str] = None):
if not name or name.strip() == "":
print("Please provide a pipeline name")
return None
req = dict()
req["name"] = name
req["cron"] = None # TODO(fix)
r: requests.Response = requests.post(f"{_URL}/pipeline/run", json=req)
if not r.ok:
print("There was an error running the pipeline")
print(r.json())
if __name__ == "__main__":
arguments = Arguments()
args = arguments.parse_args()
if args.command == "tool":
if args.tool == "get":
if args.name:
get_tools(args.name)
else:
get_tools()
elif args.tool == "add":
add_tool(args.name, args.author, args.image, args.data_repo, args.code_repo, args.artefact)
elif args.tool == "remove":
remove_tool(args.name)
elif args.tool == "list-scheduled":
get_jobs()
elif args.tool == "stop":
stop(args.id)
elif args.command == "instance":
if args.instance == "install":
Installer().install()
elif args.instance == "init":
Installer().initialize()
elif args.command == "dataset":
if args.dataset == "get":
if args.name:
get_datasets(args.name)
else:
get_datasets()
elif args.dataset == "add":
add_dataset(args.name, args.git_url)
elif args.dataset == "remove":
remove_dataset(args.name)
elif args.command == "pipeline":
if args.pipeline == "get":
if args.name:
get_pipelines(args.name)
else:
get_pipelines()
elif args.pipeline == "add":
add_pipeline()
elif args.pipeline == "run":
run_pipeline(args.name)
elif args.command == "repo":
if args.repo == "add":
add_repo(args.name)
|
from provider import OVHProvider
class DNSConfig:
def __init__(self, record, target, provider, config):
if provider == "ovh":
self.provider = OVHProvider(config_file=config)
self.target = target
self.record = record
chunk = str(record).rsplit(".", maxsplit=2)
if len(chunk) < 3:
raise Exception("I'm able to deploy only subdomain")
self.subdomain = chunk[0]
self.zonename = ".".join(chunk[1:])
def deploy(self):
self.provider.deploy_record(zonename=self.zonename, subdomain=self.subdomain, target=self.target)
def delete(self):
self.provider.delete_record(zonename=self.zonename, subdomain=self.subdomain)
def __str__(self):
return "DNSConfig{record=%s,target=%s,provider=%s}" % (self.record,self.target,self.provider)
def __unicode__(self):
return self.__str__()
def __repr__(self):
return self.__str__()
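# A minimal usage sketch (the record, target, and config path are hypothetical):
#   cfg = DNSConfig(record="www.example.com", target="203.0.113.10",
#                   provider="ovh", config="ovh.conf")
#   cfg.deploy()   # creates the "www" record in the "example.com" zone
#   cfg.delete()   # removes it again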
|
import json, re, sys, evaluation
import numpy as np
from keras.layers import Dense, LSTM, Input
from keras.layers.embeddings import Embedding
from keras.models import Model
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import np_utils
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.class_weight import compute_sample_weight, compute_class_weight
from utilities import word2vecReader
from utilities import tokenizer
from keras.layers import Layer
from keras import backend as K
from keras import initializers
from wordsegment import load, segment
load()
reload(sys)
sys.setdefaultencoding('utf8')
vocabSize = 10020
tweetLength = 25
posEmbLength = 25
embeddingVectorLength = 200
embeddingPOSVectorLength = 200
charLengthLimit = 20
batch_size = 100
dayMapper = {'Mon': 'DDMON', 'Tue': 'DDTUE', 'Wed': 'DDWED', 'Thu': 'DDTHU', 'Fri': 'DDFRI', 'Sat': 'DDSAT', 'Sun': 'DDSUN'}
POSMapper = {'N': 'N', 'O': 'O', '^': 'AA', 'S': 'S', 'Z': 'Z', 'L': 'L', 'M': 'M',
'V': 'V', 'A': 'A', 'R': 'R', '@': 'BB', '#': 'CC', '~': 'DD', 'E': 'E', ',': 'EE', 'U': 'U',
'!': 'FF', 'D': 'D', 'P': 'P', '&': 'GG', 'T': 'T', 'X': 'X', 'Y': 'Y', '$': 'HH', 'G': 'G'}
POSMapper2 = {'N': 'N', 'O': 'O', 'AA': '^', 'S': 'S', 'Z': 'Z', 'L': 'L', 'M': 'M',
'V': 'V', 'A': 'A', 'R': 'R', 'BB': '@', 'CC': '#', 'DD': '~', 'E': 'E', 'EE': ',', 'U': 'U',
'FF': '!', 'D': 'D', 'P': 'P', 'GG': '&', 'T': 'T', 'X': 'X', 'Y': 'Y', 'HH': '$', 'G': 'G'}
def hourMapper(hour):
input = int(hour)
if 0 <= input < 6:
output = 'HHELMO'
elif 6 <= input < 12:
output = 'HHMORN'
elif 12 <= input < 18:
output = 'HHAFTE'
else:
output = 'HHNIGH'
return output
def cleanContent(input, hashtag=True, breakEmoji=True):
#print input
input = input.replace('\n', ' ').replace('\r', ' ').replace('#', ' #')
input = removeLinks(input)
output = ''
for word in tokenizer.simpleTokenize(input):
if breakEmoji:
emojis1 = re.findall(r'\\u....', word.encode('unicode-escape'))
emojis2 = re.findall(r'\\U........', word.encode('unicode-escape'))
emojis = emojis1 + emojis2
if (not hashtag) and word.startswith('#'):
segTemp = segment(word[1:])
for seg in segTemp:
output += seg + ' '
elif len(emojis) > 0:
for emoji in emojis:
output += emoji + ' '
else:
output += word + ' '
return output.strip().encode('utf-8')
def removeLinks(input):
urls = re.findall("(?P<url>https?://[^\s]+)", input)
if len(urls) != 0:
for url in urls:
input = input.replace(url, '')
return input
def genTimeStr(input, length):
output = ''
for i in range(length):
output += input + ' '
return output.strip()
def copyPadding(inputList, sampleList):
output = []
for index, sampleVector in enumerate(sampleList):
outputVector = []
inputVector = inputList[index]
for i, item in enumerate(sampleVector):
if item == '0':
outputVector.append(0)
else:
outputVector.append(inputVector[i])
output.append(outputVector)
return np.array(output)
def extractPOS(inputList, mode='all', breakEmoji=True):
posOutput = ''
contentOutput = ''
for item in inputList:
if breakEmoji:
emojis1 = re.findall(r'\\u....', item[0].encode('unicode-escape'))
emojis2 = re.findall(r'\\U........', item[0].encode('unicode-escape'))
emojis = emojis1 + emojis2
if len(emojis) > 0:
for emoji in emojis:
contentOutput += emoji + ' '
posOutput += 'PPOOSSE' + ' '
else:
contentOutput += item[0] + ' '
if mode == 'all':
posOutput += 'PPOOSS' + POSMapper[item[1]] + ' '
else:
posOutput += 'PPOOSS' + POSMapper[item[1]] + ' '
else:
contentOutput += item[0] + ' '
if mode == 'all':
posOutput += 'PPOOSS' + POSMapper[item[1]] + ' '
else:
posOutput += 'PPOOSS' + POSMapper[item[1]] + ' '
if len(contentOutput.split(' ')) != len(posOutput.split(' ')):
print('error')
print(contentOutput)
return contentOutput.lower().strip().encode('utf-8'), posOutput.strip().encode('utf-8')
#order=1 -> pos, word
def mixPOS(inputList, mode, order=1):
output = ''
for item in inputList:
if order != 1:
output += item[0] + ' '
if mode == 'all':
output += 'PPOOSS'+POSMapper[item[1]] + ' '
else:
output += 'PPOOSS' + POSMapper[item[1]] + ' '
if order == 1:
output += item[0] + ' '
return output.strip().encode('utf-8')
def processSeqConcatLSTM(modelName, balancedWeight='None', embedding='None', histNum=1, epochs=4, tune=False):
print('Loading...')
resultName = 'result/SeqConcat-HistLSTM_' + modelName + '_' + balancedWeight
histData = {}
histFile = open('data/consolidateHistData_' + modelName + '.json', 'r')
for line in histFile:
data = json.loads(line.strip())
histData[int(data.keys()[0])] = data.values()[0]
totalContents = []
labels = []
places = []
ids = []
contents = []
histContents = []
dataFile = open('data/consolidateData_' + modelName + '.json', 'r')
for line in dataFile:
data = json.loads(line.strip())
if data['id'] in histData:
histTweets = histData[data['id']]
if len(histTweets) >= histNum:
tempHist = []
contents.append(data['content'].encode('utf-8'))
totalContents.append(data['content'].encode('utf-8'))
labels.append(data['label'])
places.append(data['place'])
ids.append(str(data['id']))
for i in range(histNum):
totalContents.append(histTweets[i]['content'].encode('utf-8'))
tempHist.append(histTweets[histNum-1-i]['content'].encode('utf-8'))
histContents.append(tempHist)
places = np.array(places)
ids = np.array(ids)
labelNum = len(np.unique(labels))
labels = np.array(labels)
encoder = LabelEncoder()
encoder.fit(labels)
labelList = encoder.classes_.tolist()
print('Labels: ' + str(labelList))
labelFile = open(resultName + '.label', 'a')
labelFile.write(str(labelList) + '\n')
labelFile.close()
tk = Tokenizer(num_words=vocabSize)
tk.fit_on_texts(totalContents)
tweetSequences = tk.texts_to_sequences(contents)
tweetVector = sequence.pad_sequences(tweetSequences, maxlen=tweetLength, truncating='post', padding='post')
histVectors = []
for tempHist in histContents:
histSequence = tk.texts_to_sequences(tempHist)
tempVector = sequence.pad_sequences(histSequence, maxlen=tweetLength, truncating='post', padding='post')
histVectors.append(tempVector)
dataVector = []
for index, tweet in enumerate(tweetVector):
for i, histTweet in enumerate(histVectors[index]):
if i == 0:
tempSeq = histTweet
else:
tempSeq = np.append(tempSeq, histTweet)
tempSeq = np.append(tempSeq, tweet)
dataVector.append(tempSeq)
dataVector = np.array(dataVector)
print dataVector.shape
if embedding == 'glove':
print ('Loading glove embeddings...')
embeddings_index = {}
embFile = open('../tweetEmbeddingData/glove.twitter.27B.200d.txt', 'r')
for line in embFile:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
embFile.close()
print('Found %s word vectors.' % len(embeddings_index))
word_index = tk.word_index
embMatrix = np.zeros((len(word_index) + 1, 200))
for word, i in word_index.items():
if word in embeddings_index:
embVector = embeddings_index[word]
embMatrix[i] = embVector
elif embedding == 'word2vec':
word_index = tk.word_index
w2v = word2vecReader.Word2Vec()
embModel = w2v.loadModel()
embMatrix = np.zeros((len(word_index) + 1, 400))
for word, i in word_index.items():
if word in embModel:
embMatrix[i] = embModel[word]
# training
print('training...')
if tune:
verbose = 2
else:
verbose = 0
eval = evaluation.evalMetrics(labelNum)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for fold, (train_index, test_index) in enumerate(skf.split(dataVector, labels)):
input = Input(batch_shape=(batch_size, tweetLength*(histNum+1),), name='input')
if embedding in ['glove', 'word2vec']:
            # use a separate name for the layer so the embedding-mode string is not overwritten between folds
            emb_layer = Embedding(len(word_index) + 1, 200, weights=[embMatrix], trainable=True, name='embedding')(input)
        else:
            emb_layer = Embedding(vocabSize, embeddingVectorLength)(input)
        lstm = LSTM(200, dropout=0.2, recurrent_dropout=0.2, name='lstm')(emb_layer)
output = Dense(labelNum, activation='softmax', name='output')(lstm)
model = Model(inputs=input, outputs=output)
#print model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
data_train = dataVector[train_index]
labels_train = labels[train_index]
data_test = dataVector[test_index]
labels_test = labels[test_index]
contents_test = np.array(contents)[test_index]
places_test = places[test_index]
ids_test = ids[test_index]
if len(labels_train) % batch_size != 0:
data_train = data_train[:-(len(data_train) % batch_size)]
labels_train = labels_train[:-(len(labels_train) % batch_size)]
if len(labels_test) % batch_size != 0:
data_test = data_test[:-(len(data_test) % batch_size)]
labels_test = labels_test[:-(len(labels_test) % batch_size)]
places_test = places_test[:-(len(places_test) % batch_size)]
ids_test = ids_test[:-(len(ids_test) % batch_size)]
labelVector_train = np_utils.to_categorical(encoder.transform(labels_train))
labelVector_test = np_utils.to_categorical(encoder.transform(labels_test))
if balancedWeight == 'sample':
sampleWeight = compute_sample_weight('balanced', labels_train)
trainHistory = model.fit(data_train, labelVector_train, epochs=epochs, validation_data=(data_test, labelVector_test), batch_size=batch_size, sample_weight=sampleWeight, verbose=verbose)
elif balancedWeight == 'class':
classWeight = compute_class_weight('balanced', np.unique(labels_train), labels_train)
trainHistory = model.fit(data_train, labelVector_train, epochs=epochs, validation_data=(data_test, labelVector_test), batch_size=batch_size, class_weight=classWeight, verbose=verbose)
else:
trainHistory = model.fit(data_train, labelVector_train, epochs=epochs, validation_data=(data_test, labelVector_test), batch_size=batch_size, verbose=verbose)
accuracyHist = trainHistory.history['val_acc']
lossHist = trainHistory.history['val_loss']
tuneFile = open(resultName + '.tune', 'a')
tuneFile.write('Hist Num: ' + str(histNum) + '\n')
for index, loss in enumerate(lossHist):
tuneFile.write(str(index + 1) + '\t' + str(loss) + '\t' + str(accuracyHist[index]) + '\n')
tuneFile.write('\n')
tuneFile.close()
scores = model.evaluate(data_test, labelVector_test, batch_size=batch_size, verbose=0)
print("Accuracy: %.2f%%" % (scores[1] * 100))
predictions = model.predict(data_test, batch_size=batch_size)
sampleFile = open(resultName + '.sample', 'a')
predLabels = []
for index, pred in enumerate(predictions):
predLabel = labelList[pred.tolist().index(max(pred))]
if not tune:
sampleFile.write(ids_test[index] + '\t' + contents_test[index] + '\t' + labels_test[index] + '\t' + predLabel + '\t' + places_test[index] + '\n')
predLabels.append(predLabel)
sampleFile.close()
eval.addEval(scores[1], labels_test, predLabels)
if tune:
break
if not tune:
score, scoreSTD = eval.getScore()
precision, preSTD = eval.getPrecision()
recall, recSTD = eval.getRecall()
f1, f1STD = eval.getF1()
conMatrix = eval.getConMatrix()
confusionFile = open(resultName + '.confMatrix', 'a')
resultFile = open(resultName + '.result', 'a')
for row in conMatrix:
lineOut = ''
for line in row:
lineOut += str(line) + '\t'
confusionFile.write(lineOut.strip() + '\n')
confusionFile.write('\n')
resultFile.write(score + '\t' + scoreSTD + '\n')
resultFile.write(recall + '\t' + recSTD + '\n')
resultFile.write(precision + '\t' + preSTD + '\n')
resultFile.write(f1 + '\t' + f1STD + '\n\n')
confusionFile.close()
resultFile.close()
print(score + ' ' + scoreSTD)
print(recall + ' ' + recSTD)
print(precision + ' ' + preSTD)
print(f1 + ' ' + f1STD)
if __name__ == '__main__':
processSeqConcatLSTM('long1.5', 'none', 'glove', histNum=4, epochs=9, tune=False)
processSeqConcatLSTM('long1.5', 'class', 'glove', histNum=5, epochs=11, tune=False)
|
class Node:
def __init__(self):
self.children: dict[str, Node] = {} # dict[str, Node]
self.value: any = None
def find(node: Node, key: str) -> any:
for char in key:
if char in node.children:
node = node.children[char]
else:
return None
return node.value
def insert(node: Node, key: str, value: any) -> None:
for char in key:
if char not in node.children:
node.children[char] = Node()
node = node.children[char]
node.value = value
if __name__ == '__main__':
trie = Node()
words = ['you', 'are', 'a', 'dull', 'boy', 'dull']
for word in words:
insert(trie, word, word)
print(find(trie, 'a')) |
# Copyright (c) FlowTorch Development Team. All Rights Reserved
# SPDX-License-Identifier: MIT
import torch
import torch.distributions as dist
import torch.optim
import flowtorch
import flowtorch.bijectors
import flowtorch.params
def test_compose():
flow = flowtorch.bijectors.Compose(
[
flowtorch.bijectors.AffineAutoregressive(
flowtorch.params.DenseAutoregressive(),
),
flowtorch.bijectors.AffineAutoregressive(
flowtorch.params.DenseAutoregressive(),
),
flowtorch.bijectors.AffineAutoregressive(
flowtorch.params.DenseAutoregressive(),
),
]
)
event_shape = (5,)
base_dist = dist.Normal(loc=torch.zeros(event_shape), scale=torch.ones(event_shape))
new_dist, flow_params = flow(base_dist)
optimizer = torch.optim.Adam(flow_params.parameters())
assert optimizer.param_groups[0]["params"][0].grad is None
new_dist.log_prob(torch.randn((100,) + event_shape)).sum().backward()
assert optimizer.param_groups[0]["params"][0].grad.abs().sum().item() > 1e-3
optimizer.zero_grad()
assert optimizer.param_groups[0]["params"][0].grad.abs().sum().item() < 1e-3
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the local OEF node implementation."""
import asyncio
import unittest.mock
import pytest
from aea.connections.local.connection import LocalNode, OEFLocalConnection
from aea.mail.base import Envelope, AEAConnectionError, Multiplexer
from aea.protocols.default.message import DefaultMessage
from aea.protocols.default.serialization import DefaultSerializer
from aea.protocols.fipa.message import FIPAMessage
from aea.protocols.fipa.serialization import FIPASerializer
def test_connection():
"""Test that two OEF local connection can connect to a local node."""
with LocalNode() as node:
multiplexer1 = Multiplexer([OEFLocalConnection("multiplexer1", node)])
multiplexer2 = Multiplexer([OEFLocalConnection("multiplexer2", node)])
multiplexer1.connect()
multiplexer2.connect()
multiplexer1.disconnect()
multiplexer2.disconnect()
@pytest.mark.asyncio
async def test_connection_twice_return_none():
"""Test that connecting twice works."""
with LocalNode() as node:
public_key = "public_key"
connection = OEFLocalConnection(public_key, node)
await connection.connect()
await node.connect(public_key, connection._reader)
message = DefaultMessage(type=DefaultMessage.Type.BYTES, content=b"hello")
message_bytes = DefaultSerializer().encode(message)
expected_envelope = Envelope(to=public_key, sender=public_key, protocol_id="default", message=message_bytes)
await connection.send(expected_envelope)
actual_envelope = await connection.receive()
assert expected_envelope == actual_envelope
await connection.disconnect()
@pytest.mark.asyncio
async def test_receiving_when_not_connected_raise_exception():
"""Test that when we try to receive an envelope from a not connected connection we raise exception."""
with pytest.raises(AEAConnectionError, match="Connection not established yet."):
with LocalNode() as node:
public_key = "public_key"
connection = OEFLocalConnection(public_key, node)
await connection.receive()
@pytest.mark.asyncio
async def test_receiving_returns_none_when_error_occurs():
"""Test that when we try to receive an envelope and an error occurs we return None."""
with LocalNode() as node:
public_key = "public_key"
connection = OEFLocalConnection(public_key, node)
await connection.connect()
with unittest.mock.patch.object(connection._reader, "get", side_effect=Exception):
result = await connection.receive()
assert result is None
await connection.disconnect()
def test_communication():
"""Test that two multiplexer can communicate through the node."""
with LocalNode() as node:
multiplexer1 = Multiplexer([OEFLocalConnection("multiplexer1", node)])
multiplexer2 = Multiplexer([OEFLocalConnection("multiplexer2", node)])
multiplexer1.connect()
multiplexer2.connect()
msg = DefaultMessage(type=DefaultMessage.Type.BYTES, content=b"hello")
msg_bytes = DefaultSerializer().encode(msg)
envelope = Envelope(to="multiplexer2", sender="multiplexer1", protocol_id=DefaultMessage.protocol_id, message=msg_bytes)
multiplexer1.put(envelope)
msg = FIPAMessage((str(0), ''), 0, 0, FIPAMessage.Performative.CFP, query=None)
msg_bytes = FIPASerializer().encode(msg)
envelope = Envelope(to="multiplexer2", sender="multiplexer1", protocol_id=FIPAMessage.protocol_id, message=msg_bytes)
multiplexer1.put(envelope)
msg = FIPAMessage((str(0), str(1)), 0, 0, FIPAMessage.Performative.PROPOSE, proposal=[])
msg_bytes = FIPASerializer().encode(msg)
envelope = Envelope(to="multiplexer2", sender="multiplexer1", protocol_id=FIPAMessage.protocol_id, message=msg_bytes)
multiplexer1.put(envelope)
msg = FIPAMessage((str(0), str(1)), 0, 0, FIPAMessage.Performative.ACCEPT)
msg_bytes = FIPASerializer().encode(msg)
envelope = Envelope(to="multiplexer2", sender="multiplexer1", protocol_id=FIPAMessage.protocol_id, message=msg_bytes)
multiplexer1.put(envelope)
msg = FIPAMessage((str(0), str(1)), 0, 0, FIPAMessage.Performative.DECLINE)
msg_bytes = FIPASerializer().encode(msg)
envelope = Envelope(to="multiplexer2", sender="multiplexer1", protocol_id=FIPAMessage.protocol_id, message=msg_bytes)
multiplexer1.put(envelope)
envelope = multiplexer2.get(block=True, timeout=1.0)
msg = DefaultSerializer().decode(envelope.message)
assert envelope.protocol_id == "default"
assert msg.get("content") == b"hello"
envelope = multiplexer2.get(block=True, timeout=1.0)
msg = FIPASerializer().decode(envelope.message)
assert envelope.protocol_id == "fipa"
assert msg.get("performative") == FIPAMessage.Performative.CFP
envelope = multiplexer2.get(block=True, timeout=1.0)
msg = FIPASerializer().decode(envelope.message)
assert envelope.protocol_id == "fipa"
assert msg.get("performative") == FIPAMessage.Performative.PROPOSE
envelope = multiplexer2.get(block=True, timeout=1.0)
msg = FIPASerializer().decode(envelope.message)
assert envelope.protocol_id == "fipa"
assert msg.get("performative") == FIPAMessage.Performative.ACCEPT
envelope = multiplexer2.get(block=True, timeout=1.0)
msg = FIPASerializer().decode(envelope.message)
assert envelope.protocol_id == "fipa"
assert msg.get("performative") == FIPAMessage.Performative.DECLINE
multiplexer1.disconnect()
multiplexer2.disconnect()
@pytest.mark.asyncio
async def test_connecting_to_node_with_same_key():
"""Test that connecting twice with the same key works correctly."""
with LocalNode() as node:
public_key = "my_public_key"
my_queue = asyncio.Queue()
ret = await node.connect(public_key, my_queue)
assert ret is not None and isinstance(ret, asyncio.Queue)
ret = await node.connect(public_key, my_queue)
assert ret is None
|
#
# Copyright 2020- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
from typing import Optional
import numpy as np
from abc import ABC, abstractmethod
class BaseWholeSeller(ABC):
def __init__(self, seed: Optional[int] = None):
if seed is None:
seed = sum([ord(s) for s in "wholeseller"])
self.seed = seed
self.reset_rng()
def reset_rng(self, seed: Optional[int] = None):
if seed is None:
self.rng = np.random.RandomState(self.seed)
else:
self.rng = np.random.RandomState(seed)
@abstractmethod
def act(self) -> float:
raise NotImplementedError
@abstractmethod
def learn(self, quantity: float):
raise NotImplementedError
class RandomWholeSeller(BaseWholeSeller):
def act(self) -> float:
return self.rng.random()
def learn(self, quantity: float):
pass
class ConstantWholeSeller(BaseWholeSeller):
def __init__(self, wholesale_price: float):
self.wholesale_price = wholesale_price
super().__init__()
def act(self) -> float:
return self.wholesale_price
def learn(self, quantity: float):
pass
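# A minimal usage sketch (not part of the original module): RandomWholeSeller draws its
# price from a seeded RNG, so resetting the RNG replays the exact same sequence.
if __name__ == "__main__":
    seller = RandomWholeSeller(seed=42)
    first = [seller.act() for _ in range(3)]
    seller.reset_rng()                      # rewind to the stored seed
    second = [seller.act() for _ in range(3)]
    assert first == second                  # identical draws after the reset
    print(first)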
|
#!/usr/bin/env python3
# Sets are unordered collections of unique elements.
# When a list with repeated elements is converted to
# a set, each element occurs only once.
my_list = [1, 2, 1, 3, 4, 5]
print(my_list)
my_set = set(my_list)
print(my_set)
# When inspected from the Python REPL, the set prints
# like a list but with curly brackets and without any
# repeated elements.
"""
In [1]: my_list = [1, 2, 1, 3, 4, 5]
In [2]: my_list
Out[2]: [1, 2, 1, 3, 4, 5]
In [3]: my_set = set(my_list)
In [4]: my_set
Out[4]: {1, 2, 3, 4, 5}
"""
list_1 = [1, 2, 3, 4, 4, 6, 1, 2, 5]
set_1 = {x for x in list_1}
print(set_1)
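# A small follow-up sketch: adding an element that is already present is a no-op,
# which is another way of seeing that set elements are unique.
my_set.add(3)
print(my_set)  # still {1, 2, 3, 4, 5}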
|
import asyncio
from concurrent.futures import ThreadPoolExecutor
from asgiref.sync import sync_to_async
from django.utils import timezone
from merger.mergers import (AgentMerger, ArchivalObjectMerger,
ArrangementMapMerger, ResourceMerger,
SubjectMerger)
from pisces import settings
from transformer.transformers import Transformer
from .helpers import (handle_deleted_uris, instantiate_aspace,
instantiate_electronbond, last_run_time, list_chunks,
send_error_notification)
from .models import FetchRun, FetchRunError
class FetcherError(Exception):
pass
def run_transformer(merged_object_type, merged):
Transformer().run(merged_object_type, merged)
def run_merger(merger, object_type, fetched):
return merger(clients).merge(object_type, fetched)
class BaseDataFetcher:
"""Base data fetcher.
Provides a common run method inherited by other fetchers. Requires a source
attribute to be set on inheriting fetchers.
"""
def fetch(self, object_status, object_type):
self.object_status = object_status
self.object_type = object_type
self.last_run = last_run_time(self.source, object_status, object_type)
global clients
self.processed = 0
self.current_run = FetchRun.objects.create(
status=FetchRun.STARTED,
source=self.source,
object_type=object_type,
object_status=object_status)
self.merger = self.get_merger(object_type)
try:
clients = self.instantiate_clients()
fetched = getattr(
self, "get_{}".format(self.object_status))()
asyncio.get_event_loop().run_until_complete(
self.process_fetched(fetched))
except Exception as e:
self.current_run.status = FetchRun.ERRORED
self.current_run.end_time = timezone.now()
self.current_run.save()
FetchRunError.objects.create(
run=self.current_run,
message="Error fetching data: {}".format(e),
)
raise FetcherError(e)
self.current_run.status = FetchRun.FINISHED
self.current_run.end_time = timezone.now()
self.current_run.save()
if self.current_run.error_count > 0:
send_error_notification(self.current_run)
return self.processed
def instantiate_clients(self):
clients = {
"aspace": instantiate_aspace(settings.ARCHIVESSPACE)
}
if settings.CARTOGRAPHER['cartographer_use']:
clients["cartographer"] = instantiate_electronbond(settings.CARTOGRAPHER)
return clients
async def process_fetched(self, fetched):
tasks = []
to_delete = []
loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor()
if self.object_status == "updated":
if self.source == FetchRun.ARCHIVESSPACE:
                semaphore = asyncio.BoundedSemaphore(settings.CHUNK_SIZE // self.page_size)
for id_chunk in list_chunks(fetched, self.page_size):
task = asyncio.ensure_future(self.handle_page(id_chunk, loop, executor, semaphore, to_delete))
tasks.append(task)
else:
semaphore = asyncio.BoundedSemaphore(settings.CHUNK_SIZE)
for obj in fetched:
task = asyncio.ensure_future(self.handle_item(obj, loop, executor, semaphore, to_delete))
tasks.append(task)
else:
to_delete = fetched
self.processed = len(fetched)
tasks.append(asyncio.ensure_future(handle_deleted_uris(to_delete, self.source, self.object_type, self.current_run)))
await asyncio.gather(*tasks, return_exceptions=True)
async def handle_page(self, id_list, loop, executor, semaphore, to_delete):
async with semaphore:
page = await self.get_page(id_list)
for obj in page:
await self.handle_data(obj, loop, executor, semaphore, to_delete)
self.processed += 1
async def handle_item(self, identifier, loop, executor, semaphore, to_delete):
async with semaphore:
item = await self.get_item(identifier)
await self.handle_data(item, loop, executor, semaphore, to_delete)
self.processed += 1
async def handle_data(self, data, loop, executor, semaphore, to_delete):
try:
if self.is_exportable(data):
merged, merged_object_type = await loop.run_in_executor(executor, run_merger, self.merger, self.object_type, data)
await loop.run_in_executor(executor, run_transformer, merged_object_type, merged)
else:
to_delete.append(data.get("uri", data.get("archivesspace_uri")))
except Exception as e:
print(e)
await sync_to_async(FetchRunError.objects.create, thread_sensitive=True)(run=self.current_run, message=str(e))
def is_exportable(self, obj):
"""Determines whether the object can be exported.
Unpublished objects should not be exported.
Objects with unpublished ancestors should not be exported.
Resource records whose id_0 field does not begin with FA should not be exported.
"""
if not obj.get("publish"):
return False
if obj.get("has_unpublished_ancestor"):
return False
if obj.get("id_0") and not obj.get("id_0").startswith("FA"):
return False
return True
class ArchivesSpaceDataFetcher(BaseDataFetcher):
"""Fetches updated and deleted data from ArchivesSpace."""
source = FetchRun.ARCHIVESSPACE
page_size = 25
def get_merger(self, object_type):
MERGERS = {
"resource": ResourceMerger,
"archival_object": ArchivalObjectMerger,
"subject": SubjectMerger,
"agent_person": AgentMerger,
"agent_corporate_entity": AgentMerger,
"agent_family": AgentMerger,
}
return MERGERS[object_type]
def get_updated(self):
params = {"all_ids": True, "modified_since": self.last_run}
endpoint = self.get_endpoint(self.object_type)
return clients["aspace"].client.get(endpoint, params=params).json()
def get_deleted(self):
data = []
for d in clients["aspace"].client.get_paged(
"delete-feed", params={"modified_since": self.last_run}):
if self.get_endpoint(self.object_type) in d:
data.append(d)
return data
def get_endpoint(self, object_type):
repo_baseurl = "/repositories/{}".format(settings.ARCHIVESSPACE["repo"])
endpoint = None
if object_type == 'resource':
endpoint = "{}/resources".format(repo_baseurl)
elif object_type == 'archival_object':
endpoint = "{}/archival_objects".format(repo_baseurl)
elif object_type == 'subject':
endpoint = "/subjects"
elif object_type == 'agent_person':
endpoint = "/agents/people"
elif object_type == 'agent_corporate_entity':
endpoint = "/agents/corporate_entities"
elif object_type == 'agent_family':
endpoint = "/agents/families"
return endpoint
async def get_page(self, id_list):
params = {
"id_set": id_list,
"resolve": ["ancestors", "ancestors::linked_agents", "instances::top_container", "linked_agents", "subjects"]}
return clients["aspace"].client.get(self.get_endpoint(self.object_type), params=params).json()
class CartographerDataFetcher(BaseDataFetcher):
"""Fetches updated and deleted data from Cartographer."""
source = FetchRun.CARTOGRAPHER
base_endpoint = "/api/components/"
def get_merger(self, object_type):
return ArrangementMapMerger
def get_updated(self):
data = []
for obj in clients["cartographer"].get(
self.base_endpoint, params={"modified_since": self.last_run}).json()['results']:
data.append("{}{}/".format(self.base_endpoint, obj.get("id")))
return data
def get_deleted(self):
data = []
for deleted_ref in clients["cartographer"].get(
'/api/delete-feed/', params={"deleted_since": self.last_run}).json()['results']:
if self.base_endpoint in deleted_ref['ref']:
data.append(deleted_ref.get('archivesspace_uri'))
return data
async def get_item(self, obj_ref):
return clients["cartographer"].get(obj_ref).json()
|
#!/usr/bin/env python2
# Copyright (C) 2013:
# Gabes Jean, naparuba@gmail.com
# Pasche Sebastien, sebastien.pasche@leshop.ch
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
'''
This script checks memory consumption over SSH without requiring
an agent on the remote host.
'''
import os
import sys
import optparse
import base64
import subprocess
# Ok try to load our directory to load the plugin utils.
my_dir = os.path.dirname(__file__)
sys.path.insert(0, my_dir)
try:
import schecks
except ImportError:
print "ERROR : this plugin needs the local schecks.py lib. Please install it"
sys.exit(2)
VERSION = "0.1"
DEFAULT_WARNING = '75%'
DEFAULT_CRITICAL = '90%'
def get_meminfo(client):
# get raw mem info
stdin, stdout, stderr = client.exec_command('LC_ALL=C cat /proc/meminfo')
# init data
total = used = free = shared = buffed = cached = 0
# first create a dict
meminfo = {}
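    # Each /proc/meminfo line looks like "MemTotal:        8167848 kB"; the loop below
    # stores it as meminfo["MemTotal"] = {"value": 8167848, "unit": "kB"}.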
for line in stdout:
raw = filter(None, line.strip().split(" "))
if len(raw) < 3:
raw.append("")
raw[0] = raw[0].replace(":","")
meminfo[raw[0]] = {
"value": int(raw[1]),
"unit": raw[2]
}
client.close()
# get and compute data
# According to https://access.redhat.com/solutions/406773
# Note: shared is still there but forced to zero for backward compatibility (no meaning)
total = meminfo["MemTotal"]["value"]
free = meminfo["MemFree"]["value"]
used = total - free
buffed = meminfo["Buffers"]["value"]
cached = meminfo["Cached"]["value"]
swap_total = meminfo["SwapTotal"]["value"]
swap_free = meminfo["SwapFree"]["value"]
swap_used = swap_total - swap_free
shared = 0
return total, used, free, shared, buffed, cached, swap_total, swap_used, swap_free
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + VERSION)
parser.add_option('-H', '--hostname',
dest="hostname", help='Hostname to connect to')
parser.add_option('-p', '--port',
dest="port", type="int", default=22,
help='SSH port to connect to. Default : 22')
parser.add_option('-i', '--ssh-key',
dest="ssh_key_file",
help='SSH key file to use. By default will take ~/.ssh/id_rsa.')
parser.add_option('-u', '--user',
dest="user", help='remote use to use. By default shinken.')
parser.add_option('-P', '--passphrase',
dest="passphrase", help='SSH key passphrase. By default will use void')
parser.add_option('-m', '--measurement',
dest="measurement",action="store_true",default=False,
help='Measurement in absolute value of the memory behavior. Absolute value '
'currently can not be used as a check')
parser.add_option('-s', '--swap',
dest="swap",action="store_true",default=False,
help='Enable swap value measurement. Swap value currently can not be used '
'as a check')
parser.add_option('-w', '--warning',
dest="warning",
help='Warning value for physical used memory. In percent. Default : 75%')
parser.add_option('-c', '--critical',
dest="critical",
help='Critical value for physical used memory. In percent. Must be '
'superior to warning value. Default : 90%')
if __name__ == '__main__':
# Ok first job : parse args
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
port = opts.port
hostname = opts.hostname or ''
ssh_key_file = opts.ssh_key_file or os.path.expanduser('~/.ssh/id_rsa')
user = opts.user or 'shinken'
passphrase = opts.passphrase or ''
    # Try to get numeric warning/critical values
s_warning = opts.warning or DEFAULT_WARNING
s_critical = opts.critical or DEFAULT_CRITICAL
warning, critical = schecks.get_warn_crit(s_warning, s_critical)
# Ok now connect, and try to get values for memory
client = schecks.connect(hostname, port, ssh_key_file, passphrase, user)
total, used, free, shared, buffed, cached, swap_total, swap_used, swap_free = get_meminfo(client)
# Maybe we failed at getting data
if total == 0:
print "Error : cannot fetch memory values from host"
sys.exit(2)
# Ok analyse data
pct_used = 100 * float(used - buffed - cached) / total
pct_used = int(pct_used)
d = {'used':used, 'buffered':buffed, 'cached':cached, 'free':free, 'consumed': used - buffed - cached}
perfdata = ''
for (k,v) in d.iteritems():
        # For the consumed value we should set the warning/critical thresholds in perfdata
_warn, _crit = '', ''
if k == 'consumed':
_warn, _crit = str(warning)+'%', str(critical)+'%'
perfdata += ' %s=%s%%;%s;%s;0%%;100%%' % (k, int(100 * float(v)/total), _warn, _crit)
# Add swap if required (actually no check supported)
if opts.swap and swap_total > 0:
d_swap = {'swap_used':swap_used, 'swap_free':swap_free}
for (k,v) in d_swap.iteritems():
## manage division by zero, this is if the host doesn't have swap
try:
perfdata += ' %s=%s%%;;;0%%;100%%' % (k, int(100 * float(v)/swap_total))
except ZeroDivisionError:
                print('The server either does not have swap or the swap partition is not mounted!')
# Add measurement if required (actually no check supported) + total
if opts.measurement :
d['total']=total
for (k,v) in d.iteritems():
perfdata += ' %s=%sKB;;;0KB;%sKB' % (k+'_abs', v, total)
if opts.swap and swap_total > 0:
d_swap['swap_total']=swap_total
for (k,v) in d_swap.iteritems():
perfdata += ' %s=%sKB;;;0KB;%sKB' % (k, v, swap_total)
# And compare to limits
if pct_used >= critical:
print "Critical : memory consumption is too high %s%% | %s" % (pct_used, perfdata)
sys.exit(2)
if pct_used >= warning:
print "Warning : memory consumption is very high %s%% | %s" % (pct_used, perfdata)
sys.exit(1)
print "Ok : memory consumption is %s%% | %s" % (pct_used, perfdata)
sys.exit(0)
|
#!/usr/bin/env python
from atip.common.steps import *
from atip.web.steps import *
import time
import datetime
import md5
import Image
# get md5 of an input string
def get_string_md5(str):
m = md5.new()
m.update(str)
return m.hexdigest()
# standardize the image
def make_regalur_image(img, size=(256, 256)):
return img.resize(size).convert('RGB')
def hist_similar(lh, rh):
assert len(lh) == len(rh)
return sum(1 - (0 if l == r else float(abs(l - r)) / max(l, r))
for l, r in zip(lh, rh)) / len(lh)
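# hist_similar returns a score in [0, 1]: identical histograms give 1.0, and each bin
# contributes 1 - |l - r| / max(l, r), so the score drops as corresponding bins diverge.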
# Image similarity calculation
def cal_images_similar(img1, img2):
# open two images
fimg1 = Image.open(img1)
fimg2 = Image.open(img2)
# img2=Image.open("./pic.png")
# regular the image
reg_img1 = make_regalur_image(fimg1)
reg_img2 = make_regalur_image(fimg2)
# calculate the similar
ret = hist_similar(reg_img1.histogram(), reg_img2.histogram())
return ret * 100
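# A minimal usage sketch (file names are hypothetical): compare a fresh screenshot
# against a stored baseline and treat a similarity above 90 (percent) as a pass.
#   score = cal_images_similar('screenshot.png', 'baseline.png')
#   assert score > 90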
@step(u'I press "{key_prefix}" for {n:d} times cyclically')
def i_press_cycle(context, key_prefix, n):
for i in range(1, n + 1):
for j in range(i, n + 1):
key1 = key_prefix + str(i)
# print "key:" + key1
assert context.app.press_element_by_key(key1)
time.sleep(1)
key2 = key_prefix + str(j)
assert context.app.press_element_by_key(key2)
time.sleep(0.1)
@step(u'I verify "{text}" with link "{link}"')
def check_link_by_text(context, link, text):
element = context.app.driver.find_element_by_link_text(text)
# print "hyperlink:" , element.get_attribute('href')
# print "link:", link
    hyperlink = element.get_attribute('href')
    assert hyperlink == link
@step(u'I click button with class "{classname}" and text "{text}"')
def click_button_by_class_and_text(context, classname, text):
    # click every element of the given class whose text matches the step's text
    elements = context.app.driver.find_elements_by_class_name(classname)
    for element in elements:
        if element.text == text:
            element.click()
@step(u'I wait {n:d} seconds')
def wait_seconds(context, n):
time.sleep(n)
@step(u'I check screenshot should be "{exp_md5}"')
def i_check_screenshot_base64_md5(context, exp_md5):
pic_base64 = context.app.driver.get_screenshot_as_base64()
# context.app.driver.get_screenshot_as_file("/home/cici/webdriver/auto/wrt-sampleapp-android-tests/testscripts/pic.png")
pic_md5 = get_string_md5(pic_base64)
print "pic_md5", pic_md5
print "exp_md5", exp_md5
    assert pic_md5 == exp_md5
@step(
u'I check screenshot "{img}" should have {percent:d} similarity with "{baseline_img}"')
def i_check_screenshot(context, img, baseline_img, percent):
context.app.driver.get_screenshot_as_file(img)
# print "screenshot:", img
# print "baseline img:", baseline_img
similarity = cal_images_similar(img, baseline_img)
print "similarity:", similarity
    assert similarity > percent
def compare_values(value1, value2, num):
    # assert that value2 is at least num times value1
    times = value2 / value1
    assert times >= num
@step(
u'I check "{eid}" is {num:f} times after click "{eid2}" for {nsec:d} seconds')
def I_compare_values(context, eid, num, eid2, nsec):
element = context.app.driver.find_element_by_id(eid)
text = element.text
value_start = float(text)
element = context.app.driver.find_element_by_id(eid2)
# print "element2:", element.text
element.click()
time.sleep(nsec)
element = context.app.driver.find_element_by_id(eid)
text = element.text
value_end = float(text)
compare_values(value_start, value_end, num)
|
import numpy as np
def my_nb(x, gnd):
    """Estimate naive Bayes parameters from labelled data.
    x   : (num_features, num_samples) data matrix
    gnd : (num_samples,) class labels
    Returns class priors pw and per-feature, per-class means and standard deviations.
    """
    a, b = x.shape
    my_lab = np.unique(gnd)
    NumOfClass = my_lab.size
    pw = np.zeros([NumOfClass])
    my_m = np.zeros([a, NumOfClass, 1])
    my_std = np.zeros([a, NumOfClass, 1])
    for i in range(NumOfClass):
        # class prior = fraction of samples belonging to class i (force float division)
        pw[i] = float(np.sum(gnd == my_lab[i])) / gnd.size
    for i in range(a):
        for j in range(NumOfClass):
            # samples of feature i restricted to class j
            tmpX = x[i][np.where(gnd == my_lab[j])]
            my_m[i][j][0] = np.mean(tmpX)
            my_std[i][j][0] = np.std(tmpX)
    return pw, my_m, my_std
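# Illustrative usage (toy data; the Gaussian scoring below is an assumption for
# demonstration purposes and is not part of the original function):
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # 3 features, 20 samples per class, class 1 shifted by +3
    x = np.hstack([rng.normal(0, 1, (3, 20)), rng.normal(3, 1, (3, 20))])
    gnd = np.array([0] * 20 + [1] * 20)
    pw, my_m, my_std = my_nb(x, gnd)
    sample = x[:, 0]  # a sample from class 0
    # log prior + sum over features of log N(sample_i | class mean, class std)
    var = my_std[:, :, 0] ** 2
    log_post = np.log(pw) + np.sum(
        -0.5 * np.log(2 * np.pi * var)
        - (sample[:, None] - my_m[:, :, 0]) ** 2 / (2 * var),
        axis=0,
    )
    print("predicted class index:", int(np.argmax(log_post)))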
|
import os
import pandas as pd
def getCSVsAsList():
"""
    get all parsable CSVs in the project as full paths
:return: string list
"""
fileList = []
# CSVs to skip because country name is not a PK
csvSkipList = [
"university_rankings.csv",
"traffic_index.csv",
]
    for root, _unuseddirs, files in os.walk(".."):
        for name in files:
            if name.endswith(".csv") and not any(f in name for f in csvSkipList):
                fileList.append(os.path.join(root, name))
return fileList
def findElementInList(passedList, elements):
"""
:param passedList: column names of a data frame
:param elements: elements to look for in the column names
    :return: index of the first matching column name, or None if no match is found
"""
for i, item in enumerate(passedList):
for element in elements:
if str(item).lower() == str(element).lower():
return i
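# Illustrative example (hypothetical column names):
#   findElementInList(["Rank", "Country", "Score"], ["country", "name"]) -> 1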
def grabData(countryName):
"""
:param countryName: country name as string (case insensitive)
:return: data frame with country data
"""
CSVs = getCSVsAsList()
df = pd.DataFrame()
for CSV in CSVs:
print(CSV)
data = pd.read_csv(CSV)
# column names used to describe the country names
columnNames = ["country", "name"]
# column names of the data frame
dataColumns = list(data.columns.values)
country_col_index = findElementInList(dataColumns, columnNames)
colName = data.columns[country_col_index]
whereCountry = data[colName].str.lower() == countryName.lower()
data = data.where(whereCountry).dropna()
if df.empty:
df = data
key = colName
else:
df = df.join(
data.set_index([colName], verify_integrity=True),
on=[key], how='left',
)
return df
grabData("france")
|
from tkinter import Frame, OptionMenu, StringVar, Tk, Entry, Button, Label, Text, END, Y
from tkinter.filedialog import askopenfilename
from tkinter.messagebox import showinfo
from matplotlib import pyplot as plt
from pandas import read_csv, read_excel
from os.path import basename
# Main Window
root = Tk()
root.configure(background='#070091')
root.title("Data Plotter")
root.iconbitmap("./icon.ico")
root.minsize(900, 720)
root.state('zoomed')
theme = "dark"
# Frame
plots_frame = Frame(root)
plots_frame.place(x=30, y=240)
# Table Array
table_array: list[list[Entry]] = []
class DataPoint:
    # defaults so a freshly added row can be rendered before the user fills it in
    data_file = ""
    basename_title = ""
    column1 = ""
    column2 = ""
    label = ""
    def setFile(self, file: str):
        self.data_file = file
self.basename_title = basename(file).replace(".csv",
"").replace(".xlsx", "")
def setCol1(self, col1: str):
self.column1 = col1
def setCol2(self, col2: str):
self.column2 = col2
def setLabel(self, label: str):
self.label = label
def getFile(self):
data_table.update_from_table()
file = askopenfilename(filetypes=[("Excel Files",
"*.xlsx"), ("CSV Files", "*.csv")])
if file == "":
return
else:
self.data_file = file
self.basename_title = basename(file).replace(".csv", "").replace(
".xlsx", "")
try:
file = read_file(self.data_file)
if len(file) > 2500:
showinfo(
"Data Plotter",
"Number of rows greater than 2500. Please reduce the number of rows to avoid"
"performance issues")
return
        except Exception:
showinfo("Data Plotter", "Error Parsing File")
return
preview.delete(1.0, END)
preview.insert(1.0, read_file(self.data_file))
data_table.update_from_dataset()
# Data Set
dataset: list[DataPoint] = []
class Table:
def update_from_dataset(self):
global plots_frame
table_array.clear()
plots_frame.destroy()
plots_frame = Frame(root)
plots_frame.place(x=30, y=240)
for i in range(len(dataset)):
table_array.append([])
for j in range(5):
if j != 4:
if theme == "dark":
entry = Entry(plots_frame,
width=16,
font=("Arial", "16"),
foreground="white",
background="black")
else:
entry = Entry(plots_frame,
width=16,
font=("Arial", "16"),
foreground="black",
background="white")
entry.grid(row=i, column=j)
if j == 0:
entry.insert(END, dataset[i].label)
elif j == 1:
entry.insert(END, dataset[i].column1)
elif j == 2:
entry.insert(END, dataset[i].column2)
else:
entry.insert(END, dataset[i].data_file)
table_array[i].append(entry)
else:
if theme == "dark":
button = Button(plots_frame,
text="Open File",
relief="groove",
activebackground="gray",
activeforeground="white",
foreground="white",
background="black",
command=dataset[i].getFile,
borderwidth=4)
else:
button = Button(plots_frame,
text="Open File",
relief="groove",
activebackground="gray",
activeforeground="black",
foreground="black",
background="white",
command=dataset[i].getFile,
borderwidth=4)
button.grid(row=i, column=j)
def update_from_table(self):
for i in range(len(table_array)):
for j in range(4):
if j == 0:
dataset[i].setLabel(table_array[i][j].get())
elif j == 1:
dataset[i].setCol1(table_array[i][j].get())
elif j == 2:
dataset[i].setCol2(table_array[i][j].get())
else:
dataset[i].setFile(table_array[i][j].get())
data_table = Table()
data_table.update_from_dataset()
# Preview
preview = Text(root,
wrap="none",
font=('Arial', '14'),
height=864,
background='#c704ad',
foreground='white')
preview.pack(expand=True, fill=Y)
preview.place(x=900, y=0)
# Labels
text1 = Label(root,
text="Label",
font=('Arial', '16'),
bg='#070091',
fg='white')
text1.place(x=100, y=180)
text2 = Label(root,
text="Primary/Column 1",
font=('Arial', '16'),
bg='#070091',
fg='white')
text2.place(x=240, y=180)
text3 = Label(root,
text="Secondary/Column 2",
font=('Arial', '16'),
bg='#070091',
fg='white')
text3.place(x=420, y=180)
text4 = Label(root,
text="File Path",
font=('Arial', '16'),
bg='#070091',
fg='white')
text4.place(x=680, y=180)
# Dropdown Menu
graph_label = StringVar(root, "Select Graph")
graphs = OptionMenu(
root, graph_label, *[
"Line Graph", "Bar Graph", "Horizontal Bar Graph", "Pie Chart",
"Scatter Plot", "Area Chart"
])
graphs.place(x=400, y=125)
graphs.config(bg="BLACK", fg="WHITE")
graphs.config(activebackground="BLACK", activeforeground="WHITE")
graphs["menu"].config(bg="BLACK", fg="WHITE")
graphs["highlightthickness"] = 0
theme_label = StringVar(root, "Select Graph Theme")
themes = OptionMenu(root, theme_label,
*[style for style in plt.style.available])
themes.place(x=720, y=30)
themes.config(bg="BLACK", fg="WHITE")
themes.config(activebackground="BLACK", activeforeground="WHITE")
themes["menu"].config(bg="BLACK", fg="WHITE")
themes["highlightthickness"] = 0
def read_file(filename: str):
if filename.endswith(".xlsx"):
return read_excel(filename)
else:
return read_csv(filename)
def createGraph() -> bool:
plt.clf()
if graph_label.get() == "Bar Graph":
for datapoint in dataset:
if datapoint.data_file == "":
showinfo("Data Plotter", "Please select a data file")
return False
file = read_file(datapoint.data_file)
try:
plt.bar(file[datapoint.column1],
file[datapoint.column2],
label=datapoint.label)
            except Exception:
                showinfo("Data Plotter", "One or more columns didn't exist")
                return False
plt.xlabel(datapoint.column1.title())
plt.ylabel(datapoint.column2.title())
plt.legend()
elif graph_label.get() == "Line Graph":
for datapoint in dataset:
if datapoint.data_file == "":
showinfo("Data Plotter", "Please select a data file")
return False
file = read_file(datapoint.data_file)
try:
plt.plot(file[datapoint.column1],
file[datapoint.column2],
label=datapoint.label)
            except Exception:
                showinfo("Data Plotter", "One or more columns didn't exist")
                return False
plt.xlabel(datapoint.column1.title())
plt.ylabel(datapoint.column2.title())
plt.legend()
elif graph_label.get() == "Horizontal Bar Graph":
if len(table_array) > 1:
showinfo(
"Data Plotter",
"Horizontal Bar Graph takes only the first set of data given")
if dataset[0].data_file == "":
showinfo("Data Plotter", "Please select a data file")
return False
file = read_file(dataset[0].data_file)
try:
plt.barh(file[dataset[0].column2],
file[dataset[0].column1],
label=dataset[0].label)
        except Exception:
            showinfo("Data Plotter", "One or more columns didn't exist")
            return False
        plt.xlabel(dataset[0].column1.title())
        plt.ylabel(dataset[0].column2.title())
elif graph_label.get() == "Pie Chart":
if len(table_array) > 1:
showinfo("Data Plotter",
"Pie Chart takes only the first set of data given")
if dataset[0].data_file == "":
showinfo("Data Plotter", "Please select a data file")
return False
file = read_file(dataset[0].data_file)
try:
plt.pie(file[dataset[0].column1], labels=file[dataset[0].column2])
        except Exception:
            showinfo("Data Plotter", "One or more columns didn't exist")
            return False
elif graph_label.get() == "Scatter Plot":
for datapoint in dataset:
if datapoint.data_file == "":
showinfo("Data Plotter", "Please select a data file")
return False
file = read_file(datapoint.data_file)
try:
plt.scatter(file[datapoint.column1],
file[datapoint.column2],
label=datapoint.label)
            except Exception:
                showinfo("Data Plotter", "One or more columns didn't exist")
                return False
plt.xlabel(datapoint.column1.title())
plt.ylabel(datapoint.column2.title())
plt.legend()
elif graph_label.get() == "Area Chart":
for datapoint in dataset:
if datapoint.data_file == "":
showinfo("Data Plotter", "Please select a data file")
return False
file = read_file(datapoint.data_file)
try:
plt.stackplot(file[datapoint.column1],
file[datapoint.column2],
labels=[datapoint.label])
            except Exception:
                showinfo("Data Plotter", "One or more columns didn't exist")
                return False
plt.xlabel(datapoint.column1.title())
plt.ylabel(datapoint.column2.title())
plt.legend(loc='upper left')
else:
raise Exception("GraphValue not recognized")
return True
def graphWindow():
plt.get_current_fig_manager().window.wm_iconbitmap("./icon.ico")
plt.get_current_fig_manager().set_window_title(
f"Data Plotter - {graph_label.get()}")
plt.title(f"Data Plotter - {graph_label.get()}")
if theme_label.get() == "Select Graph Theme":
plt.style.use('bmh')
else:
plt.style.use(theme_label.get())
def plotGraph():
if graph_label.get() == "Select Graph":
showinfo("Data Plotter", "Select type of graph")
else:
data_table.update_from_table()
graphWindow()
if createGraph():
plt.show()
def addRow():
data_table.update_from_table()
if len(dataset) >= 10:
showinfo("Data Plotter", "Can't add more than 10 plots")
return
dataset.append(DataPoint())
data_table.update_from_dataset()
def changeTheme():
global theme
global data_table
theme = "dark" if theme == "light" else "light"
data_table.update_from_table()
data_table.update_from_dataset()
if theme == "light":
graphs.config(bg="WHITE", fg="BLACK")
graphs.config(activebackground="WHITE", activeforeground="BLACK")
graphs["menu"].config(bg="WHITE", fg="BLACK")
themes.config(bg="WHITE", fg="BLACK")
themes.config(activebackground="WHITE", activeforeground="BLACK")
themes["menu"].config(bg="WHITE", fg="BLACK")
arrowButton.config(activebackground="GRAY", activeforeground="BLACK")
arrowButton.config(background="WHITE", foreground="BLACK")
plotButton.config(activebackground="GRAY", activeforeground="BLACK")
plotButton.config(background="WHITE", foreground="BLACK")
minusButton.config(activebackground="GRAY", activeforeground="BLACK")
minusButton.config(background="WHITE", foreground="BLACK")
themeButton.config(activebackground="GRAY", activeforeground="BLACK")
themeButton.config(background="WHITE", foreground="BLACK")
root.configure(background='#2185ff')
text1.config(background="#2185ff", foreground="black")
text2.config(background="#2185ff", foreground="black")
text3.config(background="#2185ff", foreground="black")
text4.config(background="#2185ff", foreground="black")
preview.config(background="#ff47ed", foreground="black")
else:
graphs.config(bg="BLACK", fg="WHITE")
graphs.config(activebackground="BLACK", activeforeground="WHITE")
graphs["menu"].config(bg="BLACK", fg="WHITE")
themes.config(bg="BLACK", fg="WHITE")
themes.config(activebackground="BLACK", activeforeground="WHITE")
themes["menu"].config(bg="BLACK", fg="WHITE")
arrowButton.config(activebackground="GRAY", activeforeground="WHITE")
arrowButton.config(background="BLACK", foreground="WHITE")
plotButton.config(activebackground="GRAY", activeforeground="WHITE")
plotButton.config(background="BLACK", foreground="WHITE")
minusButton.config(activebackground="GRAY", activeforeground="WHITE")
minusButton.config(background="BLACK", foreground="WHITE")
themeButton.config(activebackground="GRAY", activeforeground="WHITE")
themeButton.config(background="BLACK", foreground="WHITE")
root.configure(background='#070091')
text1.config(background="#070091", foreground="white")
text2.config(background="#070091", foreground="white")
text3.config(background="#070091", foreground="white")
text4.config(background="#070091", foreground="white")
preview.config(background="#8c03fc", foreground="white")
def removeRow():
global dataset
data_table.update_from_table()
dataset = dataset[:-1]
data_table.update_from_dataset()
# Buttons
arrowButton = Button(root,
text="+",
relief="groove",
activebackground="gray",
activeforeground="white",
background="black",
foreground="white",
font=("Arial", '14'),
command=addRow)
arrowButton.place(x=395, y=580)
minusButton = Button(root,
text="-",
relief="groove",
activebackground="gray",
activeforeground="white",
background="black",
foreground="white",
font=("Arial", '14'),
command=removeRow)
minusButton.place(x=455, y=580)
plotButton = Button(root,
text="Plot",
relief="groove",
activebackground="gray",
activeforeground="white",
background="black",
foreground="white",
font=('Arial', '16'),
command=plotGraph)
plotButton.place(x=410, y=640)
themeButton = Button(root,
text="Switch Theme",
relief="groove",
activebackground="gray",
activeforeground="white",
background="black",
foreground="white",
command=changeTheme)
themeButton.place(x=30, y=30)
root.mainloop()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('trains', '0002_trainclass_fare'),
]
operations = [
migrations.CreateModel(
name='Bogey',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('className', models.CharField(max_length=255, verbose_name='Seat Class')),
('seatQuota', models.CharField(max_length=255, verbose_name='Seat Quota')),
('fare', models.PositiveIntegerField(default=0, verbose_name='Fare')),
],
),
migrations.RemoveField(
model_name='trainclass',
name='className',
),
migrations.RemoveField(
model_name='trainclass',
name='fare',
),
migrations.RemoveField(
model_name='trainclass',
name='seatQuota',
),
migrations.AddField(
model_name='trainclass',
name='bogey',
field=models.ForeignKey(default=1, to='trains.Bogey'),
preserve_default=False,
),
]
|