CONTENT_UNIVERSE_DIGITALISATION_LEVELS_FILENAME_HDF5 = \
'bCLEARer_digitisation_level_stereotypes_v0_03_AMi.hdf5'
CONTENT_UNIVERSE_BCLEARER_INDIVIDUAL_POWERSET_COMPONENT_FILENAME_HDF5 = \
'bCLEARer_individual_powerset hierarchy component_v0_03_AMi.hdf5'
CONTENT_UNIVERSE_BCLEARER_NAMING_PATTERN_COMPONENT_FILENAME_HDF5 = \
'bCLEARer_name_pattern_component_v0_03_AMi.hdf5'
CONTENT_UNIVERSE_BCLEARER_TOP_LEVEL_ONTIC_CATEGORIES_FILENAME_HDF5 = \
'bCLEARer_top_level_ontic_category_system_v0_05_AMi.hdf5'
CONTENT_UNIVERSE_BOSON_FILENAME_HDF5 = \
'bOSON_1_2-deep_onomatology-coordinates-v0.11-boson_1_2.hdf5'
CONTENT_UNIVERSE_BCLEARER_OS_INSPIRE_LINK_FILENAME_HDF5 = \
'bOSON_1_2-deep_onomatology-coordinates-v0.11-inspire link.hdf5'
CONTENT_UNIVERSE_OS_INSPIRE_FILENAME_HDF5 = \
'bOSON_1_2-deep_onomatology-coordinates-v0.11-inspire.hdf5'
ADJUSTMENT_UNIVERSE_OS_INSPIRE_ATTRIBUTES_TO_REMOVE_FILENAME_HDF5 = \
'OS OpenData Product Models_attributes_to_remove_boson1_2_v0_01.hdf5'
ADJUSTMENT_UNIVERSE_OS_INSPIRE_ATTRIBUTES_TO_CONVERT_FILENAME_HDF5 = \
'OS OpenData Product Models_attributes_to_convert_boson1_2_v0_02.hdf5'
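# Usage sketch (not part of the original constants file): these names only identify
# HDF5 files, so a typical consumer would join them onto a data directory and read
# them back with pandas. The directory below is a hypothetical placeholder.
if __name__ == '__main__':
    from pathlib import Path
    import pandas as pd

    content_universe_dir = Path('.')  # assumed location of the .hdf5 files
    boson_frame = pd.read_hdf(content_universe_dir / CONTENT_UNIVERSE_BOSON_FILENAME_HDF5)
    print(boson_frame.head())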
|
import logging
import random
import re
import string
import weakref
from copy import copy
from pathlib import Path, PurePosixPath
from platform import node as get_hostname
from pprint import pformat
from time import time
from .DAGRIo import DAGRIo
from .utils import artist_from_url, get_remote_io, shorten_url
logger = logging.getLogger(__name__)
class DAGRCache():
@staticmethod
def with_queue_only(config, mode, deviant, mval=None, dagr_io=None,
warn_not_found=None, preload_fileslist_policy=None):
return DAGRCache.get_cache(
config, mode, deviant, mval=mval, dagr_io=dagr_io,
load_files=['existing_pages', 'no_link',
'queue', 'premium', 'httperrors'],
warn_not_found=warn_not_found, preload_fileslist_policy=preload_fileslist_policy)
@staticmethod
def with_artists_only(config, mode, deviant, mval=None, dagr_io=None,
warn_not_found=None, preload_fileslist_policy=None):
return DAGRCache.get_cache(
config, mode, deviant, mval=mval, dagr_io=dagr_io, load_files=[
'artists'],
warn_not_found=warn_not_found, preload_fileslist_policy=preload_fileslist_policy)
@staticmethod
def with_filenames_only(config, mode, deviant, mval=None, dagr_io=None,
warn_not_found=None, preload_fileslist_policy=None):
return DAGRCache.get_cache(
config, mode, deviant, mval=mval, dagr_io=dagr_io, load_files=[
'files_list'],
warn_not_found=warn_not_found, preload_fileslist_policy=preload_fileslist_policy)
@staticmethod
def with_nolink_only(config, mode, deviant, mval=None, dagr_io=None,
warn_not_found=None, preload_fileslist_policy=None):
return DAGRCache.get_cache(
config, mode, deviant, mval=mval, dagr_io=dagr_io, load_files=[
'no_link'],
warn_not_found=warn_not_found, preload_fileslist_policy=preload_fileslist_policy)
@staticmethod
def get_cache(config, mode, deviant, mval=None, dagr_io=None,
load_files=None, warn_not_found=None, preload_fileslist_policy=None):
cache_io = get_remote_io(
dagr_io if dagr_io is not None else DAGRIo, config, mode, deviant, mval)
return DAGRCache(config, cache_io, load_files=load_files, warn_not_found=warn_not_found, preload_fileslist_policy=preload_fileslist_policy)
def __init__(self, dagr_config, cache_io, load_files=None, warn_not_found=None, preload_fileslist_policy=None):
self.__id = ''.join(random.choices(
string.ascii_uppercase + string.digits, k=5))
logger.debug('Created DAGRCache %s', self.__id)
self.dagr_config = dagr_config
self.__cache_io = cache_io
self.__closed = False
# self.__lock = None
# self.__lock_path = None
self.__warn_not_found = warn_not_found
config_preload_fileslist_policy = self.dagr_config.get(
'dagr.cache', 'preload_fileslist_policy')
if config_preload_fileslist_policy == 'prohibit':
self.preload_fileslist_policy = 'disabled'
else:
self.preload_fileslist_policy = preload_fileslist_policy if not preload_fileslist_policy is None else config_preload_fileslist_policy
self.preload_http_endpoint = self.dagr_config.get(
'dagr.cache', 'preload_http_endpoint')
self.settings_name = self.dagr_config.get(
'dagr.cache', 'settings') or '.settings'
self.settings = next(self.__load_cache(
use_backup=False, settings=self.settings_name))
self.__use_short_urls = self.settings.get('shorturls')
self.fn_name = self.settings.get('filenames', '.filenames')
self.ep_name = self.settings.get(
'downloadedpages', '.dagr_downloaded_pages')
self.artists_name = self.settings.get('artists', '.artists')
self.crawled_name = self.settings.get('crawled', '.crawled')
self.nolink_name = self.settings.get('nolink', '.nolink')
self.queue_name = self.settings.get('queue', '.queue')
self.premium_name = self.settings.get('premium', '.premium')
self.httperrors_name = self.settings.get('httperrors', '.httperrors')
if load_files is None:
load_files = [
'existing_pages', 'no_link', 'queue', 'premium', 'httperrors', 'files_list', 'artists', 'last_crawled'
]
self.__excluded_fnames = [
'.lock',
self.settings_name,
self.fn_name,
self.ep_name,
self.artists_name,
self.crawled_name,
self.nolink_name,
self.queue_name,
self.premium_name,
self.httperrors_name
]
self.__excluded_fnames_regex = list(
map(re.compile, map(re.escape, self.__excluded_fnames)))
self.__excluded_fnames_regex.append(re.compile(r'.*\.tmp'))
self.__existing_pages_lower = None
self.__existing_pages = None if not 'existing_pages' in load_files else self.__load_ep()
self.__no_link = None if not 'no_link' in load_files else self.__load_nolink()
self.__queue = None if not 'queue' in load_files else self.__load_queue()
self.__premium = None if not 'premium' in load_files else self.__load_premium()
self.__httperrors = None if not 'httperrors' in load_files else self.__load_httperrors()
self.__files_list = None if not 'files_list' in load_files else self.__load_fileslist()
self.__artists = None if not 'artists' in load_files else self.__load_artists()
self.__last_crawled = None if not 'last_crawled' in load_files else self.__load_lastcrawled()
self.__files_list_lower = None
self.downloaded_pages = []
self.__queue_stale = False
self.__premium_stale = False
self.__nolink_stale = False
self.__httperrors_stale = False
if not self.__existing_pages is None and not self.__use_short_urls == self.dagr_config.get('dagr.cache', 'shorturls'):
self.__convert_urls()
def __del__(self):
logger.debug('Destroying DAGRCache %s', self.__id)
def __enter__(self):
self.__cache_io.lock()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.__cache_io.release_lock()
self.close()
def close(self):
if not self.__closed:
self.__closed = True
self.cache_io.close()
self.__cache_io = None
self.__existing_pages = None
self.__no_link = None
self.__queue = None
self.__premium = None
self.__httperrors = None
self.__files_list = None
self.__artists = None
self.__last_crawled = None
self.__files_list_lower = None
self.downloaded_pages = None
def files_gen(self):
if self.__files_list is None:
self.__files_list = self.__load_fileslist()
return (f for f in self.__files_list if not any(r.match(f) for r in self.__excluded_fnames_regex))
@property
def base_dir(self): return PurePosixPath(self.__cache_io.base_dir)
@property
def rel_dir(self): return self.__cache_io.rel_dir
@ property
def files_list(self):
return list(self.files_gen())
@ property
def existing_pages(self):
if self.__existing_pages is None:
self.__existing_pages = self.__load_ep()
return self.__existing_pages
@ property
def existing_pages_lower(self):
if self.__existing_pages_lower is None:
logger.log(
level=15, msg='Generating lowercase existing pages cache')
self.__existing_pages_lower = [
l.lower() for l in self.existing_pages]
return self.__existing_pages_lower
@ property
def artists(self):
if self.__artists is None:
self.__artists = self.__load_artists()
return self.__artists
@ property
def last_crawled(self):
if self.__last_crawled is None:
self.__last_crawled = self.__load_lastcrawled()
return self.__last_crawled
@ property
def cache_io(self):
return weakref.proxy(self.__cache_io)
def __load_cache_file(self, cache_file, use_backup=True, warn_not_found=True):
return self.__cache_io.load_primary_or_backup(cache_file, use_backup=use_backup, warn_not_found=warn_not_found)
def __load_cache(self, use_backup=True, warn_not_found=True, default=None, **kwargs):
def filenames():
logger.log(level=15, msg='Building filenames cache')
files_list_raw = self.__cache_io.list_dir()
return files_list_raw
cache_defaults = {
'settings': lambda: self.dagr_config.get('dagr.cache'),
'filenames': filenames,
'existing_pages': lambda: [],
'artists': lambda: {},
'last_crawled': lambda: {'short': 'never', 'full': 'never'},
'no_link': lambda: [],
'queue': lambda: [],
'premium': lambda: [],
'httperrors': lambda: {}
}
for cache_type, cache_file in kwargs.items():
cache_contents = self.__load_cache_file(
cache_file, use_backup=use_backup, warn_not_found=warn_not_found)
if cache_contents:
yield cache_contents
else:
if not default is None:
yield default
else:
if not cache_type in cache_defaults:
raise ValueError(
'Unknown cache type: {}'.format(cache_type))
yield cache_defaults[cache_type]()
logger.log(level=5, msg=pformat(locals()))
def __load_ep(self):
logger.log(level=15, msg='Loading existing pages')
return next(
self.__load_cache(
existing_pages=self.ep_name,
warn_not_found=True if self.__warn_not_found is None else self.__warn_not_found))
def __load_nolink(self):
logger.log(level=15, msg='Loading nolink')
return next(self.__load_cache(
no_link=self.nolink_name,
warn_not_found=False if self.__warn_not_found is None else self.__warn_not_found))
def __load_queue(self):
logger.log(level=15, msg='Loading queue')
return next(self.__load_cache(
queue=self.queue_name,
warn_not_found=False if self.__warn_not_found is None else self.__warn_not_found))
def __load_premium(self):
logger.log(level=15, msg='Loading premium')
return next(self.__load_cache(
premium=self.premium_name,
warn_not_found=False if self.__warn_not_found is None else self.__warn_not_found))
def __load_fileslist(self):
logger.log(level=15, msg='Populating files list cache')
files_in_dir = set()
filenames_default = None
if self.preload_fileslist_policy == 'enable':
if self.preload_http_endpoint:
try:
files_in_dir.update(
fn for fn in self.__cache_io.list_dir() if not fn in self.__excluded_fnames)
filenames_default = []
logger.log(
level=15, msg=f"Added {len(files_in_dir)} entries to preload list")
except Exception:
logger.warning(
'Unable to fetch filenames preload list', exc_info=True)
logger.log(level=15, msg='Loading filenames')
files_in_dir.update(fn for fn in next(self.__load_cache(
filenames=self.fn_name,
warn_not_found=True if self.__warn_not_found is None else self.__warn_not_found,
default=filenames_default
)) if not fn in self.__excluded_fnames)
return files_in_dir
def __load_artists(self):
logger.log(level=15, msg='Loading artists')
return next(self.__load_cache(
artists=self.artists_name,
warn_not_found=False if self.__warn_not_found is None else self.__warn_not_found))
def __load_httperrors(self):
logger.log(level=15, msg='Loading http errors')
return next(self.__load_cache(
httperrors=self.httperrors_name,
warn_not_found=False if self.__warn_not_found is None else self.__warn_not_found))
def __load_lastcrawled(self):
logger.log(level=15, msg='Loading last crawled')
return next(self.__load_cache(
last_crawled=self.crawled_name,
warn_not_found=False if self.__warn_not_found is None else self.__warn_not_found))
def __ep_exists(self):
return self.__cache_io.exists(self.ep_name, update_cache=False)
def __fn_exists(self):
return self.__cache_io.exists(self.fn_name, update_cache=False)
def __artists_exists(self):
return self.__cache_io.exists(self.artists_name, update_cache=False)
def __settings_exists(self):
return self.__cache_io.exists(self.settings_name, update_cache=False)
def __update_cache(self, cache_file, cache_contents, do_backup=True):
if isinstance(cache_contents, set):
cache_contents = list(cache_contents)
self.__cache_io.save_json(cache_file, cache_contents, do_backup)
def __convert_urls(self):
logger.warning(
'Converting cache {} url format'.format(self.base_dir))
short = self.dagr_config.get('dagr.cache', 'shorturls')
base_url = self.dagr_config.get('deviantart', 'baseurl')
# existing_pages is a read-only property, so update the backing attribute directly
self.__existing_pages = (
[shorten_url(p) for p in self.existing_pages] if short else
['{}/{}'.format(base_url, p) for p in self.existing_pages]
)
self.settings['shorturls'] = short
self.__update_cache(self.ep_name, self.existing_pages)
self.__update_cache(self.settings_name, self.settings, False)
self.update_artists(True)
def update_artists(self, force=False):
updated_pages = self.existing_pages if force else self.downloaded_pages
logger.log(15, 'Sorting %s artist pages', len(updated_pages))
existing_artists = self.artists
for page in updated_pages:
artist_url_p, artist_name, shortname = artist_from_url(page)
err = f"Cache entry not found {self.base_dir} : {page} : {shortname}"
try:
rfn = self.real_filename(shortname)
if rfn is None:
logger.error(err)
raise Exception(err)
except StopIteration:
logger.error(err, exc_info=True)
raise
if not artist_name in existing_artists:
existing_artists[artist_name] = {
'Home Page': str(artist_url_p), 'Artworks': {}}
existing_artists[artist_name]['Artworks'][rfn] = page
self.__update_cache(self.artists_name, existing_artists)
def rename_deviant(self, old, new):
rn_count = 0
for pcount in range(0, len(self.existing_pages)):
ep = self.existing_pages[pcount]
artist_url_p = PurePosixPath(ep).parent.parent
if artist_url_p.name == old:
result = ep.replace(old, new)
logger.log(4, 'Changing {} to {}'.format(ep, result))
self.existing_pages[pcount] = result
rn_count += 1
if rn_count > 0:
self.downloaded_pages = True
return rn_count
def save(self, save_artists=False):
fn_missing = not self.__fn_exists()
ep_missing = not self.__ep_exists()
artists_missing = not self.__artists_exists()
settings_missing = not self.__settings_exists()
fix_fn = fn_missing and bool(self.__files_list)
fix_ep = ep_missing and bool(self.__existing_pages)
fix_artists = artists_missing and bool(
self.__files_list) and bool(self.__existing_pages)
if settings_missing:
self.__update_cache(self.settings_name, self.settings, False)
if self.downloaded_pages or fix_fn:
self.__update_cache(self.fn_name, self.__files_list)
if self.downloaded_pages or fix_ep:
self.__update_cache(self.ep_name, self.__existing_pages)
if save_artists:
if self.downloaded_pages or fix_artists or save_artists == 'force':
self.update_artists(save_artists == 'force')
logger.log(level=5, msg=pformat(locals()))
def save_extras(self, full_crawl):
if self.__nolink_stale:
self.save_nolink()
if self.__queue_stale:
self.save_queue()
if self.__premium_stale:
self.save_premium()
if self.__httperrors_stale:
self.save_httperrors()
if not full_crawl is None:
self.save_crawled(full_crawl)
def save_nolink(self):
if not self.__no_link is None:
self.__update_cache(self.nolink_name, self.__no_link)
self.__nolink_stale = False
def save_queue(self):
if not self.__queue is None:
self.__update_cache(self.queue_name, self.__queue)
self.__queue_stale = False
def save_premium(self):
if not self.__premium is None:
self.__update_cache(self.premium_name, self.__premium)
self.__premium_stale = False
def save_httperrors(self):
if not self.__httperrors is None:
self.__update_cache(self.httperrors_name, self.__httperrors)
self.__httperrors_stale = False
def save_crawled(self, full_crawl=False):
if full_crawl:
self.last_crawled['full'] = time()
else:
self.last_crawled['short'] = time()
self.__update_cache(self.crawled_name, self.last_crawled)
def add_premium(self, page):
if self.__premium is None:
self.__premium = self.__load_premium()
if page in self.__premium:
return
self.remove_page_extras(page, 'premium')
self.__premium.append(page)
self.__premium_stale = True
def get_premium(self):
if self.__premium is None:
self.__premium = self.__load_premium()
return copy(self.__premium)
def get_httperrors(self):
if self.__httperrors is None:
self.__httperrors = self.__load_httperrors()
return copy(self.__httperrors)
@ property
def httperrors_exclude(self):
return set([*self.downloaded_pages, *self.existing_pages])
def add_httperror(self, page, page_error):
if self.__httperrors is None:
self.__httperrors = self.__load_httperrors()
if not page in self.httperrors_exclude:
self.__httperrors_stale = True
if not page in self.__httperrors:
self.__httperrors[page] = []
self.__httperrors[page].append({
'host': get_hostname(),
'time': time(),
'error_code': page_error.http_code
})
@ property
def nl_exclude(self):
if self.__no_link is None:
self.__no_link = self.__load_nolink()
return set([*self.downloaded_pages, *self.existing_pages, *self.__no_link, *self.__premium, *self.__httperrors])
def add_nolink(self, page):
if self.__no_link is None:
self.__no_link = self.__load_nolink()
if self.__premium is None:
self.__premium = self.__load_premium()
if self.__httperrors is None:
self.__httperrors = self.__load_httperrors()
if page in self.nl_exclude:
return
self.__nolink_stale = True
self.remove_page_extras(page, 'nolink')
self.__no_link.append(page)
def remove_nolink(self, pages):
if self.__no_link is None:
self.__no_link = self.__load_nolink()
remove = set([p for p in (pages if isinstance(pages, list)
else list(pages)) if p in self.__no_link])
rcount = len(remove)
if rcount > 0:
self.__nolink_stale = True
self.__no_link = list(set(self.__no_link) - remove)
return rcount
def prune_nolink(self):
if self.__no_link is None:
self.__no_link = self.__load_nolink()
nlcount = len(self.__no_link)
keep = set(self.__no_link) - self.nl_exclude
kcount = len(keep)
delta = nlcount - kcount
if not delta == 0:
self.__nolink_stale = True
self.__no_link = list(keep)
return delta
def get_nolink(self):
if self.__no_link is None:
self.__no_link = self.__load_nolink()
return copy(self.__no_link)
def get_queue(self):
if self.__queue is None:
self.__queue = self.__load_queue()
return copy(self.__queue)
@ property
def q_exclude(self):
result = set()
if self.__premium is None:
self.__premium = self.__load_premium()
result.update((u.lower() for u in self.__premium))
if self.__httperrors is None:
self.__httperrors = self.__load_httperrors()
errors_404 = [k for k, v in self.__httperrors.items() if any(
e.get('error_code', None) == 404 for e in v)]
result.update((u.lower() for u in errors_404))
result.update((u.lower() for u in self.existing_pages))
return result
def add_queue(self, page):
if self.__queue is None:
self.__queue = self.__load_queue()
if page.lower() in self.q_exclude:
return
self.__queue_stale = True
self.__queue.append(page)
def update_queue(self, pages):
if self.__queue is None:
self.__queue = self.__load_queue()
exclude = self.q_exclude
logger.log(level=15, msg=f"Queue exclude length is {len(exclude)}")
keep = set(kp for kp in (p.lower()
for p in self.__queue) if not kp in exclude)
enqueue = set(ep for ep in (p.lower() for p in pages)
if not ep in exclude and not ep in keep)
ecount = len(enqueue)
if ecount > 0:
self.__queue_stale = True
self.__queue = [*keep, *enqueue]
self.save_queue()
return ecount
def prune_queue(self):
if self.__queue is None:
self.__queue = self.__load_queue()
qcount = len(self.__queue)
exclude = self.q_exclude
keep = set(u for u in self.__queue if not u.lower() in exclude)
kcount = len(keep)
delta = qcount - kcount
if not delta == 0:
self.__queue_stale = True
self.__queue = list(keep)
return delta
def remove_page_extras(self, page, reason):
if self.__no_link is None:
self.__no_link = self.__load_nolink()
if self.__queue is None:
self.__queue = self.__load_queue()
if self.__premium is None:
self.__premium = self.__load_premium()
if self.__httperrors is None:
self.__httperrors = self.__load_httperrors()
if page in self.__queue:
self.__queue.remove(page)
self.__queue_stale = True
logger.log(level=5, msg=f"Removed {page} from queue")
if not reason == 'nolink' and page in self.__no_link:
self.__no_link.remove(page)
self.__nolink_stale = True
logger.log(level=5, msg=f"Removed {page} from no-link list")
if not reason == 'premium' and page in self.__premium:
self.__premium.remove(page)
self.__premium_stale = True
logger.log(level=5, msg=f"Removed {page} from premium list")
if not reason == 'httperror' and page in self.__httperrors:
del self.__httperrors[page]
self.__httperrors_stale = True
logger.log(level=5, msg=f"Removed {page} from httperrors list")
def add_link(self, page):
self.remove_page_extras(page, 'found')
if self.__use_short_urls:
page = shorten_url(page)
if page not in self.existing_pages:
self.downloaded_pages.append(page)
self.existing_pages.append(page)
if page in self.__queue:
self.__queue.remove(page)
elif self.dagr_config.get('dagr', 'overwrite'):
self.downloaded_pages.append(page)
def check_link(self, page):
if self.__use_short_urls:
page = shorten_url(page)
if page in self.existing_pages:
return True
# logger.log(
# level=5, msg='Checking for lowercase link {}'.format(page))
return page.lower() in self.existing_pages_lower
def filter_links(self, links):
return [l for l in links if not self.check_link(l)]
def add_filename(self, fn):
if self.__files_list is None:
self.__files_list = self.__load_fileslist()
if fn in self.__files_list:
logger.log(
level=15, msg=f"{fn} already in filenames cache")
else:
logger.log(level=5, msg=f"Adding {fn} to filenames cache")
self.__files_list.add(fn)
self.__cache_io.update_fn_cache(fn)
# if not self.__files_list_lower is None:
# self.__files_list_lower[fn.lower()] = fn
def real_filename(self, shortname):
sn_lower = shortname.lower()
return next(fn for fn in self.files_list if sn_lower in fn.lower())
# if self.__files_list_lower is None:
# logger.log(level=15, msg='Generating lowercase fn cache')
# lower_gen = ((fn.lower(), fn)
# for fn in self.files_gen())
# self.__files_list_lower = dict(lower_gen)
# logger.log(level=15, msg=f"Generated {len(self.__files_list_lower)} lowercase fn cache items")
# entry = self.__files_list_lower.get(sn_lower, None)
# if not entry is None:
# logger.log(level=15, msg=f"Got lowercase fn cache hit {entry} for {sn_lower}")
# return entry
# fll_values = self.__files_list_lower.values()
# for rfn, lfn in lower_gen:
# self.__files_list_lower[lfn] = rfn
# if lfn == sn_lower:
# logger.log(level=15, msg=f"Got lcfn gen hit {rfn} for {sn_lower}")
# return rfn
# return None
def prune_filename(self, fname):
self.__files_list.discard(fname)
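# Usage sketch (not part of the original module): DAGRCache is meant to be used as a
# context manager so __enter__/__exit__ acquire and release the cache_io lock around
# the work. The config object and deviant name below are hypothetical placeholders.
#
#   config = DAGRConfig()  # assumed config class providing .get('dagr.cache', ...)
#   with DAGRCache.with_queue_only(config, 'gallery', 'some_deviant') as cache:
#       added = cache.update_queue(['https://www.deviantart.com/some_deviant/art/example-1'])
#       logger.info('Enqueued %s new pages', added)
#       cache.save_extras(None)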
|
# Copyright 2019-2020 SURF.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http import HTTPStatus
from aiocache import Cache
from fastapi.routing import APIRouter
from starlette.background import BackgroundTasks
from server.settings import app_settings
router = APIRouter()
@router.delete("/cache/{name}", status_code=HTTPStatus.NO_CONTENT)
async def clear_cache(name: str, background_tasks: BackgroundTasks) -> None:
cache = Cache(Cache.REDIS, endpoint=app_settings.CACHE_HOST, port=app_settings.CACHE_PORT)
if name == "all":
background_tasks.add_task(cache.delete, "/*")
else:
background_tasks.add_task(cache.delete, f"/{name}*")
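# Usage sketch (not part of the original endpoint): clearing the whole cache or a single
# named prefix over HTTP. The host, port, and mount prefix are hypothetical; only the
# "/cache/{name}" path comes from the route above.
#
#   import requests
#   requests.delete('http://localhost:8080/cache/all')        # flush every key
#   requests.delete('http://localhost:8080/cache/processes')  # flush keys under "/processes"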
|
from flask import request
from flask_restplus import Namespace, Resource, fields
ns_1 = Namespace('v1', description='challenge 2')
mod = ns_1.model('my model', {
'name': fields.String(description='name')
})
@ns_1.route('/home')
class Sales(Resource):
@ns_1.expect(mod,validate=True)
def post(self):
data = request.get_json()
return data,404
@ns_1.route('/home/s')
class Sale(Resource):
@ns_1.expect(mod,validate=True)
def put(self):
data = request.get_json()
return data,404
@ns_1.route('/home/sa')
class Sal(Resource):
def get(self):
data = {'':''}
return data,200 |
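# Usage sketch for the flask_restplus API above (assumption: the namespace is registered
# at /v1 on a local development server; host and port are hypothetical).
#
#   import requests
#   r = requests.post('http://localhost:5000/v1/home', json={'name': 'widget'})
#   print(r.status_code, r.json())  # the handler echoes the payload back with status 404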
# Get spotify credentials at https://developer.spotify.com/dashboard/applications/
SPOTIFY_CLIENT_ID = 'xxx'
SPOTIFY_CLIENT_SECRET = 'xxx'
SPOTIFY_REDIRECT_URI = 'xxx'
# Get your username at https://www.spotify.com/is/account/overview/
USERNAME = 'xxx'
# Get Genius access token at http://genius.com/api-clients
GENIUS_CLIENT_ACCESS_TOKEN = 'xxx'
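# Usage sketch (not part of the original config): these values are typically handed to
# spotipy and lyricsgenius. The OAuth scope below is an illustrative assumption.
#
#   import spotipy
#   from spotipy.oauth2 import SpotifyOAuth
#   import lyricsgenius
#
#   sp = spotipy.Spotify(auth_manager=SpotifyOAuth(
#       client_id=SPOTIFY_CLIENT_ID,
#       client_secret=SPOTIFY_CLIENT_SECRET,
#       redirect_uri=SPOTIFY_REDIRECT_URI,
#       scope='user-read-currently-playing'))
#   genius = lyricsgenius.Genius(GENIUS_CLIENT_ACCESS_TOKEN)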
|
import tweepy
import requests
from bs4 import BeautifulSoup as bs
class Collector:
def __init__(self, **kwargs):
auth = tweepy.OAuthHandler(kwargs['consumer_key'], kwargs['consumer_secret'])
auth.set_access_token(kwargs['token_key'], kwargs['token_secret'])
try:
self.api = tweepy.API(auth)
print('authentication successful')
except Exception as err:
raise RuntimeError('authentication failed') from err
self.urls = []
self.target = None
def get_urls(self, **kwargs):
"""
target: str
count: int
min_rt: int
min_fav: int
"""
self.target = kwargs['target']
tweets = self.api.user_timeline(screen_name=kwargs['target'], count=kwargs['count'], include_rts=False)
min_rt = kwargs['min_rt']
min_fav = kwargs['min_fav']
for tweet in tweets:
if tweet.favorite_count >= min_fav or tweet.retweet_count >= min_rt:
try:
self.urls.append(tweet.entities['urls'][0]['url'])  # take the first URL entity only
except (KeyError, IndexError):
# skip tweets that carry no URL entity
continue
#print(tweet.entities['urls'][0]['url'])
def parse_text(self):
if self.target is not None:
if self.target == 'TirtoID':
target_tag = 'content-text-editor'
elif self.target == 'kompascom':
target_tag = 'bla' # DUMMY
for url in self.urls:
print(f'scraping {url}')
html = requests.get(url).text
soup = bs(html, 'html.parser')
isi_konten = soup.find_all('div', target_tag)
for konten in isi_konten:
print(konten.text)
def __getitem__(self, index):
return self.urls[index]
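# Usage sketch (not part of the original class): the credentials and thresholds below are
# hypothetical placeholders for a real Twitter app's keys.
if __name__ == '__main__':
    collector = Collector(consumer_key='xxx', consumer_secret='xxx',
                          token_key='xxx', token_secret='xxx')
    collector.get_urls(target='TirtoID', count=50, min_rt=10, min_fav=20)
    collector.parse_text()
    print(collector[0] if collector.urls else 'no urls collected')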
|
# Testing Code for MotionModel.py
# Matt Schmittle
# DO NOT EDIT
import math
import unittest
from threading import Lock
import matplotlib.pyplot as plt
import numpy as np
from mushr_pf.motion_model import KinematicMotionModel
# Radius to count particles around ground truth
RADIUS = 0.25
SHOW_PLOT = False
def euclidean_distance(a, b):
return math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
def model_error(model_particles, true_pose):
r_disc = []
for particle in model_particles:
distance = euclidean_distance(true_pose, particle)
if distance <= RADIUS:
r_disc.append(distance)
return len(r_disc)
class TestMotionModel(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMotionModel, self).__init__(*args, **kwargs)
self.num_particles = 100
self.ctrls = [[1.0, 0.34, 0.1], [3.0, 0.4, 0.5], [3.0, 0.4, 0.5]]
self.truths = [
[0.09937757, 0.0033936, 0.10634897],
[0.76003705, 0.99738116, 1.86917909],
[0.7661687, 0.97692261, 1.85254576],
]
def mm_1(self, mm):
mm.apply_motion_model(mm.particles, self.ctrls[0])
return model_error(mm.particles, self.truths[0])
def mm_2(self, mm):
mm.apply_motion_model(mm.particles, self.ctrls[1])
return model_error(mm.particles, self.truths[1])
def mm_3(self, mm):
mm.apply_motion_model(mm.particles, self.ctrls[2])
return model_error(mm.particles, self.truths[2])
def test_motion_model(self):
counts = [[] for _ in range(3)]
mms = [self.mm_1, self.mm_2, self.mm_3]
# 99 tests. Hint: set me to 100 to have insight on the question about resampling
for i in range(0, 99):
start_particles = np.zeros((self.num_particles, 3))
# Instantiate Your Motion Model
motion_model = self.new_mm(start_particles)
# Apply 3 different controls
counts[i % 3].append(mms[i % 3](motion_model))
self.assertTrue(np.mean(counts[0]) > 90)
self.assertTrue(
np.mean(counts[1]) > 15,
msg="Mean: %.3f There was a bug in the source code, the particle filter is functional,"
"but the RADIUS is either too small, or the parameters chosen are way too"
"large (even though the PF is really good)." % np.mean(counts[1]),
)
self.assertTrue(
np.mean(counts[2]) > 10,
msg="Mean: %f There was a bug in the source code, the particle filter is functional,"
"but the RADIUS is either too small, or the parameters chosen are way too"
"large (even though the PF is really good)." % np.mean(counts[2]),
)
if SHOW_PLOT:
for i in range(3):
# Plot
plt.figure()
arrow_thickness = 0.03
plt.xlabel("x")
plt.ylabel("y")
plt.ylim(bottom=-0.1)
plt.xlim(left=-0.1)
plt.ylim(top=1.5)
start_particles = np.zeros((self.num_particles, 3))
mm = self.new_mm(start_particles)
mms[i](mm)
plt.quiver(
mm.particles[:, 0],
mm.particles[:, 1],
arrow_thickness * np.cos(mm.particles[:, 2]),
arrow_thickness * np.sin(mm.particles[:, 2]),
color="b",
)
# Plot start
delta = 0
dx = arrow_thickness * math.cos(delta)
dy = arrow_thickness * math.sin(delta)
plt.quiver(0, 0, dx, dy, color="r")
# Plot finish ground truth
dx = arrow_thickness * math.cos(self.truths[i][2])
dy = arrow_thickness * math.sin(self.truths[i][2])
plt.quiver(self.truths[i][0], self.truths[i][1], dx, dy, color="g")
# Plot radius of acceptance around ground truth
ax = plt.gca()
ax.add_artist(
plt.Circle(self.truths[i][:2], RADIUS, color="g", fill=False)
)
plt.show()
def new_mm(self, start_particles):
state_lock = Lock()
# Instantiate Your Motion Model
mm = KinematicMotionModel(
"/fake_topic1",
"/fake_topic2",
0.0,
4350,
0.5,
-1.2135,
0.33,
start_particles,
state_lock,
)
return mm
if __name__ == "__main__":
unittest.main()
|
#Parameter definitions
keystone_host_port = '192.168.122.1:5000'
mapred_tenant_name = 'mapreduce'
mapred_image_name = 'hadoop104'
|
#!/usr/bin/env python
import sys
import time
import pomdp_parser
import policy_parser
import readline
# import speech_recognizer
import numpy
import random
from scipy import stats
sys.path.append('/home/ludc/software/python_progress/progress-1.2')
sys.path.append('/home/szhang/software/python_progress/progress-1.2')
from progress.bar import Bar
import subprocess
import gen_dis_plog
import conf
class Simulator(object):
def __init__(self,
auto_observations=True,
auto_state = False,
uniform_init_belief =True,
print_flag=True,
use_plog = True,
policy_file='policy/default.policy',
pomdp_file='models/default.pomdp',
trials_num=1000,
num_item=1,
num_person=1,
num_room=1):
# print(pomdp_file)
# print(policy_file)
self.auto_observations = auto_observations
self.auto_state = auto_state
self.uniform_init_belief = uniform_init_belief
self.print_flag = print_flag
self.use_plog = use_plog
self.trials_num = trials_num
self.num_item = num_item
self.num_person = num_person
self.num_room = num_room
self.tablelist = conf.tablelist
# to read the pomdp model
model = pomdp_parser.Pomdp(filename=pomdp_file, parsing_print_flag=False)
self.states = model.states
self.actions = model.actions
self.observations = model.observations
# print self.observations
self.trans_mat = model.trans_mat
self.obs_mat = model.obs_mat
self.reward_mat = model.reward_mat
# to read the learned policy
self.policy = policy_parser.Policy(len(self.states), len(self.actions),
filename=policy_file)
self.b = None
self.a = None
self.o = None
self.md = 'happy'
self.fl = True
self.trigger = 1  ### which dialog turn the event is triggered in
# self.dialog_turn = 0
self.plog = gen_dis_plog.DistrGen()
# to make the screen print simple
numpy.set_printoptions(precision=2)
if self.use_plog:
print('------------ context-aware-icorpp -----------------')
else:
print('------------ previous icorpp -----------------')
#######################################################################
def init_belief(self):
if self.uniform_init_belief:
self.b = numpy.ones(len(self.states)) / float(len(self.states))
if self.use_plog and (self.md == 'sad' or self.fl == False):
if self.b[len(self.tablelist)] == 1:
return
# print '\n',self.b
belief = self.plog.cal_belief(mood = self.md, foll = self.fl, pdpDist = self.b, curr_table = self.ct, prev_table = self.pt).split(',')
for i in range(len(belief)):
belief[i] = float(belief[i].strip())
self.b = numpy.array(belief)
self.b = self.b/ sum(self.b)
# print '\n',self.s, self.ct, self.b
else:
# here initial belief is sampled from a Dirichlet distribution
self.b = numpy.random.dirichlet( numpy.ones(len(self.states)) )
self.b = self.b.T
#######################################################################
def observe(self):
self.o = None
if self.auto_observations:
rand = numpy.random.random_sample()
acc = 0.0
for i in range(len(self.observations)):
acc += self.obs_mat[self.a, self.s, i]
if acc > rand:
self.o = i
break
if self.o == None:
sys.exit('Error: observation is not properly sampled')
else:
ind = input("Please input the name of observation: ")
self.o = next(i for i in range(len(self.observations)) \
if self.observations[i] == ind)
#######################################################################
def update(self,cycletime):
new_b = numpy.dot(self.b, self.trans_mat[self.a, :])
new_b = [new_b[i] * self.obs_mat[self.a, i, self.o] for i in range(len(self.states))]
# print 'sum of belief: ',sum(new_b)
self.b = (new_b / sum(new_b)).T
if cycletime == self.trigger and self.use_plog and (self.md == 'sad' or self.fl == False):
if self.b[len(self.tablelist)] == 1:
return
# print '\n',self.b
belief = self.plog.cal_belief(mood = self.md, foll = self.fl, pdpDist = self.b, curr_table = self.ct, prev_table = self.pt).split(',')
# belief = self.plog.cal_belief(mood = 'sad', pdpDist = self.b, curr_table = self.ct).split(',')
for i in range(len(belief)):
belief[i] = float(belief[i].strip())
self.b = numpy.array(belief)
self.b = self.b/ sum(self.b)
#######################################################################
def run(self):
cost = 0.0
self.init_belief()
reward = 0.0
overall_reward = 0.0
cycletime = 0
while True:
cycletime += 1
# print self.b
if self.print_flag:
print('\tstate:\t' + self.states[self.s] + ' ' + str(self.s))
print('\tcost so far:\t' + str(cost))
self.a = int(self.policy.select_action(self.b))
if self.print_flag:
print('\taction:\t' + self.actions[self.a] + ' ' + str(self.a))
self.observe()
if self.print_flag:
print('\tobserve:\t'+self.observations[self.o]+' '+str(self.o))
self.update(cycletime)
if self.print_flag:
print('\nbelief:\t' + str(self.b))
overall_reward += self.reward_mat[self.a, self.s]
# print('current cost: ' + str(self.reward_mat[self.a, self.s]))
# print('overall cost: ' + str(overall_reward))
# print self.actions[self.a]
if 'go' in self.actions[self.a]:
# print '--------------------',
if self.print_flag is True:
print('\treward: ' + str(self.reward_mat[self.a, self.s]))
reward += self.reward_mat[self.a, self.s]
break
else:
cost += self.reward_mat[self.a, self.s]
if cycletime == 20:
cost += self.reward_mat[self.a, self.s]
break
return reward, cost, overall_reward
#######################################################################
def run_numbers_of_trials(self):
cost_list = []
success_list = []
reward_list = []
overall_reward_list = []
string_i = ''
string_p = ''
string_r = ''
bar = Bar('Processing', max=self.trials_num)
for i in range(self.trials_num):
# get a sample as the current state, terminal state exclusive
if self.auto_state:
self.s = numpy.random.randint(low=0, high=len(self.states)-1,
size=(1))[0]
tuples = self.states[self.s].split('_')
ids = [int(tuples[0][1]),int(tuples[1][1]),int(tuples[2][1])]
self.ct = numpy.random.randint(low=0, high=len(self.tablelist),size=(1))[0] ###curr table
self.pt = self.ct - 1 if self.ct != 0 else len(self.tablelist)-1
self.md = 'happy'
self.fl = True
# print self.tablelist[self.ct], ids
if self.tablelist[self.ct][0] != ids[0] and self.tablelist[self.ct][1] != ids[1] and self.tablelist[self.ct][2] != ids[2]:
self.md = 'sad'
if self.tablelist[self.pt][0] == ids[0] and self.tablelist[self.pt][1] == ids[1] and self.tablelist[self.pt][2] == ids[2]:
self.fl = False
else:
self.s = int(input("Please specify the index of state: "))
# run this episode and save the reward
reward, cost, overall_reward = self.run()
reward_list.append(reward)
cost_list.append(cost)
overall_reward_list.append(overall_reward)
guide_index = int(self.a - (3 + self.num_item + self.num_person \
+ self.num_room))
if guide_index == int(self.s):
success_list.append(1.0)
else:
success_list.append(0.0)
bar.next()
bar.finish()
cost_arr = numpy.array(cost_list)
success_arr = numpy.array(success_list)
reward_arr = numpy.array(reward_list)
overall_reward_arr = numpy.array(overall_reward_list)
print('average cost: ' + str(numpy.mean(cost_arr))[1:] + \
' with std ' + str(numpy.std(cost_arr)))
print('average success: ' + str(numpy.mean(success_arr)) + \
' with std ' + str(numpy.std(success_arr)))
print('average reward: ' + str(numpy.mean(reward_arr)) + \
' with std ' + str(numpy.std(reward_arr)))
print('average overall reward: ' + str(numpy.mean(overall_reward_arr)) + \
' with std ' + str(numpy.std(overall_reward_arr)))
return (numpy.mean(cost_arr), numpy.mean(success_arr), \
numpy.mean(reward_arr))
def main():
s = Simulator(uniform_init_belief = True,
auto_state = True,
auto_observations = True,
print_flag = False,
use_plog = True,
policy_file = '333_new.policy',
pomdp_file = '333_new.pomdp',
trials_num = 1000,
num_item = 3,
num_person = 3,
num_room = 3)
if not s.uniform_init_belief:
print('note that initial belief is not uniform\n')
s.run_numbers_of_trials()
if __name__ == '__main__':
main()
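# Worked illustration (not part of the original script): Simulator.update() is a standard
# POMDP belief update, new_b(s') ∝ obs_mat[a, s', o] * sum_s trans_mat[a, s, s'] * b(s),
# followed by normalisation. A tiny two-state example with made-up matrices:
#
#   import numpy
#   b   = numpy.array([0.5, 0.5])                    # prior belief
#   T_a = numpy.array([[0.9, 0.1], [0.2, 0.8]])      # trans_mat[a]
#   O_a = numpy.array([[0.7, 0.3], [0.4, 0.6]])      # obs_mat[a], rows indexed by next state
#   o = 0                                            # observed symbol index
#   new_b = numpy.dot(b, T_a) * O_a[:, o]            # -> [0.385, 0.18]
#   new_b = new_b / new_b.sum()                      # -> approx. [0.681, 0.319]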
|
# Imports here
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import random
import train_args
import os
import sys
import torch
import torchvision
from torchvision import datasets, transforms, models
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torchvision.models as models
import time
import seaborn as sns
import numpy as np
import pandas as pd
from PIL import Image
from collections import OrderedDict
import json
def main():
data_dir = 'flowers'
#train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
parser = train_args.get_args()
cli_args = parser.parse_args()
# check for data directory
if not os.path.isdir(cli_args.data_directory):
print(f'Data directory {cli_args.data_directory} was not found.')
exit(1)
# check for save directory
if not os.path.isdir(cli_args.save_dir):
print(f'Directory {cli_args.save_dir} does not exist. Creating...')
os.makedirs(cli_args.save_dir)
# load categories
with open(cli_args.categories_json, 'r') as f:
cat_to_name = json.load(f)
# set output to the number of categories
output_size = len(cat_to_name)
print(f"Images are labeled with {output_size} categories.")
data_transforms = {
'training': transforms.Compose([
transforms.RandomRotation(35),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'validation': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'testing': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
}
#Load the datasets with ImageFolder
image_datasets = {
'training' : datasets.ImageFolder(cli_args.data_directory, transform=data_transforms['training']),
'testing' : datasets.ImageFolder(test_dir, transform=data_transforms['testing']),
'validation' : datasets.ImageFolder(valid_dir, transform=data_transforms['validation'])
}
#Using the image datasets and the trainforms, define the dataloaders
dataloaders = {
'training' : torch.utils.data.DataLoader(image_datasets['training'], batch_size=64, shuffle=True),
'testing' : torch.utils.data.DataLoader(image_datasets['testing'], batch_size=64, shuffle=False),
'validation' : torch.utils.data.DataLoader(image_datasets['validation'], batch_size=64, shuffle=True)
}
# Make model
if not cli_args.arch.startswith("vgg") and not cli_args.arch.startswith("densenet"):
print("Only supporting VGG and DenseNet")
exit(1)
print(f"Using a pre-trained {cli_args.arch} network.")
model = models.__dict__[cli_args.arch](pretrained=True)
# Note: 25088 is the flattened feature size of the VGG family; DenseNet models
# expose a different classifier.in_features and would need this adjusted.
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(25088, 4096)), # First fully connected layer
('relu', nn.ReLU()), # Activation function
('fc2', nn.Linear(4096, 102)), # Output layer (102 flower categories)
('output', nn.LogSoftmax(dim=1)) # Log-probabilities, paired with NLLLoss below
]))
for param in model.parameters():
param.requires_grad = False
model.classifier = classifier
def train(model, epochs, learning_rate, criterion, optimizer, training_loader, validation_loader):
model.train() # Puts model into training mode
print_every = 40
steps = 0
use_gpu = False
# Check to see whether GPU is available
if torch.cuda.is_available():
use_gpu = True
model.cuda()
else:
model.cpu()
# Iterates through each training pass based on #epochs & GPU/CPU
for epoch in range(epochs):
running_loss = 0
for inputs, labels in iter(training_loader):
steps += 1
if use_gpu:
inputs = Variable(inputs.float().cuda())
labels = Variable(labels.long().cuda())
else:
inputs = Variable(inputs)
labels = Variable(labels)
# Forward and backward passes
optimizer.zero_grad() # zero's out the gradient, otherwise will keep adding
output = model.forward(inputs) # Forward propogation
loss = criterion(output, labels) # Calculates loss
loss.backward() # Calculates gradient
optimizer.step() # Updates weights based on gradient & learning rate
running_loss += loss.item()
if steps % print_every == 0:
validation_loss, accuracy = validate(model, criterion, validation_loader)
print("Epoch: {}/{} ".format(epoch+1, epochs),
"Training Loss: {:.3f} ".format(running_loss/print_every),
"Validation Loss: {:.3f} ".format(validation_loss),
"Validation Accuracy: {:.3f}".format(accuracy))
def validate(model, criterion, data_loader):
model.eval() # Puts model into validation mode
accuracy = 0
test_loss = 0
for inputs, labels in iter(data_loader):
if torch.cuda.is_available():
inputs = Variable(inputs.float().cuda(), volatile=True)
labels = Variable(labels.long().cuda(), volatile=True)
else:
inputs = Variable(inputs, volatile=True)
labels = Variable(labels, volatile=True)
output = model.forward(inputs)
test_loss += criterion(output, labels).item()
ps = torch.exp(output).data
equality = (labels.data == ps.max(1)[1])
accuracy += equality.type_as(torch.FloatTensor()).mean()
return test_loss/len(data_loader), accuracy/len(data_loader)
print("Success")
'''
epoch = cli_args.epochs,
state_dict = model.state_dict(),
optimizer_dict = optimizer.state_dict(),
classifier = model.classifier,
class_to_idx = nn_model.class_to_idx,
arch = cli_args.arch
'''
epochs = cli_args.epochs
learning_rate = cli_args.learning_rate
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
train(model, epochs, learning_rate, criterion, optimizer, dataloaders['training'], dataloaders['validation'])
#Save checkpoint
model.class_to_idx = image_datasets['training'].class_to_idx
model.cpu()
torch.save({'arch': cli_args.arch,
'state_dict': model.state_dict(), # Holds all the weights and biases
'class_to_idx': model.class_to_idx},
'checkpoint.pth')
print("Checkpoint saved.")
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(0) |
# Dependencies
import pandas as pd
import requests as req
import time
from bs4 import BeautifulSoup as bs
from splinter import Browser
from pprint import pprint
import pymongo
from flask import Flask, render_template
import numpy as np
import json
from selenium import webdriver
def scrape():
#Getting ChromeDriver path
executable_path = {'executable_path': 'chromedriver.exe'}
#Getting the browser
browser = Browser('chrome', **executable_path, headless=False)
#Defining an empty collection
mars_collection = {}
# Getting NASA Mars News
url = ('https://mars.nasa.gov/news/')
browser.visit(url)
response = req.get(url)
time.sleep(1)
soup = bs(response.text, 'html.parser')
mars_collection["news_title"] = soup.find('div', class_="content_title").get_text()
mars_collection["news_photo"] = soup.find('div', class_="rollover_description_inner").get_text()
#Getting Space Images
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
response = req.get(url)
time.sleep(1)
soup = bs(response.text, 'html.parser')
image_url = soup.find('article', class_='carousel_item')
footer = image_url.find('footer')
ref = footer.find('a')
path = ref['data-fancybox-href']
featured_image_url = ('https://www.jpl.nasa.gov' + path)
mars_collection["featured_image_url"] = featured_image_url
#Getting Mars Facts
url = 'https://space-facts.com/mars/'
browser.visit(url)
table = pd.read_html(url)
df = table[0]
df.columns = ["Facts", "Value"]
facts_html = df.to_html()
facts_html = facts_html.replace("\n","")
mars_collection["fact_table"] = facts_html
#Mars Hemispheres
#empty list
hemisphere_image_urls =[]
#Cerberus Hemisphere
url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/cerberus_enhanced'
browser.visit(url)
response = req.get(url)
time.sleep(1)
soup = bs(response.text, 'html.parser')
Cerberus_image = soup.find_all('div', class_="wide-image-wrapper")
for image in Cerberus_image:
picture = image.find('li')
Cerberus_image_url = picture.find('a')['href']
cerberus_title = soup.find('h2', class_='title').text
Cerberus_Hemisphere = {"Title": cerberus_title, "url": Cerberus_image_url}
hemisphere_image_urls.append(Cerberus_Hemisphere)
#Schiaparelli Hemisphere
url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/schiaparelli_enhanced'
browser.visit(url)
response = req.get(url)
time.sleep(1)
soup = bs(response.text, 'html.parser')
shiaparelli_image = soup.find_all('div', class_="wide-image-wrapper")
for image in shiaparelli_image:
picture = image.find('li')
shiaparelli_image_url = picture.find('a')['href']
shiaparelli_title = soup.find('h2', class_='title').text
Schiaparelli_Hemisphere = {"Title": shiaparelli_title, "url": shiaparelli_image_url}
hemisphere_image_urls.append(Schiaparelli_Hemisphere)
#Syrtis Major Hemisphere
url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/syrtis_major_enhanced'
browser.visit(url)
response = req.get(url)
time.sleep(1)
soup = bs(response.text, 'html.parser')
syrtris_image = soup.find_all('div', class_="wide-image-wrapper")
for image in syrtris_image:
picture = image.find('li')
syrtris_image_url = picture.find('a')['href']
syrtris_title = soup.find('h2', class_='title').text
Syrtis_Major_Hemisphere = {"Title": syrtris_title, "url": syrtris_image_url}
hemisphere_image_urls.append(Syrtis_Major_Hemisphere)
#Valles Marineris Hemisphere
url = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/valles_marineris_enhanced'
browser.visit(url)
response = req.get(url)
time.sleep(1)
soup = bs(response.text, 'html.parser')
valles_marineris_image = soup.find_all('div', class_="wide-image-wrapper")
for image in valles_marineris_image:
picture = image.find('li')
valles_marineris_image_url = picture.find('a')['href']
valles_marineris_title = soup.find('h2', class_='title').text
Valles_Marineris_Hemisphere = {"Title": valles_marineris_title, "url": valles_marineris_image_url}
hemisphere_image_urls.append(Valles_Marineris_Hemisphere)
mars_collection["hemisphere_image"] = hemisphere_image_urls
return mars_collection
if __name__ == "__main__":
scrape()
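# Refactor sketch (not part of the original): the four hemisphere blocks above differ only
# in the URL slug, so they could be collapsed into one loop. The slugs are taken from the
# URLs already used above; the function is illustrative and not called by scrape().
def scrape_hemispheres(browser):
    """Collect {'Title', 'url'} dicts for the four hemisphere pages (sketch)."""
    base = 'https://astrogeology.usgs.gov/search/map/Mars/Viking/'
    slugs = ['cerberus_enhanced', 'schiaparelli_enhanced',
             'syrtis_major_enhanced', 'valles_marineris_enhanced']
    results = []
    for slug in slugs:
        url = base + slug
        browser.visit(url)
        time.sleep(1)
        soup = bs(req.get(url).text, 'html.parser')
        title = soup.find('h2', class_='title').text
        for image in soup.find_all('div', class_='wide-image-wrapper'):
            href = image.find('li').find('a')['href']
            results.append({'Title': title, 'url': href})
    return results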
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Subtitle(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, src=None, srclang=None, name=None, font=None, size=None, color=None, text_shadow=None, background=None, opacity=None, italic_color=None):
"""
Subtitle - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'src': 'str',
'srclang': 'str',
'name': 'str',
'font': 'str',
'size': 'float',
'color': 'str',
'text_shadow': 'str',
'background': 'str',
'opacity': 'int',
'italic_color': 'bool'
}
self.attribute_map = {
'id': 'id',
'src': 'src',
'srclang': 'srclang',
'name': 'name',
'font': 'font',
'size': 'size',
'color': 'color',
'text_shadow': 'text_shadow',
'background': 'background',
'opacity': 'opacity',
'italic_color': 'italic_color'
}
self._id = id
self._src = src
self._srclang = srclang
self._name = name
self._font = font
self._size = size
self._color = color
self._text_shadow = text_shadow
self._background = background
self._opacity = opacity
self._italic_color = italic_color
@property
def id(self):
"""
Gets the id of this Subtitle.
:return: The id of this Subtitle.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Subtitle.
:param id: The id of this Subtitle.
:type: int
"""
self._id = id
@property
def src(self):
"""
Gets the src of this Subtitle.
:return: The src of this Subtitle.
:rtype: str
"""
return self._src
@src.setter
def src(self, src):
"""
Sets the src of this Subtitle.
:param src: The src of this Subtitle.
:type: str
"""
self._src = src
@property
def srclang(self):
"""
Gets the srclang of this Subtitle.
:return: The srclang of this Subtitle.
:rtype: str
"""
return self._srclang
@srclang.setter
def srclang(self, srclang):
"""
Sets the srclang of this Subtitle.
:param srclang: The srclang of this Subtitle.
:type: str
"""
self._srclang = srclang
@property
def name(self):
"""
Gets the name of this Subtitle.
:return: The name of this Subtitle.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Subtitle.
:param name: The name of this Subtitle.
:type: str
"""
self._name = name
@property
def font(self):
"""
Gets the font of this Subtitle.
:return: The font of this Subtitle.
:rtype: str
"""
return self._font
@font.setter
def font(self, font):
"""
Sets the font of this Subtitle.
:param font: The font of this Subtitle.
:type: str
"""
self._font = font
@property
def size(self):
"""
Gets the size of this Subtitle.
:return: The size of this Subtitle.
:rtype: float
"""
return self._size
@size.setter
def size(self, size):
"""
Sets the size of this Subtitle.
:param size: The size of this Subtitle.
:type: float
"""
self._size = size
@property
def color(self):
"""
Gets the color of this Subtitle.
:return: The color of this Subtitle.
:rtype: str
"""
return self._color
@color.setter
def color(self, color):
"""
Sets the color of this Subtitle.
:param color: The color of this Subtitle.
:type: str
"""
self._color = color
@property
def text_shadow(self):
"""
Gets the text_shadow of this Subtitle.
:return: The text_shadow of this Subtitle.
:rtype: str
"""
return self._text_shadow
@text_shadow.setter
def text_shadow(self, text_shadow):
"""
Sets the text_shadow of this Subtitle.
:param text_shadow: The text_shadow of this Subtitle.
:type: str
"""
self._text_shadow = text_shadow
@property
def background(self):
"""
Gets the background of this Subtitle.
:return: The background of this Subtitle.
:rtype: str
"""
return self._background
@background.setter
def background(self, background):
"""
Sets the background of this Subtitle.
:param background: The background of this Subtitle.
:type: str
"""
self._background = background
@property
def opacity(self):
"""
Gets the opacity of this Subtitle.
:return: The opacity of this Subtitle.
:rtype: int
"""
return self._opacity
@opacity.setter
def opacity(self, opacity):
"""
Sets the opacity of this Subtitle.
:param opacity: The opacity of this Subtitle.
:type: int
"""
self._opacity = opacity
@property
def italic_color(self):
"""
Gets the italic_color of this Subtitle.
:return: The italic_color of this Subtitle.
:rtype: bool
"""
return self._italic_color
@italic_color.setter
def italic_color(self, italic_color):
"""
Sets the italic_color of this Subtitle.
:param italic_color: The italic_color of this Subtitle.
:type: bool
"""
self._italic_color = italic_color
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
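# Usage sketch (not part of the generated module): attribute values are arbitrary examples.
if __name__ == '__main__':
    subtitle = Subtitle(id=1, src='/subs/movie_en.vtt', srclang='en', name='English')
    print(subtitle.to_str())  # pretty-printed dict of all attributes
    assert subtitle == Subtitle(id=1, src='/subs/movie_en.vtt', srclang='en', name='English')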
|
# sources: https://github.com/bieli/stopwords/blob/master/polish.stopwords.txt and https://github.com/stopwords-iso/stopwords-pl
STOP_WORDS = set(
"""
a aby ach acz aczkolwiek aj albo ale alez
ależ ani az aż
bardziej bardzo beda bede bedzie bez bo bowiem by
byc byl byla byli bylo byly bym bynajmniej być był
była było były będzie będą będę
cala cali caly cała cały chce choć ci cie
ciebie cię co cokolwiek coraz cos coś czasami czasem czemu
czy czyli często
daleko dla dlaczego dlatego do dobrze dokad dokąd
dosc dość duzo dużo dwa dwaj dwie dwoje dzis
dzisiaj dziś
gdy gdyby gdyz gdyż gdzie gdziekolwiek gdzies gdzieś go
godz
i ich ile im inna inne inny
innych iv ix iz iż
ja jak jakas jakaś jakby jaki jakichs jakichś jakie
jakis jakiz jakiś jakiż jakkolwiek jako jakos jakoś je jeden
jedna jednak jednakze jednakże jedno jednym jedynie jego jej jemu
jesli jest jestem jeszcze jezeli jeśli jeżeli juz już ją
kazdy każdy kiedy kierunku kilka kilku kims kimś kto
ktokolwiek ktora ktore ktorego ktorej ktory ktorych ktorym ktorzy ktos
ktoś która które którego której który których którym którzy ku
lecz lub
ma mają mam mamy mało mi miał miedzy
mimo między mna mnie mną moga mogą moi moim moj
moja moje moze mozliwe mozna może możliwe można mu musi
my mój
na nad nam nami nas nasi nasz nasza nasze
naszego naszych natomiast natychmiast nawet nia nic nich nie niech
niego niej niemu nigdy nim nimi niz nią niż no
o obok od ok około on ona one
oni ono oraz oto owszem
pan pana pani po pod podczas pomimo ponad
poniewaz ponieważ powinien powinna powinni powinno poza prawie przeciez
przecież przed przede przedtem przez przy
raz razie roku rowniez również
sam sama sie się skad skąd soba sobie sobą
sposob sposób swoje są
ta tak taka taki takich takie takze także tam
te tego tej tel temu ten teraz też to toba
tobie tobą totez toteż totobą trzeba tu tutaj twoi twoim
twoj twoja twoje twym twój ty tych tylko tym tys
tzw tę
u
vi vii viii
w wam wami was wasi wasz wasza wasze we
według wie wiele wielu więc więcej wlasnie wszyscy wszystkich wszystkie
wszystkim wszystko wtedy wy właśnie wśród
xi xii xiii xiv xv
z za zaden zadna zadne zadnych zapewne zawsze zaś
ze zeby znow znowu znów zostal został
żaden żadna żadne żadnych że żeby""".split()
)
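# Usage sketch (not part of the original list): dropping stop words from a tokenised
# Polish sentence. The example sentence is arbitrary.
if __name__ == '__main__':
    tokens = "to jest bardzo prosty przykład".split()
    print([t for t in tokens if t not in STOP_WORDS])  # -> ['prosty', 'przykład']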
|
#UTF-8
# Faralaks Group (c)
# +------------------------------------------------------+
# | Program start, loading of components, initialization  |
# +------------------------------------------------------+
# load libraries
import pygame, webbrowser, random
from pygame import *
from cryptography.fernet import Fernet
from modules import stoper
pygame.init() # initialize the pygame library
# variable initialization
# core variables
mode = 'game'
fps_holder = pygame.time.Clock()
# main menu variables
menu_button = 1
sound_button = [2, 2]
# gameplay variables
refresh = True
screen_pos = [50, 50]
invisible_objects = {'dungeon'}
ground = pygame.Surface((1320, 760))
# in-game menu variables
# variable for the main character. structure: [hero name, image, [screen coordinates], [map number], [moving left, right, up, down], level, money]
hero = ['name', pygame.image.load('data/pictures/hero.png'), [550, 350], [0, 0], [False, False, False, False], 1, 999]
# define functions and classes
def klick_objects_loader(mode):
"""Загружает кликабельные объекты"""
done_klick_objects = {}
file = open('data/klick_objects/' + mode + '.txt')
for line in file:
line = line.strip().split(' = ')
back = []
for i in line[1].split('+'):
back.append(i.split('_'))
done_klick_objects[line[0]] = back
del back, line, i
return done_klick_objects
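# Format sketch (an assumption inferred from the parser above, with made-up coordinates):
# each line of data/klick_objects/<mode>.txt is expected to look like
#   1 = play_490_300_790_360+options_490_400_790_460
# i.e. "<button screen> = <name>_<x1>_<y1>_<x2>_<y2>" entries joined with '+', which
# klick_objects_loader turns into {'1': [['play', '490', '300', '790', '360'], ...]}.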
def stoper_loader(map, key=Fernet(b'xGsCGQLymuFmwqqsdD3Pj7cAqXhkbVOJcei01O7vO48=')):
"""Загружает координаты Стопера"""
adres = 'data/stoper/stoper' + str(map[0]) + '_' + str(map[1]) + '.frls'
try: file = open(adres)
except FileNotFoundError:
return ['']
else:
file = open(adres, 'rb')
return key.decrypt(file.read()).decode('utf8').split('+')
def object_loader(map, key=Fernet(b'xGsCGQLymuFmwqqsdD3Pj7cAqXhkbVOJcei01O7vO48=')):
"""Загружает объекты на карте"""
adres = 'data/objects/objects' + str(map[0]) + '_' + str(map[1]) + '.frls'
try: file = open(adres)
except FileNotFoundError:
return [['']]
else:
file = open(adres, 'rb')
objects = []
for object in key.decrypt(file.read()).decode('utf8').split('+'):
objects.append(object.split('_'))
return objects
# load images
all_pictures = {} # dictionary with all images except mobs and the main character
file = open('data/picture_locations.txt')
for line in file:
back = []
line = line.strip().split(' ')
for i in range(int(line[1])):
back.append(pygame.image.load(line[2] + str(i + 1) + line[3]))
all_pictures[line[0]] = back
# load the color coding
color_codes = {} # dictionary with the color codes
file = open('data/color_codes.txt')
for line in file:
line = line.strip().split(' ')
color_codes[line[0]] = line[1]
# load the game world map
konva = pygame.image.frombuffer(Fernet(b'xGsCGQLymuFmwqqsdD3Pj7cAqXhkbVOJcei01O7vO48=').decrypt(open('data/map/map.frls', 'rb').read()), (3456, 3456), 'RGB')
konva_surf = pygame.Surface((3456, 3456))
konva_surf.blit(konva, (0, 0))
konva_pix = pygame.PixelArray(konva_surf)
del back, file, line, i, konva_surf, konva # remove temporaries that are no longer needed
# create the game window
window = pygame.display.set_mode((1280, 720), pygame.FULLSCREEN)
pygame.display.set_caption('Test') # change the window title
# +-----------+
# | Main menu |
# +-----------+
while mode == 'main_menu':
    window.blit(all_pictures[mode][menu_button - 1], (0, 0)) # background
if menu_button == 2:
        window.blit(all_pictures['sound_button'][sound_button[1] - 1], (920, 323)) # music on/off
        window.blit(all_pictures['sound_button'][sound_button[0] - 1], (920, 357)) # sound on/off
    klick_objects = klick_objects_loader(mode) # load the clickable objects
    # event handling
for event in pygame.event.get():
if event.type == QUIT:
mode = 'Exit'
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
mode = 'Exit'
menu_button = 3
        # mouse events
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
mouse_pos = pygame.mouse.get_pos()
#print(mouse_pos)
for klick_object in klick_objects[str(menu_button)]:
if int(klick_object[1]) <= mouse_pos[0] <= int(klick_object[3]) and int(klick_object[2]) <= mouse_pos[1] <= int(klick_object[4]):
if klick_object[0] == 'options': menu_button = 2
elif klick_object[0] == 'exit': menu_button = 3
elif klick_object[0] == 'play': menu_button = 1
elif klick_object[0] == 'soundbutton':
if sound_button[0] == 1: sound_button[0] = 2
elif sound_button[0] == 2: sound_button[0] = 1
elif klick_object[0] == 'musicbutton':
if sound_button[1] == 1: sound_button[1] = 2
elif sound_button[1] == 2: sound_button[1] = 1
elif klick_object[0] == 'openVK': webbrowser.open('https://vk.com/Faralaks')
elif klick_object[0] == 'openYT': webbrowser.open('https://www.youtube.com/Faralaks')
elif klick_object[0] == 'openSteam': webbrowser.open('http://steamcommunity.com/id/Faralaks')
elif klick_object[0] == 'openWS': webbrowser.open('ms-windows-store://pdp/?productid=9WZDNCRFJ1PT&referrer=unistoreweb&scenario=click&webig=e3428e62-12ab-4149-9556-b329133c2efc&muid=053DBBC097DC6FD0385BB10B93DC6C3E&websession=c2f554163c91413a8bafaf5459efd845')
elif klick_object[0] == 'yesexit': mode = 'Exit'
elif klick_object[0] == 'save': mode = 'game'
    fps_holder.tick(60) # frame rate control (60 frames per second)
display.update()
# +----------+
# | Gameplay |
# +----------+
while mode == 'game':
if refresh == True:
refresh = False
        stoper = stoper_loader(hero[3]) # load the Stoper coordinates
        objects = object_loader(hero[3]) # load the objects on the map
        # map generation
for pix_y in range(hero[3][1]*36, hero[3][1]*36 + 36):
for pix_x in range(hero[3][0]*64, hero[3][0]*64 + 64):
ground.blit(random.choice(all_pictures[color_codes[str(konva_pix[pix_x, pix_y])]]), ((pix_x - hero[3][0]*64)*20, (pix_y - hero[3][1]*36)*20))
        # draw the objects on the map
if objects != [['']]:
for object in objects:
if object[0] in invisible_objects: continue
ground.blit(all_pictures[object[0]][int(object[1])], (int(object[2]), int(object[3])))
    # event handling
for event in pygame.event.get():
if event.type == QUIT:
mode = 'Exit'
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
mode = 'Exit'
if event.key == K_LEFT or event.key == K_a: hero[4][0] = True
if event.key == K_RIGHT or event.key == K_d: hero[4][1] = True
if event.key == K_UP or event.key == K_w: hero[4][2] = True
if event.key == K_DOWN or event.key == K_s: hero[4][3] = True
if event.type == KEYUP:
if event.key == K_LEFT or event.key == K_a: hero[4][0] = False
if event.key == K_RIGHT or event.key == K_d: hero[4][1] = False
if event.key == K_UP or event.key == K_w: hero[4][2] = False
if event.key == K_DOWN or event.key == K_s: hero[4][3] = False
    # movement while keys are held down
if hero[4][0] == True: hero[2][0] -= 3
if hero[4][1] == True: hero[2][0] += 3
if hero[4][2] == True: hero[2][1] -= 3
if hero[4][3] == True: hero[2][1] += 3
    # move to another area of the map
if hero[2][0] < -25:
hero[3][0] -= 1
hero[2][0] = 1255
refresh = True
elif hero[2][0] > 1255:
hero[3][0] += 1
hero[2][0] = -25
refresh = True
elif hero[2][1] < -25:
hero[3][1] -= 1
hero[2][1] = 683
refresh = True
elif hero[2][1] > 680:
hero[3][1] += 1
hero[2][1] = -25
refresh = True
window.blit(ground, (0, 0))
window.blit(hero[1], (hero[2][0], hero[2][1]))
    fps_holder.tick(160) # frame rate control (capped at 160 frames per second)
display.update()
pygame.quit()
print('')
print('DONE!') |
from typing import List, NoReturn
import grpc
from grpc import Channel, Server
from .user_pb2_grpc import add_UserServiceServicer_to_server, \
UserServiceServicer
from .message import CreateRequest, CreateReply, DeleteRequest, DeleteReply, \
User
__all__ = ["UserService", "register_user_service"]
class UserService(UserServiceServicer):
def Create(self, request: CreateRequest, context) -> CreateReply:
        user = self.create_user(request.user_id, request.username)
return CreateReply(user=user)
def Delete(self, request: DeleteRequest, context) -> DeleteReply:
self.delete_user(request.id)
return DeleteReply()
def create_user(self, user_id: str, name: str) -> User:
raise NotImplementedError()
def delete_user(self, user_id: str) -> NoReturn:
raise NotImplementedError()
def register_user_service(service: UserService, server: Server):
"""
Register a user service with the provided gRPC server.
"""
add_UserServiceServicer_to_server(service, server)
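# --- Usage sketch (not part of the original module) --------------------------
# A minimal example of wiring a concrete UserService into a gRPC server.
# The in-memory store and the ``id``/``name`` fields passed to ``User`` are
# assumptions made for illustration; the real proto definition may differ.
from concurrent import futures


class InMemoryUserService(UserService):
    def __init__(self):
        self._users = {}

    def create_user(self, user_id: str, name: str) -> User:
        user = User(id=user_id, name=name)  # field names are assumed
        self._users[user_id] = user
        return user

    def delete_user(self, user_id: str) -> None:
        self._users.pop(user_id, None)


def serve(port: int = 50051) -> None:
    """Start a blocking gRPC server exposing the in-memory user service."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    register_user_service(InMemoryUserService(), server)
    server.add_insecure_port(f"[::]:{port}")
    server.start()
    server.wait_for_termination()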
|
import numpy as np
def divisor_method(votes: np.ndarray, seats: int, step: int) -> np.ndarray:
"""
:param votes: one-dimensional array of shape (m) where m is the number of parties
:param seats: the total number of seats available
    :param step: the increment applied to a party's divisor each time it wins a seat
    :return: one-dimensional array with the number of seats allocated to each party
"""
assert seats > 0, "at least one seat required"
    assert votes.sum() >= seats, "the number of votes should be greater than or equal to the number of seats"
outcome = np.zeros_like(votes)
    votes_c = votes.astype(float)  # keep quotients exact; an integer copy would truncate them
divisors = np.ones_like(votes)
for _ in range(seats):
seat_taker = votes_c.argmax()
outcome[seat_taker] += 1
divisors[seat_taker] += step
votes_c[seat_taker] = votes[seat_taker] / divisors[seat_taker]
return outcome
def dhondt(votes: np.ndarray, seats: int) -> np.ndarray:
return divisor_method(votes, seats, 1)
def saint_lague(votes: np.ndarray, seats: int) -> np.ndarray:
return divisor_method(votes, seats, 2)
def largest_remainder(votes: np.ndarray, seats: int) -> np.ndarray:
assert seats > 0, "at least one seat required"
    assert votes.sum() >= seats, "the number of votes should be greater than or equal to the number of seats"
quota = votes.sum() / seats
outcome, remainders = np.divmod(votes, quota)
outcome = outcome.astype(int)
remaining_seats = int(seats - outcome.sum())
to_add = np.argpartition(-remainders, remaining_seats)[:remaining_seats]
outcome[to_add] += 1
return outcome
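# Illustrative example (not part of the original module): with votes
# [100, 80, 30] and 8 seats, dhondt, saint_lague and largest_remainder all
# happen to return the allocation [4, 3, 1] for this particular input, e.g.
#     dhondt(np.array([100, 80, 30]), 8)  ->  array([4, 3, 1])
# The tryall helper defined just below prints all three allocations at once.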
def tryall(votes: np.ndarray, seats: int):
print(f"D'Hondt\t\t\t{dhondt(votes,seats)}")
print(f"SaintLague\t\t{saint_lague(votes,seats)}")
print(f"LargestRemainder\t{largest_remainder(votes,seats)}") |
import dataset
from missingPatterns import MissingPatterns
if __name__ == "__main__":
# execute only if run as a script
d = dataset.Dataset()
mp = MissingPatterns(d).missing_patterns
for i in mp:
print(set(i)) |
# Generated by Django 3.1.1 on 2021-05-22 12:01
from django.db import migrations, models
import elephant.utils.utils
class Migration(migrations.Migration):
dependencies = [
('account', '0002_auto_20210522_1453'),
]
operations = [
migrations.AlterField(
model_name='account',
name='avatar',
field=models.FileField(null=True, upload_to=elephant.utils.utils.PathAndRename('account/avatar')),
),
]
|
# bootstraps the application
# copyright (c) 2018 wildduck.io
from app_src import app
if __name__ == '__main__':
debug = app.config['DEBUG']
host = app.config['HOST']
app.run(debug=debug, host=host)
|
# Copyright 2021 Universität Tübingen, DKFZ and EMBL
# for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Core utilities for the functionality of the Metadata Service API.
"""
import logging
import datetime
from typing import Dict
from pydantic import BaseModel
from metadata_service.database import DBConnect
async def _get_reference(document_id: str, collection_name: str) -> Dict:
"""Given a document ID and a collection name, query the metadata store
and return the document.
Args:
document_id: The ID of the document
collection_name: The collection in the metadata store that has the document
    Returns:
        The document corresponding to ``document_id``
"""
db_connect = DBConnect()
collection = await db_connect.get_collection(collection_name)
doc = await collection.find_one({"id": document_id}) # type: ignore
if not doc:
logging.warning(
"Reference with ID %s not found in collection %s",
document_id,
collection_name,
)
return doc
async def embed_references(parent_document: Dict, document_type: BaseModel) -> Dict:
"""Given a document and a document type, identify the references in ``parent_document``
and query the metadata store. After retrieving the referenced objects, embed them in place
of the reference in the parent document.
Args:
parent_document: The parent document that has one or more references
document_type: An instance of ``pydantic.BaseModel``
    Returns:
        The denormalized/embedded document
"""
for field, referenced_obj in document_type.__references__:
if field in parent_document and parent_document[field]:
if isinstance(parent_document[field], str):
referenced_doc = await _get_reference(
parent_document[field], referenced_obj.__collection__
)
referenced_doc = await embed_references(referenced_doc, referenced_obj)
parent_document[field] = referenced_doc
elif isinstance(parent_document[field], (list, set, tuple)):
docs = []
for ref in parent_document[field]:
referenced_doc = await _get_reference(
ref, referenced_obj.__collection__
)
referenced_doc = await embed_references(
referenced_doc, referenced_obj
)
docs.append(referenced_doc)
parent_document[field] = docs
else:
raise ValueError(
f"Unexpected value type for field {field} in parent object {parent_document}"
)
return parent_document
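# Illustrative sketch (not from the actual GHGA data model): ``embed_references``
# replaces string references with the referenced documents in place. For example,
# a parent document {"id": "study_1", "dataset": "dataset_1"} whose model lists
# ("dataset", Dataset) in ``__references__`` would come back as
# {"id": "study_1", "dataset": {"id": "dataset_1", ...}}, and list-valued
# reference fields are embedded element by element.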
async def get_timestamp() -> str:
"""
Get the current timestamp in UTC according to ISO 8601
Returns:
The timestamp as a string
"""
return datetime.datetime.isoformat(datetime.datetime.utcnow())
|
from oauthlib.oauth2 import LegacyApplicationClient
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session
def get_token(client_id,client_secret, token_url):
client = BackendApplicationClient(client_id=client_id)
oauth = OAuth2Session(client=client)
token = oauth.fetch_token(token_url=token_url, client_id=client_id, client_secret=client_secret)
return token
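# Usage sketch (placeholder values, not a real endpoint or credentials):
#     token = get_token(
#         client_id="my-client-id",
#         client_secret="my-client-secret",
#         token_url="https://auth.example.org/oauth/token",
#     )
#     # ``token`` is a dict; token["access_token"] can be sent as a Bearer token.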
|
# This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
TLS helpers, provided as out-of-context methods.
"""
from __future__ import absolute_import
import struct
from scapy.compat import orb, chb
from scapy.error import warning
from scapy.fields import (ByteEnumField, ShortEnumField,
FieldLenField, StrLenField)
from scapy.packet import Packet
from scapy.layers.tls.basefields import _tls_type, _tls_version
class TLSPlaintext(Packet):
name = "TLS Plaintext"
fields_desc = [ByteEnumField("type", None, _tls_type),
ShortEnumField("version", None, _tls_version),
FieldLenField("len", None, length_of="data", fmt="!H"),
StrLenField("data", "",
length_from=lambda pkt: pkt.len)]
class TLSCompressed(TLSPlaintext):
name = "TLS Compressed"
class TLSCiphertext(TLSPlaintext):
name = "TLS Ciphertext"
def _tls_compress(alg, p):
"""
Compress p (a TLSPlaintext instance) using compression algorithm instance
alg and return a TLSCompressed instance.
"""
c = TLSCompressed()
c.type = p.type
c.version = p.version
c.data = alg.compress(p.data)
c.len = len(c.data)
return c
def _tls_decompress(alg, c):
"""
Decompress c (a TLSCompressed instance) using compression algorithm
instance alg and return a TLSPlaintext instance.
"""
p = TLSPlaintext()
p.type = c.type
p.version = c.version
p.data = alg.decompress(c.data)
p.len = len(p.data)
return p
def _tls_mac_add(alg, c, write_seq_num):
"""
Compute the MAC using provided MAC alg instance over TLSCiphertext c using
current write sequence number write_seq_num. Computed MAC is then appended
to c.data and c.len is updated to reflect that change. It is the
caller responsibility to increment the sequence number after the operation.
The function has no return value.
"""
write_seq_num = struct.pack("!Q", write_seq_num)
h = alg.digest(write_seq_num + bytes(c))
c.data += h
c.len += alg.hash_len
def _tls_mac_verify(alg, p, read_seq_num):
"""
Verify if the MAC in provided message (message resulting from decryption
and padding removal) is valid. Current read sequence number is used in
the verification process.
If the MAC is valid:
- The function returns True
- The packet p is updated in the following way: trailing MAC value is
removed from p.data and length is updated accordingly.
In case of error, False is returned, and p may have been modified.
Also note that it is the caller's responsibility to update the read
sequence number after the operation.
"""
h_size = alg.hash_len
if p.len < h_size:
return False
received_h = p.data[-h_size:]
p.len -= h_size
p.data = p.data[:-h_size]
read_seq_num = struct.pack("!Q", read_seq_num)
h = alg.digest(read_seq_num + bytes(p))
return h == received_h
def _tls_add_pad(p, block_size):
"""
Provided with cipher block size parameter and current TLSCompressed packet
p (after MAC addition), the function adds required, deterministic padding
to p.data before encryption step, as it is defined for TLS (i.e. not
SSL and its allowed random padding). The function has no return value.
"""
    padlen = -p.len % block_size
    if padlen == 0:
        padlen = block_size
    padlen -= 1
    padding = chb(padlen) * (padlen + 1)
p.len += len(padding)
p.data += padding
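# Worked example (illustrative numbers): with block_size=16 and p.len=20 after
# the MAC has been added, padlen starts as -20 % 16 = 12 and is decremented to
# 11, so twelve bytes of value 0x0b are appended and p.len becomes 32, a
# multiple of the block size. _tls_del_pad below reverses exactly this step.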
def _tls_del_pad(p):
"""
Provided with a just decrypted TLSCiphertext (now a TLSPlaintext instance)
p, the function removes the trailing padding found in p.data. It also
performs some sanity checks on the padding (length, content, ...). False
is returned if one of the check fails. Otherwise, True is returned,
indicating that p.data and p.len have been updated.
"""
if p.len < 1:
warning("Message format is invalid (padding)")
return False
padlen = orb(p.data[-1])
padsize = padlen + 1
if p.len < padsize:
warning("Invalid padding length")
return False
if p.data[-padsize:] != chb(padlen) * padsize:
warning("Padding content is invalid %s", repr(p.data[-padsize:]))
return False
p.data = p.data[:-padsize]
p.len -= padsize
return True
def _tls_encrypt(alg, p):
"""
Provided with an already MACed TLSCompressed packet, and a stream or block
cipher alg, the function converts it into a TLSCiphertext (i.e. encrypts it
and updates length). The function returns a newly created TLSCiphertext
instance.
"""
c = TLSCiphertext()
c.type = p.type
c.version = p.version
c.data = alg.encrypt(p.data)
c.len = len(c.data)
return c
def _tls_decrypt(alg, c):
"""
Provided with a TLSCiphertext instance c, and a stream or block cipher alg,
the function decrypts c.data and returns a newly created TLSPlaintext.
"""
p = TLSPlaintext()
p.type = c.type
p.version = c.version
p.data = alg.decrypt(c.data)
p.len = len(p.data)
return p
def _tls_aead_auth_encrypt(alg, p, write_seq_num):
"""
Provided with a TLSCompressed instance p, the function applies AEAD
cipher alg to p.data and builds a new TLSCiphertext instance. Unlike
for block and stream ciphers, for which the authentication step is done
separately, AEAD alg does it simultaneously: this is the reason why
write_seq_num is passed to the function, to be incorporated in
authenticated data. Note that it is the caller's responsibility to increment # noqa: E501
write_seq_num afterwards.
"""
P = bytes(p)
write_seq_num = struct.pack("!Q", write_seq_num)
A = write_seq_num + P[:5]
c = TLSCiphertext()
c.type = p.type
c.version = p.version
c.data = alg.auth_encrypt(P, A, write_seq_num)
c.len = len(c.data)
return c
def _tls_aead_auth_decrypt(alg, c, read_seq_num):
"""
Provided with a TLSCiphertext instance c, the function applies AEAD
cipher alg auth_decrypt function to c.data (and additional data)
in order to authenticate the data and decrypt c.data. When those
steps succeed, the result is a newly created TLSCompressed instance.
On error, None is returned. Note that it is the caller's responsibility to
increment read_seq_num afterwards.
"""
# 'Deduce' TLSCompressed length from TLSCiphertext length
    # There is actually no guarantee of this equality, but this is defined as
# such in TLS 1.2 specifications, and it works for GCM and CCM at least.
#
plen = c.len - getattr(alg, "nonce_explicit_len", 0) - alg.tag_len
read_seq_num = struct.pack("!Q", read_seq_num)
A = read_seq_num + struct.pack('!BHH', c.type, c.version, plen)
p = TLSCompressed()
p.type = c.type
p.version = c.version
p.len = plen
p.data = alg.auth_decrypt(A, c.data, read_seq_num)
if p.data is None: # Verification failed.
return None
return p
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 19 08:15:32 2018
Computes the global extrema metrics for regridded and reference target data
@author: jeguerra
"""
import numpy as np
def computeGlobalExtremaMetrics(varS2T, varST):
Lden = (np.max(abs(varST)) - np.min(abs(varST)))
L_min = min(np.amin(varS2T) - np.amin(varST), 0.0) / \
Lden # < 0 indicates failure
L_max = max(np.amax(varS2T) - np.amax(varST), 0.0) / \
Lden # > 0 indicates failure
return L_min, L_max
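# A minimal usage sketch (illustrative arrays, not real regridded data): the
# regridded field undershoots the reference minimum and overshoots its maximum.
if __name__ == "__main__":
    varST = np.array([1.0, 2.0, 5.0])   # reference target data
    varS2T = np.array([0.5, 2.0, 5.5])  # regridded data
    L_min, L_max = computeGlobalExtremaMetrics(varS2T, varST)
    print(L_min, L_max)  # approximately -0.125 (undershoot) and 0.125 (overshoot)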
|
from setuptools import setup, find_packages
setup(
name='cnm_usage',
version='0.1',
packages=find_packages(),
include_package_data=True,
install_requires=[
'Click',
'Requests',
],
entry_points='''
[console_scripts]
cnm_usage=cnm_usage.cli:main
''',
author='Sal Koritz',
description='cli app to pull usage data from cnMaestro API'
)
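# Usage note (assuming cnm_usage/cli.py exposes the Click entry point ``main``
# declared above): installing with ``pip install .`` makes the ``cnm_usage``
# console script available, e.g. ``cnm_usage --help``.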
|
from sympy import Expr, Number, NumberSymbol
from vector_calculus.containers import Vector, Tensor
from vector_calculus.operators import dot, inner
from measure import Measure
class SurfaceMeasure(Measure):
'''
    SurfaceMeasure is defined over sets whose geometrical dimension is
    one higher than their topological dimension.
'''
def __init__(self, domain):
assert domain.gdim == domain.tdim + 1, \
'Invalid domain tdim(%d) + 1!= gdim(%d)' % (domain.tdim, domain.gdim)
Measure.__init__(self, domain)
def __rmul__(self, integrand):
'''Integrate over domain.'''
# Scalar integral is f(x(s, t), y(s, t))*|d(x, y)/ds x d(x, y)/dt| ds dt
# Result is number
if isinstance(integrand, (Expr, Number, NumberSymbol, int, float)):
            # Note that the Jacobian for this domain is defined as the size of
            # the normal vector
integrand = integrand*self.domain.J
return self(integrand)
# Vector integral is
# inner(f(x(s, t), y(s, t)), (d(x, y)/ds x d(x, y)/dt)) ds dt
# Result is number
elif isinstance(integrand, Vector):
assert len(integrand) == self.domain.gdim, 'Gdim mismatch'
integrand = inner(integrand, self.domain.n)
return self(integrand)
# Tensor integral is
# dot(f(x(s, t), y(s, t)), (d(x, y)/ds x d(x, y)/dt)) ds dt
# Result is vector
elif isinstance(integrand, Tensor):
assert len(integrand) == self.domain.gdim, 'Gdim mismatch'
integrand = dot(integrand, self.domain.n)
return Vector([vi*self for vi in integrand])
# Nope
else:
raise TypeError('No surface integral of type %s' % type(integrand))
class dS(SurfaceMeasure):
'''
Convenience function for defining surface integrals over 'common' domains.
'''
# dS(A, B); face of tri; A, B are 2d
# dS(A, B, C, index)
# dS(A, B, C) face of the tet; A, B, C in 3d
# dS(A, B, C, D, index)
# dS([[[], []], index) face of rectangle
# dS([[], [], []], index) face of box
pass
# -----------------------------------------------------------------------------
if __name__ == '__main__':
from parametrized_set import Triangle
from sympy import symbols
A = [1, 0, 0]
B = [0, 1, 0]
C = [0, 0, 1]
dS = SurfaceMeasure(Triangle(A, B, C))
x, y, z = symbols('x, y, z')
f = x + y + z
v = Vector([x, y, z])
    print(v*dS)
T = Tensor([[x, 0, 0], [0, y, 0], [0, 0, z]])
    print(T*dS)
|
#!/usr/bin/env python3
from roman_numerals import RomanNumeral
class TestRomanNumeral():
rn2int_values = [
('mdcxl', 1640),
('mcm', 1900),
('md', 1500),
('mmmccc', 3300),
('mcmc', 2000),
('MCMLIV', 1954),
('MCMXC', 1990),
('MMXIV', 2014),
('MCMIII', 1903),
('MDCDIII', 1903),
('MDCCCCIII', 1903),
]
int2rn_values = [
('mdcxl', 1640),
('mcm', 1900),
('md', 1500),
('mmmccc', 3300),
('MCMLIV', 1954),
('MCMXC', 1990),
('MMXIV', 2014),
('MCMIII', 1903),
]
def rn2int():
for roman, int_value in TestRomanNumeral.rn2int_values:
rn = RomanNumeral(roman)
assert rn.int == int_value, rn
assert rn.roman == roman.upper(), rn
print('Passed rn2int test')
def int2rn():
for roman, int_value in TestRomanNumeral.int2rn_values:
rn = RomanNumeral(int_value)
assert rn.int == int_value, rn
assert rn.roman == roman.upper(), rn
print('Passed int2rn test')
def test():
TestRomanNumeral.rn2int()
TestRomanNumeral.int2rn()
if __name__ == '__main__':
TestRomanNumeral.test()
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_healthchecks_health_checks_vantage_point_facts
short_description: Fetches details about one or multiple HealthChecksVantagePoint resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple HealthChecksVantagePoint resources in Oracle Cloud Infrastructure
- Gets information about all vantage points available to the user.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
sort_by:
description:
- The field to sort by when listing vantage points.
type: str
choices:
- "name"
- "displayName"
sort_order:
description:
- Controls the sort order of results.
type: str
choices:
- "ASC"
- "DESC"
name:
description:
- Filters results that exactly match the `name` field.
type: str
display_name:
description:
- Filters results that exactly match the `displayName` field.
type: str
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List health_checks_vantage_points
oci_healthchecks_health_checks_vantage_point_facts:
# optional
sort_by: name
sort_order: ASC
name: name_example
display_name: display_name_example
"""
RETURN = """
health_checks_vantage_points:
description:
- List of HealthChecksVantagePoint resources
returned: on success
type: complex
contains:
display_name:
description:
- The display name for the vantage point. Display names are determined by
the best information available and may change over time.
returned: on success
type: str
sample: display_name_example
provider_name:
description:
- The organization on whose infrastructure this vantage point resides.
Provider names are not unique, as Oracle Cloud Infrastructure maintains
many vantage points in each major provider.
returned: on success
type: str
sample: provider_name_example
name:
description:
- The unique, permanent name for the vantage point.
returned: on success
type: str
sample: name_example
geo:
description:
- ""
returned: on success
type: complex
contains:
geo_key:
description:
- An opaque identifier for the geographic location of the vantage point.
returned: on success
type: str
sample: geo_key_example
admin_div_code:
description:
- The ISO 3166-2 code for this location's first-level administrative
division, either a US state or Canadian province. Only included for locations
in the US or Canada. For a list of codes, see
L(Country Codes,https://www.iso.org/obp/ui/#search).
returned: on success
type: str
sample: admin_div_code_example
city_name:
description:
- Common English-language name for the city.
returned: on success
type: str
sample: city_name_example
country_code:
description:
- The ISO 3166-1 alpha-2 country code. For a list of codes,
see L(Country Codes,https://www.iso.org/obp/ui/#search).
returned: on success
type: str
sample: country_code_example
country_name:
description:
- The common English-language name for the country.
returned: on success
type: str
sample: country_name_example
latitude:
description:
- Degrees north of the Equator.
returned: on success
type: float
sample: 3.4
longitude:
description:
- Degrees east of the prime meridian.
returned: on success
type: float
sample: 3.4
routing:
description:
- An array of objects that describe how traffic to this vantage point is
routed, including which prefixes and ASNs connect it to the internet.
- The addresses are sorted from the most-specific to least-specific
prefix (the smallest network to largest network). When a prefix has
multiple origin ASNs (MOAS routing), they are sorted by weight
(highest to lowest). Weight is determined by the total percentage of
peers observing the prefix originating from an ASN. Only present if
`fields` includes `routing`. The field will be null if the address's
routing information is unknown.
returned: on success
type: complex
contains:
as_label:
description:
- The registry label for `asn`, usually the name of the organization that
owns the ASN. May be omitted or null.
returned: on success
type: str
sample: as_label_example
asn:
description:
- The Autonomous System Number (ASN) identifying the organization
responsible for routing packets to `prefix`.
returned: on success
type: int
sample: 56
prefix:
description:
- An IP prefix (CIDR syntax) that is less specific than
`address`, through which `address` is routed.
returned: on success
type: str
sample: prefix_example
weight:
description:
- An integer between 0 and 100 used to select between multiple
origin ASNs when routing to `prefix`. Most prefixes have
exactly one origin ASN, in which case `weight` will be 100.
returned: on success
type: int
sample: 56
sample: [{
"display_name": "display_name_example",
"provider_name": "provider_name_example",
"name": "name_example",
"geo": {
"geo_key": "geo_key_example",
"admin_div_code": "admin_div_code_example",
"city_name": "city_name_example",
"country_code": "country_code_example",
"country_name": "country_name_example",
"latitude": 3.4,
"longitude": 3.4
},
"routing": [{
"as_label": "as_label_example",
"asn": 56,
"prefix": "prefix_example",
"weight": 56
}]
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.healthchecks import HealthChecksClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class HealthChecksVantagePointFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return []
def list_resources(self):
optional_list_method_params = [
"sort_by",
"sort_order",
"name",
"display_name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_health_checks_vantage_points, **optional_kwargs
)
HealthChecksVantagePointFactsHelperCustom = get_custom_class(
"HealthChecksVantagePointFactsHelperCustom"
)
class ResourceFactsHelper(
HealthChecksVantagePointFactsHelperCustom, HealthChecksVantagePointFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
sort_by=dict(type="str", choices=["name", "displayName"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
name=dict(type="str"),
display_name=dict(type="str"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="health_checks_vantage_point",
service_client_class=HealthChecksClient,
namespace="healthchecks",
)
result = []
if resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(health_checks_vantage_points=result)
if __name__ == "__main__":
main()
|
import pickle
import numpy as np
method_nlpd = np.zeros([17, 10])
for method in range(17):
for fold in range(10):
with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp:
method_nlpd[method, fold] = pickle.load(fp)
np.set_printoptions(precision=3)
print(np.mean(method_nlpd, axis=1))
np.set_printoptions(precision=2)
print(np.std(method_nlpd, axis=1))
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import call
import pytest
from assertpy import assert_that
from common.schedulers.slurm_commands import (
PartitionStatus,
SlurmJob,
SlurmNode,
SlurmPartition,
_batch_node_info,
_parse_nodes_info,
get_jobs_info,
get_pending_jobs_info,
is_static_node,
parse_nodename,
set_nodes_down,
set_nodes_drain,
set_nodes_idle,
set_nodes_power_down,
update_all_partitions,
update_nodes,
update_partitions,
)
from tests.common import read_text
@pytest.mark.parametrize(
(
"nodename",
"expected_queue",
"expected_node_type",
"expected_instance_name",
"expected_failure",
),
[
("queue1-st-c5xlarge-1", "queue1", "st", "c5xlarge", False),
("queue-1-st-c5xlarge-1", "queue-1", "st", "c5xlarge", False),
("queue1-st-dy-c5xlarge-1", "queue1-st", "dy", "c5xlarge", False),
("queue1-dy-st-c5xlarge-1", "queue1-dy", "st", "c5xlarge", False),
("queue1-dy-dy-dy-dy-c5xlarge-1", "queue1-dy-dy-dy", "dy", "c5xlarge", False),
("queue1-st-i3enmetal2tb-1", "queue1", "st", "i3enmetal2tb", False),
("queue1-st-u6tb1metal-1", "queue1", "st", "u6tb1metal", False),
("queue1-st-c5.xlarge-1", None, None, None, True),
("queue_1-st-c5-xlarge-1", None, None, None, True),
],
)
def test_parse_nodename(nodename, expected_queue, expected_node_type, expected_instance_name, expected_failure):
if expected_failure:
with pytest.raises(Exception):
parse_nodename(nodename)
else:
queue_name, node_type, instance_name = parse_nodename(nodename)
assert_that(expected_queue).is_equal_to(queue_name)
assert_that(expected_node_type).is_equal_to(node_type)
assert_that(expected_instance_name).is_equal_to(instance_name)
@pytest.mark.parametrize(
("nodename", "expected_is_static"),
[
("queue1-st-c5xlarge-1", True),
("queue-1-st-c5xlarge-1", True),
("queue1-st-dy-c5xlarge-1", False),
("queue1-dy-st-c5xlarge-1", True),
("queue1-dy-dy-dy-dy-c5xlarge-1", False),
("queue1-st-i3enmetal2tb-1", True),
("queue1-st-u6tb1metal-1", True),
],
)
def test_is_static_node(nodename, expected_is_static):
assert_that(expected_is_static).is_equal_to(is_static_node(nodename))
@pytest.mark.parametrize(
"squeue_mocked_response, expected_output",
[
(
"squeue_output_mix.txt",
[
SlurmJob(
cpus_total=5,
cpus_min_per_node=1,
cpus_per_task=1,
state="PD",
nodes=2,
tasks=5,
id="72",
pending_reason="Resources",
tres_per_job={},
tres_per_task={},
cpus_per_tres={},
),
SlurmJob(
cpus_total=10,
cpus_min_per_node=1,
cpus_per_task=1,
state="R",
nodes=3,
tasks=10,
id="84",
pending_reason="Resources",
tres_per_job={"gpu": 12},
tres_per_task={},
cpus_per_tres={},
),
SlurmJob(
cpus_total=40,
cpus_min_per_node=4,
cpus_per_task=4,
state="PD",
nodes=10,
tasks=10,
id="86",
pending_reason="ReqNodeNotAvail, May be reserved for other job",
tres_per_job={},
tres_per_task={"gpu": 4},
cpus_per_tres={},
),
SlurmJob(
cpus_total=10,
cpus_min_per_node=1,
cpus_per_task=1,
state="PD",
nodes=10,
tasks=10,
id="87",
pending_reason="ReqNodeNotAvail, May be reserved for other job",
tres_per_job={"gpu": 12},
tres_per_task={"gpu": 4},
tres_per_node={"gpu": 6},
cpus_per_tres={},
),
SlurmJob(
cpus_total=15,
cpus_min_per_node=3,
cpus_per_task=3,
state="PD",
nodes=4,
tasks=5,
id="90_1",
pending_reason="PartitionConfig",
tres_per_job={"gpu": 12},
tres_per_task={"gpu": 4},
tres_per_node={"gpu": 6},
cpus_per_tres={},
),
SlurmJob(
cpus_total=15,
cpus_min_per_node=3,
cpus_per_task=3,
state="PD",
nodes=4,
tasks=5,
id="90_2",
pending_reason="PartitionNodeLimit",
tres_per_job={"gpu": 12},
tres_per_task={"gpu": 4},
tres_per_node={"gpu": 6},
cpus_per_tres={"gpu": 5},
),
SlurmJob(
cpus_total=15,
cpus_min_per_node=3,
cpus_per_task=3,
state="PD",
nodes=4,
tasks=5,
id="90_3",
pending_reason="Resources",
tres_per_job={"gpu": 12},
tres_per_task={"gpu": 4},
tres_per_node={"gpu": 6},
cpus_per_tres={"gpu": 5},
),
],
),
(
"squeue_output_extra_column.txt",
[
SlurmJob(
id="72",
state="PD",
nodes=2,
tasks=5,
cpus_total=5,
cpus_min_per_node=1,
cpus_per_task=1,
pending_reason="Resources",
)
],
),
(
"squeue_output_missing_column.txt",
[
SlurmJob(
cpus_total=5,
tres_per_job=None,
cpus_min_per_node=0,
cpus_per_task=1,
tres_per_task=None,
state="",
nodes=2,
tasks=5,
id="72",
pending_reason="Resources",
)
],
),
("squeue_output_empty.txt", []),
],
ids=["mixed_output", "extra_column", "missing_column", "empty"],
)
def test_get_jobs_info(squeue_mocked_response, expected_output, test_datadir, mocker):
qstat_output = read_text(test_datadir / squeue_mocked_response)
mock = mocker.patch(
"common.schedulers.slurm_commands.check_command_output", return_value=qstat_output, autospec=True
)
jobs = get_jobs_info(job_state_filter="PD,R")
mock.assert_called_with(
"/opt/slurm/bin/squeue -r -O 'jobid:200,statecompact:200,numnodes:200,numcpus:200,numtasks:200,"
"cpus-per-task:200,mincpus:200,reason:200,tres-per-job:200,tres-per-task:200,tres-per-node:200,"
"cpus-per-tres:200' --states PD,R"
)
assert_that(jobs).is_equal_to(expected_output)
@pytest.mark.parametrize(
"pending_jobs, instance_properties, max_nodes_filter, filter_by_pending_reasons, expected_output",
[
(
[SlurmJob(id="72", state="PD", nodes=2, cpus_total=5, cpus_min_per_node=1, pending_reason="Priority")],
{"slots": 4, "gpus": 0},
2,
["Priority"],
[SlurmJob(id="72", state="PD", nodes=2, cpus_total=5, cpus_min_per_node=1, pending_reason="Priority")],
),
(
[
SlurmJob(id="72", state="PD", nodes=2, cpus_total=4, cpus_min_per_node=2, pending_reason="Priority"),
SlurmJob(
id="73", state="PD", nodes=2, cpus_total=5, cpus_min_per_node=2, pending_reason="Priority"
), # nodes gets incremented by 1
SlurmJob(id="74", state="PD", nodes=1, cpus_total=2, cpus_min_per_node=1, pending_reason="Priority"),
],
{"slots": 2, "gpus": 0},
2,
["Priority"],
[
SlurmJob(id="72", state="PD", nodes=2, cpus_total=4, cpus_min_per_node=2, pending_reason="Priority"),
SlurmJob(id="74", state="PD", nodes=1, cpus_total=2, cpus_min_per_node=1, pending_reason="Priority"),
],
),
(
[SlurmJob(id="72", state="PD", nodes=2, cpus_total=5, cpus_min_per_node=1, pending_reason="Priority")],
{"slots": 1, "gpus": 0},
1,
["Priority"],
[],
),
(
[SlurmJob(id="72", state="PD", nodes=2, cpus_total=5, cpus_min_per_node=5, pending_reason="Priority")],
{"slots": 4, "gpus": 0},
2,
["Priority"],
[],
),
(
[
SlurmJob(
id="72", state="PD", nodes=2, cpus_total=2, cpus_min_per_node=1, pending_reason="PartitionNodeLimit"
)
],
{"slots": 2, "gpus": 0},
2,
["Priority"],
[],
),
(
[SlurmJob(id="72", state="PD", nodes=4, cpus_total=15, cpus_min_per_node=3, pending_reason="Priority")],
{"slots": 4, "gpus": 0},
5,
[],
[
SlurmJob(
id="72",
state="PD",
nodes=5, # nodes got incremented by 1
cpus_total=15,
cpus_min_per_node=3,
pending_reason="Priority",
)
],
),
(
[SlurmJob(id="72", state="PD", nodes=4, cpus_total=15, cpus_min_per_node=3, pending_reason="Priority")],
{"slots": 4, "gpus": 0},
4,
[],
[],
),
(
[SlurmJob(id="72", state="PD", nodes=4, cpus_total=15, cpus_min_per_node=3, pending_reason="Priority")],
None,
None,
None,
[SlurmJob(id="72", state="PD", nodes=4, cpus_total=15, cpus_min_per_node=3, pending_reason="Priority")],
),
(
[
# sbatch --gpus=3 - no changes required
SlurmJob(
id="1",
state="PD",
nodes=1,
tasks=1,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 3},
tres_per_task={},
),
# sbatch --gpus=12 - recompute number of nodes
SlurmJob(
id="2",
state="PD",
nodes=3,
tasks=1,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 12},
tres_per_task={},
),
# sbatch --gpus=13 - recompute number of nodes and discard
SlurmJob(
id="3",
state="PD",
nodes=1,
tasks=1,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 13},
tres_per_task={},
),
# sbatch --gpus=4 -N 2 - no changes required
SlurmJob(
id="4",
state="PD",
nodes=2,
tasks=1,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 4},
tres_per_task={},
),
],
{"slots": 32, "gpus": 4},
3,
["Priority"],
[
SlurmJob(
id="1",
state="PD",
nodes=1,
tasks=1,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 3},
tres_per_task={},
),
SlurmJob(
id="2",
state="PD",
nodes=3,
tasks=1,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 12},
tres_per_task={},
),
SlurmJob(
id="4",
state="PD",
nodes=2,
tasks=1,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 4},
tres_per_task={},
),
],
),
(
[
# sbatch --gpus-per-task=2 -n 2 - no changes required
SlurmJob(
id="1",
state="PD",
nodes=1,
tasks=2,
cpus_per_task=1,
cpus_total=2,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 2},
),
# sbatch --gpus-per-task=2 -n 3 - recompute number of nodes
SlurmJob(
id="2",
state="PD",
nodes=1,
tasks=3,
cpus_per_task=1,
cpus_total=3,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 2},
),
# sbatch --wrap "sleep 100" --gpus-per-task=2 -n 3 -N 3 - no changes required
SlurmJob(
id="3",
state="PD",
nodes=3,
tasks=3,
cpus_per_task=1,
cpus_total=3,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 2},
),
# sbatch --wrap "sleep 100" --gpus-per-task=2 -n 3 -c 22 - no changes required
SlurmJob(
id="4",
state="PD",
nodes=3,
tasks=3,
cpus_per_task=22,
cpus_total=66,
cpus_min_per_node=22,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 2},
),
# sbatch --gpus-per-task=2 -n 3 - recompute number of nodes and discard
SlurmJob(
id="5",
state="PD",
nodes=1,
tasks=7,
cpus_per_task=1,
cpus_total=7,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 2},
),
],
{"slots": 32, "gpus": 4},
3,
["Priority"],
[
# sbatch --gpus-per-task=2 -n 2 - no changes required
SlurmJob(
id="1",
state="PD",
nodes=1,
tasks=2,
cpus_per_task=1,
cpus_total=2,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 2},
),
# sbatch --gpus-per-task=2 -n 3 - recompute number of nodes
SlurmJob(
id="2",
state="PD",
nodes=2,
tasks=3,
cpus_per_task=1,
cpus_total=3,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 2},
),
# sbatch --wrap "sleep 100" --gpus-per-task=2 -n 3 -N 3 - no changes required
SlurmJob(
id="3",
state="PD",
nodes=3,
tasks=3,
cpus_per_task=1,
cpus_total=3,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 2},
),
# sbatch --wrap "sleep 100" --gpus-per-task=2 -n 3 -c 22 - no changes required
SlurmJob(
id="4",
state="PD",
nodes=3,
tasks=3,
cpus_per_task=22,
cpus_total=66,
cpus_min_per_node=22,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 2},
),
],
),
(
[
# sbatch --gpus-per-task=5 -n 3 - nodes recomputed
SlurmJob(
id="1",
state="PD",
nodes=1,
tasks=3,
cpus_per_task=1,
cpus_total=3,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 5},
)
],
{"slots": 32, "gpus": 8},
3,
["Priority"],
[
# sbatch --gpus-per-task=5 -n 3 - nodes recomputed
SlurmJob(
id="1",
state="PD",
nodes=3,
tasks=3,
cpus_per_task=1,
cpus_total=3,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={"gpu": 5},
)
],
),
(
[
# sbatch --wrap "sleep 100" -n 40 --gpus-per-node=1 - no changes required
SlurmJob(
id="1",
state="PD",
nodes=2,
tasks=40,
cpus_per_task=1,
cpus_total=40,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={},
),
# sbatch --wrap "sleep 100" --gres=gpu:4 -n 2 -c 20 - no changes required
SlurmJob(
id="1",
state="PD",
nodes=2,
tasks=2,
cpus_per_task=20,
cpus_total=40,
cpus_min_per_node=20,
pending_reason="Priority",
tres_per_job={},
tres_per_task={},
),
],
{"slots": 32, "gpus": 4},
3,
["Priority"],
[
# sbatch --wrap "sleep 100" -n 40 --gpus-per-node=1 - no changes required
SlurmJob(
id="1",
state="PD",
nodes=2,
tasks=40,
cpus_per_task=1,
cpus_total=40,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={},
tres_per_task={},
),
# sbatch --wrap "sleep 100" --gres=gpu:4 -n 2 -c 20 - no changes required
SlurmJob(
id="1",
state="PD",
nodes=2,
tasks=2,
cpus_per_task=20,
cpus_total=40,
cpus_min_per_node=20,
pending_reason="Priority",
tres_per_job={},
tres_per_task={},
),
],
),
(
[
# sbatch --wrap "sleep 100" --gpus=4 --gpus-per-node=1 - discarded
SlurmJob(
id="1",
state="PD",
nodes=4,
tasks=1,
cpus_per_task=1,
cpus_total=4,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 4},
tres_per_task={},
),
# sbatch --wrap "sleep 100" --gpus=5 --cpus-per-gpu=15 - recompute number of nodes, recompute cpus_total
SlurmJob(
id="1",
state="PD",
nodes=1,
tasks=1,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 5},
tres_per_task={},
cpus_per_tres={"gpu": 15},
),
# sbatch --wrap "sleep 100" --gpus=10 --cpus-per-gpu=10 - discarded
SlurmJob(
id="1",
state="PD",
nodes=1,
tasks=1,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 10},
tres_per_task={},
cpus_per_tres={"gpu": 10},
),
# sbatch --wrap "sleep 100" -n 1 -c 33 --gpus=10 --cpus-per-gpu=1 - discarded
SlurmJob(
id="1",
state="PD",
nodes=1,
tasks=1,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=33,
pending_reason="Priority",
tres_per_job={"gpu": 10},
tres_per_task={},
cpus_per_tres={"gpu": 1},
),
# sbatch --wrap "sleep 100" --gpus=5 --gpus-per-task=1 - recomputed number of nodes
SlurmJob(
id="2",
state="PD",
nodes=1,
tasks=5,
cpus_per_task=1,
cpus_total=1,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 5},
tres_per_task={"gpu": 1},
),
],
{"slots": 32, "gpus": 4},
3,
["Priority"],
[
# sbatch --wrap "sleep 100" --gpus=4 --cpus-per-gpu=9 - recompute number of nodes, recompute cpus_total
SlurmJob(
id="1",
state="PD",
nodes=3,
tasks=1,
cpus_per_task=1,
cpus_total=75,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 5},
tres_per_task={},
cpus_per_tres={"gpu": 15},
),
# sbatch --wrap "sleep 100" --gpus=5 --gpus-per-task=1 - recomputed number of nodes
SlurmJob(
id="2",
state="PD",
nodes=2,
tasks=5,
cpus_per_task=1,
cpus_total=2,
cpus_min_per_node=1,
pending_reason="Priority",
tres_per_job={"gpu": 5},
tres_per_task={"gpu": 1},
),
],
),
],
ids=[
"single",
"multiple",
"max_nodes",
"max_cpus",
"filter_state",
"additional_node_required",
"discarded_after_node_adjustment",
"no_filters",
"gpus_per_job",
"gpus_per_task",
"gpus_per_task_2",
"gpus_per_node",
"gpus_mix",
],
)
def test_get_pending_jobs_info(
pending_jobs, instance_properties, max_nodes_filter, filter_by_pending_reasons, expected_output, mocker
):
mock = mocker.patch("common.schedulers.slurm_commands.get_jobs_info", return_value=pending_jobs, autospec=True)
pending_jobs = get_pending_jobs_info(instance_properties, max_nodes_filter, filter_by_pending_reasons)
mock.assert_called_with(job_state_filter="PD")
assert_that(pending_jobs).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node_info, expected_parsed_nodes_output",
[
(
(
"multiple-dy-c5xlarge-1\n"
"172.31.10.155\n"
"172-31-10-155\n"
"MIXED+CLOUD\n"
"multiple-dy-c5xlarge-2\n"
"172.31.7.218\n"
"172-31-7-218\n"
"IDLE+CLOUD+POWER\n"
"multiple-dy-c5xlarge-3\n"
"multiple-dy-c5xlarge-3\n"
"multiple-dy-c5xlarge-3\n"
"IDLE+CLOUD+POWER"
),
[
SlurmNode("multiple-dy-c5xlarge-1", "172.31.10.155", "172-31-10-155", "MIXED+CLOUD"),
SlurmNode("multiple-dy-c5xlarge-2", "172.31.7.218", "172-31-7-218", "IDLE+CLOUD+POWER"),
SlurmNode(
"multiple-dy-c5xlarge-3",
"multiple-dy-c5xlarge-3",
"multiple-dy-c5xlarge-3",
"IDLE+CLOUD+POWER",
),
],
)
],
)
def test_parse_nodes_info(node_info, expected_parsed_nodes_output, mocker):
assert_that(_parse_nodes_info(node_info)).is_equal_to(expected_parsed_nodes_output)
@pytest.mark.parametrize(
"nodenames, nodeaddrs, hostnames, batch_size, expected_result",
[
(
"queue1-st-c5xlarge-1,queue1-st-c5xlarge-2,queue1-st-c5xlarge-3",
None,
None,
2,
[("queue1-st-c5xlarge-1,queue1-st-c5xlarge-2,queue1-st-c5xlarge-3", None, None)],
),
(
# Only split on commas after bucket
# So nodename like queue1-st-c5xlarge-[1,3] can be processed safely
"queue1-st-c5xlarge-[1-2],queue1-st-c5xlarge-2,queue1-st-c5xlarge-3,queue1-st-c5xlarge-[4,6]",
"nodeaddr-[1-2],nodeaddr-2,nodeaddr-3,nodeaddr-[4,6]",
None,
2,
[
(
"queue1-st-c5xlarge-[1-2],queue1-st-c5xlarge-2,queue1-st-c5xlarge-3,queue1-st-c5xlarge-[4,6]",
"nodeaddr-[1-2],nodeaddr-2,nodeaddr-3,nodeaddr-[4,6]",
None,
)
],
),
(
"queue1-st-c5xlarge-[1-2],queue1-st-c5xlarge-2,queue1-st-c5xlarge-[3],queue1-st-c5xlarge-[4,6]",
"nodeaddr-[1-2],nodeaddr-2,nodeaddr-[3],nodeaddr-[4,6]",
"nodehostname-[1-2],nodehostname-2,nodehostname-[3],nodehostname-[4,6]",
2,
[
(
"queue1-st-c5xlarge-[1-2],queue1-st-c5xlarge-2,queue1-st-c5xlarge-[3]",
"nodeaddr-[1-2],nodeaddr-2,nodeaddr-[3]",
"nodehostname-[1-2],nodehostname-2,nodehostname-[3]",
),
("queue1-st-c5xlarge-[4,6]", "nodeaddr-[4,6]", "nodehostname-[4,6]"),
],
),
("queue1-st-c5xlarge-1,queue1-st-c5xlarge-[2],queue1-st-c5xlarge-3", ["nodeaddr-1"], None, 2, ValueError),
(
"queue1-st-c5xlarge-1,queue1-st-c5xlarge-[2],queue1-st-c5xlarge-3",
None,
["nodehostname-1"],
2,
ValueError,
),
(
"queue1-st-c5xlarge-1,queue1-st-c5xlarge-2,queue1-st-c5xlarge-3",
["nodeaddr-1", "nodeaddr-2"],
"nodehostname-1,nodehostname-2,nodehostname-3",
2,
ValueError,
),
(
["queue1-st-c5xlarge-1", "queue1-st-c5xlarge-2", "queue1-st-c5xlarge-3"],
"nodeaddr-[1],nodeaddr-[2],nodeaddr-3",
["nodehostname-1", "nodehostname-2", "nodehostname-3"],
2,
[
(
"queue1-st-c5xlarge-1,queue1-st-c5xlarge-2",
"nodeaddr-[1],nodeaddr-[2]",
"nodehostname-1,nodehostname-2",
),
("queue1-st-c5xlarge-3", "nodeaddr-3", "nodehostname-3"),
],
),
(
# Test with strings of same length but different number of node entries
"queue1-st-c5xlarge-[1-fillerr],queue1-st-c5xlarge-[2-fillerr],queue1-st-c5xlarge-[3-filler]",
"nodeaddr-1,nodeaddr-2,nodeaddr-3",
["nodehostname-1", "nodehostname-2", "nodehostname-3"],
2,
ValueError,
),
],
ids=[
"nodename_only",
"name+addr",
"name+addr+hostname",
"incorrect_addr1",
"incorrect_hostname1",
"incorrect_addr2",
"mixed_format",
"same_length_string",
],
)
def test_batch_node_info(nodenames, nodeaddrs, hostnames, batch_size, expected_result):
if expected_result is not ValueError:
assert_that(list(_batch_node_info(nodenames, nodeaddrs, hostnames, batch_size))).is_equal_to(expected_result)
else:
try:
_batch_node_info(nodenames, nodeaddrs, hostnames, batch_size)
except Exception as e:
assert_that(e).is_instance_of(ValueError)
pass
else:
pytest.fail("Expected _batch_node_info to raise ValueError.")
@pytest.mark.parametrize(
"nodes, reason, reset_addrs, update_call_kwargs",
[
(
"nodes-1,nodes[2-6]",
None,
False,
{"nodes": "nodes-1,nodes[2-6]", "state": "resume", "reason": None, "raise_on_error": False},
),
(
"nodes-1,nodes[2-6]",
"debugging",
True,
{
"nodes": "nodes-1,nodes[2-6]",
"nodeaddrs": "nodes-1,nodes[2-6]",
"nodehostnames": "nodes-1,nodes[2-6]",
"state": "resume",
"reason": "debugging",
"raise_on_error": False,
},
),
(
["nodes-1", "nodes[2-4]", "nodes-5"],
"debugging",
True,
{
"nodes": ["nodes-1", "nodes[2-4]", "nodes-5"],
"nodeaddrs": ["nodes-1", "nodes[2-4]", "nodes-5"],
"nodehostnames": ["nodes-1", "nodes[2-4]", "nodes-5"],
"state": "resume",
"reason": "debugging",
"raise_on_error": False,
},
),
],
)
def test_set_nodes_idle(nodes, reason, reset_addrs, update_call_kwargs, mocker):
update_mock = mocker.patch("common.schedulers.slurm_commands.update_nodes", autospec=True)
set_nodes_idle(nodes, reason, reset_addrs)
update_mock.assert_called_with(**update_call_kwargs)
@pytest.mark.parametrize(
"nodes, reason, reset_addrs, update_call_kwargs",
[
(
"nodes-1,nodes[2-6]",
"debugging",
True,
{"nodes": "nodes-1,nodes[2-6]", "state": "down", "reason": "debugging"},
),
(
["nodes-1", "nodes[2-4]", "nodes-5"],
"debugging",
True,
{"nodes": ["nodes-1", "nodes[2-4]", "nodes-5"], "state": "down", "reason": "debugging"},
),
],
)
def test_set_nodes_down(nodes, reason, reset_addrs, update_call_kwargs, mocker):
update_mock = mocker.patch("common.schedulers.slurm_commands.update_nodes", autospec=True)
set_nodes_down(nodes, reason)
update_mock.assert_called_with(**update_call_kwargs)
@pytest.mark.parametrize(
"nodes, reason, reset_addrs, update_call_kwargs",
[
(
"nodes-1,nodes[2-6]",
None,
False,
{"nodes": "nodes-1,nodes[2-6]", "state": "power_down", "reason": None, "raise_on_error": True},
),
(
"nodes-1,nodes[2-6]",
"debugging",
True,
{"nodes": "nodes-1,nodes[2-6]", "state": "power_down", "reason": "debugging", "raise_on_error": True},
),
(
["nodes-1", "nodes[2-4]", "nodes-5"],
"debugging",
True,
{
"nodes": ["nodes-1", "nodes[2-4]", "nodes-5"],
"state": "power_down",
"reason": "debugging",
"raise_on_error": True,
},
),
],
)
def test_set_nodes_power_down(nodes, reason, reset_addrs, update_call_kwargs, mocker):
update_mock = mocker.patch("common.schedulers.slurm_commands.reset_nodes", autospec=True)
set_nodes_power_down(nodes, reason)
update_mock.assert_called_with(**update_call_kwargs)
@pytest.mark.parametrize(
"nodes, reason, reset_addrs, update_call_kwargs",
[
(
"nodes-1,nodes[2-6]",
"debugging",
True,
{"nodes": "nodes-1,nodes[2-6]", "state": "drain", "reason": "debugging"},
),
(
["nodes-1", "nodes[2-4]", "nodes-5"],
"debugging",
True,
{"nodes": ["nodes-1", "nodes[2-4]", "nodes-5"], "state": "drain", "reason": "debugging"},
),
],
)
def test_set_nodes_drain(nodes, reason, reset_addrs, update_call_kwargs, mocker):
update_mock = mocker.patch("common.schedulers.slurm_commands.update_nodes", autospec=True)
set_nodes_drain(nodes, reason)
update_mock.assert_called_with(**update_call_kwargs)
@pytest.mark.parametrize(
"batch_node_info, state, reason, raise_on_error, run_command_calls",
[
(
[("queue1-st-c5xlarge-1", None, None), ("queue1-st-c5xlarge-2,queue1-st-c5xlarge-3", None, None)],
None,
None,
False,
[
call(
"/opt/slurm/bin/scontrol update nodename=queue1-st-c5xlarge-1",
raise_on_error=False,
timeout=60,
shell=True,
),
call(
"/opt/slurm/bin/scontrol update nodename=queue1-st-c5xlarge-2,queue1-st-c5xlarge-3",
raise_on_error=False,
timeout=60,
shell=True,
),
],
),
(
[
("queue1-st-c5xlarge-1", None, "hostname-1"),
("queue1-st-c5xlarge-2,queue1-st-c5xlarge-3", "addr-2,addr-3", None),
],
"power_down",
None,
True,
[
call(
"/opt/slurm/bin/scontrol update state=power_down "
"nodename=queue1-st-c5xlarge-1 nodehostname=hostname-1",
raise_on_error=True,
timeout=60,
shell=True,
),
call(
"/opt/slurm/bin/scontrol update state=power_down "
"nodename=queue1-st-c5xlarge-2,queue1-st-c5xlarge-3 nodeaddr=addr-2,addr-3",
raise_on_error=True,
timeout=60,
shell=True,
),
],
),
(
[
("queue1-st-c5xlarge-1", None, "hostname-1"),
("queue1-st-c5xlarge-[3-6]", "addr-[3-6]", "hostname-[3-6]"),
],
"down",
"debugging",
True,
[
call(
(
'/opt/slurm/bin/scontrol update state=down reason="debugging"'
+ " nodename=queue1-st-c5xlarge-1 nodehostname=hostname-1"
),
raise_on_error=True,
timeout=60,
shell=True,
),
call(
(
'/opt/slurm/bin/scontrol update state=down reason="debugging"'
+ " nodename=queue1-st-c5xlarge-[3-6] nodeaddr=addr-[3-6] nodehostname=hostname-[3-6]"
),
raise_on_error=True,
timeout=60,
shell=True,
),
],
),
],
)
def test_update_nodes(batch_node_info, state, reason, raise_on_error, run_command_calls, mocker):
mocker.patch("common.schedulers.slurm_commands._batch_node_info", return_value=batch_node_info, autospec=True)
cmd_mock = mocker.patch("common.schedulers.slurm_commands.run_command", autospec=True)
update_nodes(batch_node_info, "some_nodeaddrs", "some_hostnames", state, reason, raise_on_error)
cmd_mock.assert_has_calls(run_command_calls)
@pytest.mark.parametrize(
"node, expected_output",
[
(SlurmNode("queue-name-st-t2micro-1", "nodeip", "nodehostname", "somestate"), True),
(SlurmNode("queue-name-st-dy-t2micro-1", "nodeip", "nodehostname", "somestate"), False),
(SlurmNode("queuename-dy-t2micro-1", "nodeip", "nodehostname", "somestate"), False),
(SlurmNode("queuename-dy-dy-dy-st-t2micro-1", "nodeip", "nodehostname", "somestate"), True),
],
)
def test_slurm_node_is_static(node, expected_output):
assert_that(node.is_static).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(SlurmNode("queue-name-st-t2micro-1", "nodeip", "nodehostname", "somestate"), True),
(SlurmNode("queuename-dy-t2micro-1", "queuename-dy-t2micro-1", "nodehostname", "somestate"), False),
],
)
def test_slurm_node_is_nodeaddr_set(node, expected_output):
assert_that(node.is_nodeaddr_set()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "somestate"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED#+CLOUD+DRAIN"), True),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED*+CLOUD+DRAIN"), True),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "DOWN+CLOUD"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "COMPLETING+DRAIN"), True),
],
)
def test_slurm_node_has_job(node, expected_output):
assert_that(node.has_job()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "somestate"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED#+CLOUD+DRAIN"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED*+CLOUD+DRAIN"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE*+CLOUD+DRAIN"), True),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "DOWN+CLOUD+DRAIN"), True),
],
)
def test_slurm_node_is_drained(node, expected_output):
assert_that(node.is_drained()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "somestate"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED#+CLOUD+DOWN"), True),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED*+CLOUD+DRAIN"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "DOWN*+CLOUD"), True),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "DOWN+CLOUD+POWER"), True),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE~+CLOUD+POWERING_DOWN"), False),
],
)
def test_slurm_node_is_down(node, expected_output):
assert_that(node.is_down()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"node, expected_output",
[
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+POWER"), True),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "MIXED#+CLOUD+DRAIN"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "ALLOCATED*+CLOUD+DOWN"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE+CLOUD+POWERING_DOWN"), False),
(SlurmNode("queue1-st-c5xlarge-1", "nodeip", "nodehostname", "IDLE#+CLOUD"), True),
],
)
def test_slurm_node_is_up(node, expected_output):
assert_that(node.is_up()).is_equal_to(expected_output)
@pytest.mark.parametrize(
"partitions, state, run_command_calls, run_command_side_effects, expected_succeeded_partitions",
[
(
["part-1", "part-2"],
PartitionStatus.INACTIVE,
[
call(
"/opt/slurm/bin/scontrol update partitionname=part-1 state=INACTIVE",
raise_on_error=True,
shell=True,
),
call(
"/opt/slurm/bin/scontrol update partitionname=part-2 state=INACTIVE",
raise_on_error=True,
shell=True,
),
],
[Exception, None],
["part-2"],
),
(
["part-1", "part-2"],
"UP",
[
call("/opt/slurm/bin/scontrol update partitionname=part-1 state=UP", raise_on_error=True, shell=True),
call("/opt/slurm/bin/scontrol update partitionname=part-2 state=UP", raise_on_error=True, shell=True),
],
[Exception, None],
["part-2"],
),
(
[],
"UP",
[],
[],
[],
),
],
)
def test_update_partitions(
partitions, state, run_command_calls, run_command_side_effects, expected_succeeded_partitions, mocker
):
    run_command_spy = mocker.patch(
        "common.schedulers.slurm_commands.run_command", side_effect=run_command_side_effects, autospec=True
    )
assert_that(update_partitions(partitions, state)).is_equal_to(expected_succeeded_partitions)
if run_command_calls:
run_command_spy.assert_has_calls(run_command_calls)
else:
run_command_spy.assert_not_called()
@pytest.mark.parametrize(
(
"mock_partitions",
"state",
"reset_node_info",
"expected_reset_nodes_calls",
"partitions_to_update",
"mock_succeeded_partitions",
"expected_results",
),
[
(
[
SlurmPartition("part-1", "node-1,node-2", "INACTIVE"),
SlurmPartition("part-2", "node-3,node-4", "UP"),
],
PartitionStatus.INACTIVE,
True,
[call("node-3,node-4", reason="stopping cluster", state="power_down")],
["part-2"],
["part-2"],
True,
),
(
[
SlurmPartition("part-1", "node-1,node-2", "DRAIN"),
SlurmPartition("part-2", "node-3,node-4", "UP"),
],
PartitionStatus.INACTIVE,
True,
[
call("node-1,node-2", reason="stopping cluster", state="power_down"),
call("node-3,node-4", reason="stopping cluster", state="power_down"),
],
["part-1", "part-2"],
["part-1", "part-2"],
True,
),
(
[
SlurmPartition("part-1", "node-1,node-2", "DRAIN"),
SlurmPartition("part-2", "node-3,node-4", "UP"),
],
PartitionStatus.INACTIVE,
False,
[],
["part-1", "part-2"],
["part-1", "part-2"],
True,
),
(
[
SlurmPartition("part-1", "node-1,node-2", "DRAIN"),
SlurmPartition("part-2", "node-3,node-4", "UP"),
],
PartitionStatus.UP,
False,
[],
["part-1"],
[],
False,
),
(
[
SlurmPartition("part-1", "node-1,node-2", "DRAIN"),
SlurmPartition("part-2", "node-3,node-4", "UP"),
],
"UP",
False,
[],
["part-1"],
["part-1"],
True,
),
],
)
def test_update_all_partitions(
mock_partitions,
state,
reset_node_info,
expected_reset_nodes_calls,
partitions_to_update,
mock_succeeded_partitions,
expected_results,
mocker,
):
    reset_node_spy = mocker.patch("common.schedulers.slurm_commands.reset_nodes", autospec=True)
    update_partitions_spy = mocker.patch(
        "common.schedulers.slurm_commands.update_partitions", return_value=mock_succeeded_partitions, autospec=True
    )
    get_part_spy = mocker.patch(
        "common.schedulers.slurm_commands.get_partition_info", return_value=mock_partitions, autospec=True
    )
assert_that(update_all_partitions(state, reset_node_addrs_hostname=reset_node_info)).is_equal_to(expected_results)
get_part_spy.assert_called_with(get_all_nodes=True)
if expected_reset_nodes_calls:
reset_node_spy.assert_has_calls(expected_reset_nodes_calls)
else:
reset_node_spy.assert_not_called()
update_partitions_spy.assert_called_with(partitions_to_update, state)
|
import csv
import sys
from os import path
def get_keys(row):
return [key for (key, value) in (kv_pair.split("=") for kv_pair in row.split("|"))]
def get_values(row):
return [value for (key, value) in (kv_pair.split("=") for kv_pair in row.split("|"))]
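# Illustrative example (derived from the parsing above; the values are made up):
# each .rsl row is a "|"-separated list of key=value pairs, e.g.
#   row = "NAME=example|TYPE=rsl|SIZE=42"
#   get_keys(row)   -> ["NAME", "TYPE", "SIZE"]
#   get_values(row) -> ["example", "rsl", "42"]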
def valid_rsl(path2rsl):
    if not path.exists(path2rsl):
        print("File {f_name} not found.".format(f_name=path2rsl))
        return False
    extension = path.splitext(path2rsl)[1]
    if not extension:
        print("{f_name} is not a file.".format(f_name=path2rsl))
        return False
    if extension != ".rsl":
        print("File {f_name} is not an rsl file.".format(f_name=path2rsl))
        return False
    return True
def get_path_from_console_args():
argv = sys.argv
if len(argv) > 2:
print("Указано слишком много аргументов. Требуется ввести только путь к файлу.")
elif len(argv) <= 1:
print("Путь к файлу не указан")
else:
return argv[1]
def main(path2rsl):
with open(path2rsl, mode='r', encoding='utf-8') as rslFile:
rows = [row.replace("\n", "") for row in rslFile.readlines()]
keys = get_keys(rows[0])
values_list = [get_values(row) for row in rows]
    path2csv = path.splitext(path2rsl)[0] + ".csv"
with open(path2csv, mode='w', newline="") as csvFile:
csv_writer = csv.writer(csvFile, dialect="excel")
csv_writer.writerow(keys)
csv_writer.writerows(values_list)
print("Файл успешно сохранён:", path2csv)
if __name__ == '__main__':
path2file = get_path_from_console_args()
if path2file and valid_rsl(path2file):
main(path2file)
else:
sys.exit(0)
|
# force tensorflow to use CPU (comment out the next line to allow GPU use)
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# import Python Libraries
import numpy as np
import tensorflow as tf
import keras
from keras.models import model_from_json
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_v3 import InceptionV3
import math
import utility
import cv2
from glob import glob
def face_detector(img_path):
"""returns "True" if face is detected in image"""
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray)
return len(faces) > 0
def path_to_tensor(img_path):
"""convert RGB image to 4D tensor with shape (1, 224, 224, 3)"""
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
return np.expand_dims(x, axis=0)
def dog_detector(img_path):
"""Uses ResNet50 to detect if image contains a dog"""
img = keras.applications.resnet50.preprocess_input(
path_to_tensor(img_path))
prediction = np.argmax(ResNet50_model.predict(img))
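    # in the ImageNet class ordering, indices 151-268 are the dog breeds,
    # so the range check below flags the image as containing a dog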
return ((prediction <= 268) & (prediction >= 151))
def extract_bottleneck_features(tensor):
"""Returns the InceptionV3 bottleneck features"""
return InceptionV3(weights='imagenet', include_top=False).predict(keras.applications.inception_v3.preprocess_input(tensor))
def predict_breed(img_path):
"""Returns the dog breed that is predicted by the model"""
inceptionV3_bottleneck_features = extract_bottleneck_features(
path_to_tensor(img_path))
predicted_vector = model.predict(inceptionV3_bottleneck_features)
return dog_names[np.argmax(predicted_vector)]
def predict_dog_breed_from_human_or_dog(img_path):
"""Returns a tuple of image with image type (person or dog)
and the dog breed that is predicted by the model
"""
image_type = "error"
dog_breed = "N/A"
if face_detector(img_path):
image_type = "face"
dog_breed = predict_breed(img_path)
elif dog_detector(img_path):
image_type = "dog"
dog_breed = predict_breed(img_path)
return image_type, dog_breed
def load_model(model_path, weights_path):
""" load json and create model and loads weights """
# load json and create model
json_file = open(model_path, 'r')
model_json = json_file.read()
json_file.close()
model = model_from_json(model_json)
# load the model weights with the best validation loss.
model.load_weights(weights_path)
return model
def process_folder(folder_path):
""" process all images in folder """
sample_images = glob(folder_path + "/*")
for img_path in sample_images:
image_type, dog_breed = predict_dog_breed_from_human_or_dog(img_path)
        if image_type == "face":
            print("{} is a human that looks like a {}".format(img_path, dog_breed))
        elif image_type == "dog":
print("{} is a dog that looks like a {}".format(img_path, dog_breed))
else:
print("{} is not a human or dog!".format(img_path))
if __name__ == "__main__":
print(("* Initialising..."))
# ignore tensorflow warnings
tf.logging.set_verbosity(tf.logging.ERROR)
# define ResNet50 model
ResNet50_model = ResNet50(weights='imagenet')
# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier(
'haarcascades/haarcascade_frontalface_alt.xml')
# load list of dog names
dog_names = [item[20:-1] for item in sorted(glob("dogImages/train/*/"))]
# load json and create model
model = load_model('saved_models/inceptionv3_model.json',
'saved_models/weights.best.InceptionV3.hdf5')
    # classify every image in the sample folder
process_folder("images")
|
"""
This program shows how to create a tuple with a single element.
"""
# This just creates an integer...
x = (3)
print(x)
print(type(x))
# This creates a tuple
x = (3,)
print(x)
print(type(x)) |
from Among_Us import execute_action
x_positions = (613, 790, 963, 1138, 1312)
indicators = [(x, 900) for x in x_positions]
switches = [(x, 784) for x in x_positions]
def run(screenshot, task_data, trigger):
for i in range(len(x_positions)):
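        # getpixel() returns an (R, G, B[, A]) tuple, so index 1 is the green
        # channel; a low value is assumed to mean this switch is still off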
if screenshot.getpixel(indicators[i])[1] <= 150:
execute_action("mouse_move", *switches[i])
execute_action("mouse_click")
execute_action("wait", 1)
return True
|
'''OpenGL extension OES.sample_shading
This module customises the behaviour of the
OpenGL.raw.GLES2.OES.sample_shading to provide a more
Python-friendly API
Overview (from the spec)
In standard multisample rendering, an implementation is allowed to
assign the same sets of fragment shader input values to each sample.
This can cause aliasing where the fragment shader input values are
used to generate a result that doesn't antialias itself, for example
with alpha-tested transparency.
This extension adds the ability to explicitly request that an
implementation use a minimum number of unique set of fragment
computation inputs when multisampling a pixel. Specifying such a
requirement can reduce aliasing that results from evaluating the
fragment computations too few times per pixel.
This extension adds new global state that controls the minimum
number of samples for which attribute data is independently
interpolated. When enabled, all fragment-shading operations
are executed independently on each sample.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/sample_shading.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.sample_shading import *
from OpenGL.raw.GLES2.OES.sample_shading import _EXTENSION_NAME
def glInitSampleShadingOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
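# Illustrative usage sketch (an assumption, not part of the autogenerated
# wrapper; it presumes a current GLES2 context and that glEnable is imported
# from the GLES2 API):
#
#   if glInitSampleShadingOES():
#       glEnable(GL_SAMPLE_SHADING_OES)      # turn on per-sample shading
#       glMinSampleShadingOES(1.0)           # request one invocation per sample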
### END AUTOGENERATED SECTION |
number = int(input())
if 25 < number < 75:
print("True")
else:
print("False") |
import torch
from numpy import pi
from torch.utils.data import TensorDataset
from sggm.definitions import STAGE_SETUP_SHIFTED_SPLIT
"""
Helper for shifted datamodules
"""
def t_log(x: float) -> torch.Tensor:
return torch.log(torch.Tensor([x]))
def log_density(x: torch.Tensor) -> torch.Tensor:
"""Computes the density of points for a set of points
Args:
x (torch.Tensor): set of points (N x D)
Returns:
torch.Tensor: density in # points per unit of volume in the set
"""
hypercube_min, _ = torch.min(x, dim=0)
hypercube_max, _ = torch.max(x, dim=0)
log_vol = torch.sum(torch.log(hypercube_max - hypercube_min), 0)
return torch.log(torch.Tensor([x.shape[0]])) - log_vol
def log_radius(p_tot: float, p_k: float, x: torch.Tensor) -> torch.Tensor:
"""Computes the radius of the holes to introduce in the training data
Args:
p_tot (float): proportion of total expected points in B_1 U B_2 U ... B_K
p_k (float): proportion of points sampled as {B_k}_{k=1}^{K} centers
x (torch.Tensor): sets of points
Returns:
torch.Tensor: radius of any B_k
"""
log_d = log_density(x)
D = x.shape[1]
# log_r = torch.pow(
# (p_tot / p_k)
# * (torch.lgamma(torch.Tensor([D / 2 + 1])) - log_d).exp()
# / (pi ** (D / 2)),
# 1 / D,
# )
log_r = (
1
/ D
* (
t_log(p_tot / p_k)
+ torch.lgamma(torch.Tensor([D / 2 + 1]))
- log_d
- t_log(pi ** (D / 2))
)
)
return log_r
def generate_shift(
proportions: tuple([float]),
train: tuple([torch.Tensor]),
test: tuple([torch.Tensor]),
) -> tuple([tuple([torch.Tensor])]):
# Unpack
shifting_proportion_total, shifting_proportion_k = proportions
x_train, y_train = train
x_test, y_test = test
# Sample p_k% of training samples to serve as center for hyperballs
K = max(int(shifting_proportion_k * x_train.shape[0]), 1)
idx_k = torch.multinomial(
torch.ones_like(x_train[:, 0]).flatten(), K, replacement=False
)
x_k = x_train[idx_k]
    # Determine the (log) radius of the hyperballs
log_dist = log_radius(shifting_proportion_total, shifting_proportion_k, x_train)
    # Any point lying inside any hyperball gets moved to the test set
in_any_b_k = torch.where(
torch.where(torch.log(torch.cdist(x_train, x_k)) < log_dist, 1, 0).sum(dim=1)
>= 1,
1,
0,
)
x_test = torch.cat((x_test, x_train[in_any_b_k == 1]), dim=0)
y_test = torch.cat((y_test, y_train[in_any_b_k == 1]), dim=0)
x_train = x_train[in_any_b_k == 0]
y_train = y_train[in_any_b_k == 0]
return (x_train, y_train), (x_test, y_test)
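# Illustrative usage sketch (not part of the original module; shapes and
# proportions below are arbitrary):
#
#   x_tr, y_tr = torch.randn(1000, 2), torch.randn(1000, 1)
#   x_te, y_te = torch.randn(200, 2), torch.randn(200, 1)
#   (x_tr, y_tr), (x_te, y_te) = generate_shift(
#       (0.1, 0.01), (x_tr, y_tr), (x_te, y_te)
#   )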
class DataModuleShifted:
"""
    Add-on class to introduce a shift between the training and testing distributions.
    [NOTE]: Implicitly assumes that it is used in combination with a child of another DataModule
"""
def __init__(
self,
shifting_proportion_total: float = 0.1,
shifting_proportion_k: float = 1e-2,
*args,
**kwargs
):
self.shifting_proportion_total = float(shifting_proportion_total)
self.shifting_proportion_k = float(shifting_proportion_k)
def setup(self):
train, test = generate_shift(
(self.shifting_proportion_total, self.shifting_proportion_k),
self.train_dataset.dataset.tensors,
self.test_dataset.tensors,
)
self.train_dataset = TensorDataset(*train)
self.setup_train_val_datasets(self.train_dataset)
self.test_dataset = TensorDataset(*test)
class DataModuleShiftedSplit:
"""
    Add-on class to introduce a shift based on splits between the training and testing distributions.
    Based on: https://arxiv.org/abs/1906.11537 ("'In-Between' Uncertainty in BNNs")
    [NOTE]: Implicitly assumes that it is used in combination with a child of another DataModule
"""
def setup(self, dim_idx: int, stage: str = None):
if stage == STAGE_SETUP_SHIFTED_SPLIT:
x_train, y_train = self.train_dataset.dataset.tensors
x_test, y_test = self.test_dataset.tensors
_, dim_col_indices = x_train[:, dim_idx].sort()
N = dim_col_indices.shape[0]
split_indices = dim_col_indices[int(N / 3) : int((2 * N) / 3)]
non_split_indices = torch.cat(
(dim_col_indices[: int(N / 3)], dim_col_indices[int((2 * N) / 3) :])
)
x_test = torch.cat((x_test, x_train[split_indices]), dim=0)
y_test = torch.cat((y_test, y_train[split_indices]), dim=0)
x_train, y_train = x_train[non_split_indices], y_train[non_split_indices]
self.train_dataset = TensorDataset(x_train, y_train)
self.setup_train_val_datasets(self.train_dataset)
self.test_dataset = TensorDataset(x_test, y_test)
|
#!/usr/bin/env python
##############################################################################
##
# This file is part of Sardana
##
# http://www.sardana-controls.org/
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Sardana is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Sardana is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Sardana. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
"""The sardana tango motor module"""
__all__ = ["Motor", "MotorClass"]
__docformat__ = 'restructuredtext'
import sys
import time
from PyTango import DevFailed, Except, DevVoid, DevShort, \
DevLong, DevDouble, DevBoolean, DispLevel, DevState, AttrQuality, \
READ, READ_WRITE, SCALAR, SPECTRUM
from taurus.core.util.log import DebugIt
from sardana import State, SardanaServer
from sardana.sardanautils import str_to_value
from sardana.sardanaattribute import SardanaAttribute
from sardana.pool.poolexception import PoolException
from sardana.tango.core.util import memorize_write_attribute, exception_str, \
to_tango_type_format, throw_sardana_exception
from sardana.tango.pool.PoolDevice import PoolElementDevice, \
PoolElementDeviceClass
class Motor(PoolElementDevice):
"""The tango motor device class. This class exposes through a tango device
the sardana motor (:class:`~sardana.pool.poolmotor.PoolMotor`).
.. rubric:: The states
The motor interface knows five states which are ON, MOVING, ALARM,
FAULT and UNKNOWN. A motor device is in MOVING state when it is
moving! It is in ALARM state when it has reached one of the limit
switches and is in FAULT if its controller software is not available
(impossible to load it) or if a fault is reported from the hardware
controller. The motor is in the UNKNOWN state if an exception occurs
during the communication between the pool and the hardware controller.
    When the motor is in ALARM state, its status will indicate which limit
    switch is active.
.. rubric:: The commands
    The motor interface supports 5 commands on top of the Tango classical
    Init, State and Status commands. These commands are summarized in the
following table:
============== ================ ================
Command name Input data type Output data type
============== ================ ================
Stop void void
Abort void void
DefinePosition Tango::DevDouble void
SaveConfig void void
MoveRelative Tango::DevDouble void
============== ================ ================
    - **Stop** : It stops a running motion. This command does not have input or
      output arguments.
    - **Abort** : It aborts a running motion. This command does not have input or
      output arguments.
- **DefinePosition** : Loads a position into controller. It has one input
argument which is the new position value (a double). It is allowed only in
the ON or ALARM states. The unit used for the command input value is the
physical unit: millimeters or milli-radians. It is always an absolute
position.
- **SaveConfig** : Write some of the motor parameters in database. Today, it
writes the motor acceleration, deceleration, base_rate and velocity into
database as motor device properties. It is allowed only in the ON or ALARM
states
    - **MoveRelative** : Moves the motor by a distance relative to the current
      position. It has one input argument which is the relative distance
(a double). It is allowed only in the ON or ALARM states. The unit used for
the command input value is the physical unit: millimeters or milli-radians.
The classical Tango Init command destroys the motor and re-create it.
.. rubric:: The attributes
The motor interface supports several attributes which are summarized
in the following table:
============== ================= =========== ======== ========= ===============
Name Data type Data format Writable Memorized Operator/Expert
============== ================= =========== ======== ========= ===============
Position Tango::DevDouble Scalar R/W No * Operator
DialPosition Tango::DevDouble Scalar R No Expert
Offset Tango::DevDouble Scalar R/W Yes Expert
Acceleration Tango::DevDouble Scalar R/W Yes Expert
Base_rate Tango::DevDouble Scalar R/W Yes Expert
Deceleration Tango::DevDouble Scalar R/W Yes Expert
Velocity Tango::DevDouble Scalar R/W Yes Expert
Limit_switches Tango::DevBoolean Spectrum R No Expert
SimulationMode Tango::DevBoolean Scalar R No Expert
Step_per_unit Tango::DevDouble Scalar R/W Yes Expert
Backlash Tango::DevLong Scalar R/W Yes Expert
============== ================= =========== ======== ========= ===============
    - **Position** : This is a read-write scalar double attribute. With the classical
Tango min_value and max_value attribute properties, it is easy to define
authorized limit for this attribute. See the definition of the
DialPosition and Offset attributes to get a precise definition of the
meaning of this attribute. It is not allowed to read or write this
attribute when the motor is in FAULT or UNKNOWN state. It is also not
possible to write this attribute when the motor is already MOVING.
The unit used for this attribute is the physical unit e.g. millimeters or
milli-radian. It is always an **absolute position** .
- **DialPosition** : This attribute is the motor dial position. The following
formula links together the Position, DialPosition, Sign and Offset attributes:
Position = Sign * DialPosition + Offset
      This allows the motor position to be centered around any position
      defined by the Offset attribute (classically the X-ray beam position); a
      worked numeric example is given after this attribute list.
It is a read only attribute. To set the motor position, the user has
to use the Position attribute. It is not allowed to read this
attribute when the motor is in FAULT or UNKNOWN mode. The unit used
for this attribute is the physical unit: millimeters or milli-radian.
It is also always an **absolute** position.
- **Offset** : The offset to be applied in the motor position computation. By
default set to 0. It is a memorized attribute. It is not allowed to
read or write this attribute when the motor is in FAULT, MOVING or
UNKNOWN mode.
- **Acceleration** : This is an expert read-write scalar double attribute.
This parameter value is written in database when the SaveConfig command is
executed. It is not allowed to read or write this attribute when the motor is
in FAULT or UNKNOWN state.
- **Deceleration** : This is an expert read-write scalar double attribute.
This parameter value is written in database when the SaveConfig command is
executed. It is not allowed to read or write this attribute when the motor is
in FAULT or UNKNOWN state.
- **Base_rate** : This is an expert read-write scalar double attribute. This
parameter value is written in database when the SaveConfig command is executed.
It is not allowed to read or write this attribute when the motor is in
FAULT or UNKNOWN state.
- **Velocity** : This is an expert read-write scalar double attribute.
This parameter value is written in database when the SaveConfig command is
executed. It is not allowed to read or write this attribute when the motor is
in FAULT or UNKNOWN state.
- **Limit_switches** : Three limit switches are managed by this attribute.
      Each of the switches is represented by a boolean value: False means inactive
while True means active. It is a read only attribute. It is not possible to
read this attribute when the motor is in UNKNOWN mode. It is a
spectrum attribute with 3 values which are:
- Data[0] : The Home switch value
- Data[1] : The Upper switch value
- Data[2] : The Lower switch value
- **SimulationMode** : This is a read only scalar boolean attribute. When set,
all motion requests are not forwarded to the software controller and then to
the hardware. When set, the motor position is simulated and is immediately
set to the value written by the user. To set this attribute, the user
      has to use the pool device Tango interface. The value of the
position, acceleration, deceleration, base_rate, velocity and offset
attributes are memorized at the moment this attribute is set. When
this mode is turned off, if the value of any of the previously
memorized attributes has changed, it is reapplied to the memorized
value. It is not allowed to read this attribute when the motor is in
FAULT or UNKNOWN states.
    - **Step_per_unit** : This is the number of motor steps per millimeter or per
degree. It is a memorized attribute. It is not allowed to read or write this
attribute when the motor is in FAULT or UNKNOWN mode. It is also not
allowed to write this attribute when the motor is MOVING. The default
value is 1.
- **Backlash** : If this attribute is defined to something different than 0,
the motor will always stop the motion coming from the same mechanical
direction. This means that it could be possible to ask the motor to go
a little bit after the desired position and then to return to the
desired position. The attribute value is the number of steps the motor
will pass the desired position if it arrives from the "wrong"
direction. This is a signed value. If the sign is positive, this means
that the authorized direction to stop the motion is the increasing
motor position direction. If the sign is negative, this means that the
authorized direction to stop the motion is the decreasing motor
position direction. It is a memorized attribute. It is not allowed to
read or write this attribute when the motor is in FAULT or UNKNOWN
mode. It is also not allowed to write this attribute when the motor is
MOVING. Some hardware motor controllers are able to manage this
backlash feature. If it is not the case, the motor interface will
implement this behavior.
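    As a worked example of the position formula above (the numbers are purely
    illustrative): with Sign = -1, Offset = 5.0 and DialPosition = 2.0, the
    reported Position is -1 * 2.0 + 5.0 = 3.0; conversely, writing
    Position = 4.0 drives the dial to (4.0 - 5.0) / -1 = 1.0.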
All the motor devices will have the already described attributes but
    some hardware motor controllers support other features which are not
covered by this list of pre-defined attributes. Using Tango dynamic
attribute creation, a motor device may have extra attributes used to
get/set the motor hardware controller specific features. These are the
attributes specified on the controller with
    :attr:`~sardana.pool.controller.Controller.axis_attributes`.
.. rubric:: The properties
- **Sleep_bef_last_read** : This property exposes the motor
      *instability time*. It defines the time in milliseconds that the software
      managing a motor movement will wait between detecting the end of the
      motion and reading the motor position one last time.
.. rubric:: Getting motor state and limit switches using event
The simplest way to know if a motor is moving is to survey its state.
If the motor is moving, its state will be MOVING. When the motion is
over, its state will be back to ON (or ALARM if a limit switch has
    been reached). The pool motor interface allows clients interested in the
    motor state or limit switch values to use the Tango event
    system by subscribing to motor state change events. As soon as a motor
starts a motion, its state is changed to MOVING and an event is sent.
As soon as the motion is over, the motor state is updated and another
event is sent. In the same way, as soon as a change in the limit
switches value is detected, a change event is sent to client(s) which
have subscribed to change event on the Limit_Switches attribute.
.. rubric:: Reading the motor position attribute
For each motor, the key attribute is its position. Special care has
been taken on this attribute management. When the motor is not moving,
reading the Position attribute will generate calls to the controller
and therefore hardware access. When the motor is moving, its position
is automatically read every 100 milli-seconds and stored in the cache.
This means that a client reading motor Position
attribute while the motor is moving will get the position from the
cache and will not generate extra controller calls. It
is also possible to get a motor position using the Tango event system.
When the motor is moving, an event is sent to the registered clients
when the change event criterion is true. By default, this change event
criterion is set to be a difference in position of 1. It is tunable on
a motor basis using the classical motor Position attribute abs_change
    property or on a pool device basis using its DefaultMotPos_AbsChange
    property. In any case, no more than 10 events are sent per second.
Once the motion is over, the motor position is made unavailable from
    the Tango polling buffer and is read one last time after a tunable
waiting time (Sleep_bef_last_read property). A forced change event
with this value is sent to clients using events.
"""
def __init__(self, dclass, name):
"""Constructor"""
self.in_write_position = False
PoolElementDevice.__init__(self, dclass, name)
def init(self, name):
PoolElementDevice.init(self, name)
def _is_allowed(self, req_type):
return PoolElementDevice._is_allowed(self, req_type)
def get_motor(self):
return self.element
def set_motor(self, motor):
self.element = motor
motor = property(get_motor, set_motor)
def set_write_dial_position_to_db(self):
dial = self.motor.get_dial_position_attribute()
if dial.has_write_value():
data = dict(DialPosition=dict(
__value=dial.w_value, __value_ts=dial.w_timestamp))
db = self.get_database()
db.put_device_attribute_property(self.get_name(), data)
def get_write_dial_position_from_db(self):
name = 'DialPosition'
db = self.get_database()
pos_props = db.get_device_attribute_property(
self.get_name(), name)[name]
        w_pos, w_ts = float(pos_props["__value"][0]), None
if "__value_ts" in pos_props:
w_ts = float(pos_props["__value_ts"][0])
return w_pos, w_ts
@DebugIt()
def delete_device(self):
PoolElementDevice.delete_device(self)
motor = self.motor
if motor is not None:
motor.remove_listener(self.on_motor_changed)
@DebugIt()
def init_device(self):
PoolElementDevice.init_device(self)
motor = self.motor
if motor is None:
full_name = self.get_full_name()
name = self.alias or full_name
self.motor = motor = \
self.pool.create_element(type="Motor", name=name,
full_name=full_name, id=self.Id, axis=self.Axis,
ctrl_id=self.Ctrl_id)
if self.instrument is not None:
motor.set_instrument(self.instrument)
# if in constructor, for all memorized no init attributes (position)
# let poolmotor know their write values
if self.in_constructor:
try:
w_pos, w_ts = self.get_write_dial_position_from_db()
self.in_write_position = True
try:
motor.set_write_position(w_pos, timestamp=w_ts)
finally:
self.in_write_position = False
except KeyError:
pass
if self.Sleep_bef_last_read > 0:
motor.set_instability_time(self.Sleep_bef_last_read / 1000)
motor.add_listener(self.on_motor_changed)
self.set_state(DevState.ON)
def on_motor_changed(self, event_source, event_type, event_value):
try:
self._on_motor_changed(event_source, event_type, event_value)
except DevFailed:
raise
except:
msg = 'Error occurred "on_motor_changed(%s.%s): %s"'
exc_info = sys.exc_info()
self.error(msg, self.motor.name, event_type.name,
exception_str(*exc_info[:2]))
self.debug("Details", exc_info=exc_info)
def _on_motor_changed(self, event_source, event_type, event_value):
# during server startup and shutdown avoid processing element
# creation events
if SardanaServer.server_state != State.Running:
return
timestamp = time.time()
name = event_type.name.lower()
if name == "w_position" and not self.in_write_position:
self.debug("Storing dial set point: %s",
self.motor.dial_position.w_value)
self.set_write_dial_position_to_db()
return
try:
attr = self.get_attribute_by_name(name)
except DevFailed:
return
quality = AttrQuality.ATTR_VALID
priority = event_type.priority
value, w_value, error = None, None, None
if name == "state":
value = self.calculate_tango_state(event_value)
elif name == "status":
value = self.calculate_tango_status(event_value)
else:
if isinstance(event_value, SardanaAttribute):
if event_value.error:
error = Except.to_dev_failed(*event_value.exc_info)
else:
value = event_value.value
timestamp = event_value.timestamp
else:
value = event_value
state = self.motor.get_state(propagate=0)
if name == "position":
w_value = event_source.get_position_attribute().w_value
if state == State.Moving:
quality = AttrQuality.ATTR_CHANGING
elif name == "dialposition" and state == State.Moving:
quality = AttrQuality.ATTR_CHANGING
self.set_attribute(attr, value=value, w_value=w_value,
timestamp=timestamp, quality=quality,
priority=priority, error=error, synch=False)
def always_executed_hook(self):
pass
def read_attr_hardware(self, data):
pass
def get_dynamic_attributes(self):
cache_built = hasattr(self, "_dynamic_attributes_cache")
std_attrs, dyn_attrs = \
PoolElementDevice.get_dynamic_attributes(self)
if not cache_built:
# For position attribute, listen to what the controller says for data
# type (between long and float)
pos = std_attrs.get('position')
if pos is not None:
_, data_info, attr_info = pos
ttype, _ = to_tango_type_format(attr_info.dtype)
data_info[0][0] = ttype
return std_attrs, dyn_attrs
def initialize_dynamic_attributes(self):
attrs = PoolElementDevice.initialize_dynamic_attributes(self)
detect_evts = "position", "dialposition",
non_detect_evts = "limit_switches", "step_per_unit", "offset", \
"sign", "velocity", "acceleration", "deceleration", "base_rate", \
"backlash"
for attr_name in detect_evts:
if attr_name in attrs:
self.set_change_event(attr_name, True, True)
for attr_name in non_detect_evts:
if attr_name in attrs:
self.set_change_event(attr_name, True, False)
def read_Position(self, attr):
motor = self.motor
use_cache = motor.is_in_operation() and not self.Force_HW_Read
state = motor.get_state(cache=use_cache, propagate=0)
position = motor.get_position(cache=use_cache, propagate=0)
if position.error:
Except.throw_python_exception(*position.exc_info)
quality = None
if state == State.Moving:
quality = AttrQuality.ATTR_CHANGING
self.set_attribute(attr, value=position.value, w_value=position.w_value,
quality=quality, priority=0,
timestamp=position.timestamp)
def write_Position(self, attr):
self.in_write_position = True
position = attr.get_write_value()
try:
self.info("write_Position(%s)", position)
try:
self.wait_for_operation()
except:
raise Exception("Cannot move: already in motion")
try:
self.motor.position = position
except PoolException as pe:
throw_sardana_exception(pe)
# manually store write dial position in the database
self.set_write_dial_position_to_db()
finally:
self.in_write_position = False
def read_Acceleration(self, attr):
attr.set_value(self.motor.get_acceleration(cache=False))
@memorize_write_attribute
def write_Acceleration(self, attr):
self.motor.acceleration = attr.get_write_value()
def read_Deceleration(self, attr):
attr.set_value(self.motor.get_deceleration(cache=False))
@memorize_write_attribute
def write_Deceleration(self, attr):
self.motor.deceleration = attr.get_write_value()
def read_Base_rate(self, attr):
attr.set_value(self.motor.get_base_rate(cache=False))
@memorize_write_attribute
def write_Base_rate(self, attr):
self.motor.base_rate = attr.get_write_value()
def read_Velocity(self, attr):
attr.set_value(self.motor.get_velocity(cache=False))
@memorize_write_attribute
def write_Velocity(self, attr):
self.motor.velocity = attr.get_write_value()
def read_Offset(self, attr):
attr.set_value(self.motor.get_offset(cache=False).value)
@memorize_write_attribute
def write_Offset(self, attr):
self.motor.offset = attr.get_write_value()
def read_DialPosition(self, attr):
motor = self.motor
use_cache = motor.is_in_operation() and not self.Force_HW_Read
state = motor.get_state(cache=use_cache, propagate=0)
dial_position = motor.get_dial_position(cache=use_cache, propagate=0)
if dial_position.error:
Except.throw_python_exception(*dial_position.exc_info)
quality = None
if state == State.Moving:
quality = AttrQuality.ATTR_CHANGING
self.set_attribute(attr, value=dial_position.value, quality=quality,
priority=0, timestamp=dial_position.timestamp)
def read_Step_per_unit(self, attr):
attr.set_value(self.motor.get_step_per_unit(cache=False))
@memorize_write_attribute
def write_Step_per_unit(self, attr):
step_per_unit = attr.get_write_value()
self.motor.step_per_unit = step_per_unit
def read_Backlash(self, attr):
attr.set_value(self.motor.get_backlash(cache=False))
@memorize_write_attribute
def write_Backlash(self, attr):
self.motor.backlash = attr.get_write_value()
def read_Sign(self, attr):
sign = self.motor.get_sign(cache=False).value
attr.set_value(sign)
@memorize_write_attribute
def write_Sign(self, attr):
self.motor.sign = attr.get_write_value()
def read_Limit_switches(self, attr):
motor = self.motor
use_cache = motor.is_in_operation() and not self.Force_HW_Read
limit_switches = motor.get_limit_switches(cache=use_cache)
self.set_attribute(attr, value=limit_switches.value, priority=0,
timestamp=limit_switches.timestamp)
def DefinePosition(self, argin):
self.motor.define_position(argin)
# update write value of position attribute
pos_attr = self.get_wattribute_by_name("position")
pos_attr.set_write_value(argin)
def is_DefinePosition_allowed(self):
if self.get_state() in (DevState.FAULT, DevState.MOVING,
DevState.UNKNOWN):
return False
return True
def SaveConfig(self):
raise NotImplementedError
def is_SaveConfig_allowed(self):
if self.get_state() in (DevState.FAULT, DevState.MOVING,
DevState.UNKNOWN):
return False
return True
def MoveRelative(self, argin):
raise NotImplementedError
def is_MoveRelative_allowed(self):
if self.get_state() in (DevState.FAULT, DevState.MOVING,
DevState.UNKNOWN):
return False
return True
def get_attributes_to_restore(self):
"""Make sure position is the last attribute to restore"""
restore_attributes = PoolElementDevice.get_attributes_to_restore(self)
try:
restore_attributes.remove('Position')
restore_attributes.append('Position')
except ValueError:
pass
return restore_attributes
is_Position_allowed = _is_allowed
is_Acceleration_allowed = _is_allowed
is_Deceleration_allowed = _is_allowed
is_Base_rate_allowed = _is_allowed
is_Velocity_allowed = _is_allowed
is_Offset_allowed = _is_allowed
is_DialPosition_allowed = _is_allowed
is_Step_per_unit_allowed = _is_allowed
is_Backlash_allowed = _is_allowed
is_Sign_allowed = _is_allowed
is_Limit_switches_allowed = _is_allowed
class MotorClass(PoolElementDeviceClass):
# Class Properties
class_property_list = {
}
# Device Properties
device_property_list = {
'Sleep_bef_last_read': [DevLong,
"Number of mS to sleep before the last read during a motor "
"movement", 0],
'_Acceleration': [DevDouble, "", -1],
'_Deceleration': [DevDouble, "", -1],
'_Velocity': [DevDouble, "", -1],
'_Base_rate': [DevDouble, "", -1],
}
device_property_list.update(PoolElementDeviceClass.device_property_list)
# Command definitions
cmd_list = {
'DefinePosition': [[DevDouble, "New position"], [DevVoid, ""]],
'SaveConfig': [[DevVoid, ""], [DevVoid, ""]],
'MoveRelative': [[DevDouble, "amount to move"], [DevVoid, ""]],
}
cmd_list.update(PoolElementDeviceClass.cmd_list)
# Attribute definitions
attr_list = {}
attr_list.update(PoolElementDeviceClass.attr_list)
standard_attr_list = {
'Position': [[DevDouble, SCALAR, READ_WRITE],
{'abs_change': '1.0', }],
'Acceleration': [[DevDouble, SCALAR, READ_WRITE],
{'Memorized': "true", }],
'Deceleration': [[DevDouble, SCALAR, READ_WRITE],
{'Memorized': "true", }],
'Base_rate': [[DevDouble, SCALAR, READ_WRITE],
{'Memorized': "true",
'label': 'Base rate', }],
'Velocity': [[DevDouble, SCALAR, READ_WRITE],
{'Memorized': "true", }],
'Offset': [[DevDouble, SCALAR, READ_WRITE],
{'Memorized': "true",
'Display level': DispLevel.EXPERT}],
'DialPosition': [[DevDouble, SCALAR, READ],
{'label': "Dial position",
'Display level': DispLevel.EXPERT}],
'Step_per_unit': [[DevDouble, SCALAR, READ_WRITE],
{'Memorized': "true",
'label': "Steps p/ unit",
'Display level': DispLevel.EXPERT}],
'Backlash': [[DevLong, SCALAR, READ_WRITE],
{'Memorized': "true",
'Display level': DispLevel.EXPERT}],
'Sign': [[DevShort, SCALAR, READ_WRITE],
{'Memorized': "true",
'Display level': DispLevel.EXPERT}],
'Limit_switches': [[DevBoolean, SPECTRUM, READ, 3],
{'label': "Limit switches (H,U,L)",
'description': "This attribute is the motor "
"limit switches state. It's an array with 3 \n"
"elements which are:\n"
"0 - The home switch\n"
"1 - The upper limit switch\n"
"2 - The lower limit switch\n"
"False means not active. True means active"}],
}
standard_attr_list.update(PoolElementDeviceClass.standard_attr_list)
def _get_class_properties(self):
ret = PoolElementDeviceClass._get_class_properties(self)
ret['Description'] = "Motor device class"
ret['InheritedFrom'].insert(0, 'PoolElementDevice')
return ret
|
from aiohttp import web
from rtcbot import (
RTCConnection,
CVCamera,
PiCamera,
Microphone,
Speaker,
SerialConnection,
)
import asyncio
import logging
# logging.basicConfig(level=logging.DEBUG)
routes = web.RouteTableDef()
try:
import picamera
cam = PiCamera()
except Exception:
cam = CVCamera()
mic = Microphone()
# s = Speaker()
conn = RTCConnection()
conn.video.putSubscription(cam)
conn.audio.putSubscription(mic)
# conn.audio.subscribe(s)
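# writeFormat "<hhh" is a struct-style format string: three little-endian
# 16-bit signed integers, packed in the order given by writeKeys (gas, turn, rot)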
arduino = SerialConnection(writeFormat="<hhh", writeKeys=["gas", "turn", "rot"])
arduino.putSubscription(conn)
"""
@conn.subscribe
def oncMsg(msg):
try:
print(msg)
print(msg["gas"])
except Exception as e:
print(e)
arduino.put_nowait(msg)
"""
@arduino.subscribe
def onMessage(msg):
print(msg)
@routes.post("/setupRTC")
async def setupRTC(request):
clientOffer = await request.json()
print("Got client offer")
print(clientOffer)
response = await conn.getLocalDescription(clientOffer)
print("RESPONSE")
print(response)
return web.json_response(response)
async def closer(app):
print("Running closer")
mic.close()
cam.close()
await conn.close()
print("Closed")
app = web.Application()
app.add_routes(routes)
app.on_shutdown.append(closer)
web.run_app(app, port=8000)
|
import datetime
import re
common_notification_parameters = """contents
headings
subtitle
template_id
content_available
mutable_content
email_body
email_subject
email_from_name
email_from_address
data
url
web_url
app_url
ios_attachments
big_picture
adm_big_picture
chrome_big_picture
buttons
web_buttons
ios_category
android_channel_id
existing_android_channel_id
android_background_layout
small_icon
large_icon
adm_small_icon
adm_large_icon
chrome_web_icon
chrome_web_image
chrome_web_badge
firefox_icon
chrome_icon
ios_sound
android_sound
adm_sound
wp_sound
wp_wns_sound
android_led_color
android_accent_color
android_visibility
ios_badge_type
ios_badge_count
collapse_id
apns_alert
send_after
delayed_option
delivery_time_of_day
ttl
priority
android_group
android_group_message
adm_group
adm_group_message
is_ios
is_android
is_any_web
is_email
is_chrome_web
is_firefox
is_safari
is_wp
is_wp_wns
is_adm
is_chrome"""
class Notification:
"""Base notification
Attributes:
        {common_notification_parameters}
    """.format(common_notification_parameters=common_notification_parameters)
ANDROID_VISIBILITY_PUBLIC = 1
ANDROID_VISIBILITY_PRIVATE = 0
ANDROID_VISIBILITY_SECRET = -1
IOS_BADGE_TYPE_NONE = None
IOS_BADGE_TYPE_SET_TO = "SetTo"
IOS_BADGE_TYPE_INCREASE = "Increase"
DELAYED_OPTION_TIMEZONE = "timezone"
DELAYED_OPTION_LAST_ACTIVE = "last-active"
def __init__(self,
contents=None,
headings=None,
subtitle=None,
template_id=None,
content_available=None,
mutable_content=None,
email_body=None,
email_subject=None,
email_from_name=None,
email_from_address=None,
data=None,
url=None,
web_url=None,
app_url=None,
ios_attachments=None,
big_picture=None,
adm_big_picture=None,
chrome_big_picture=None,
buttons=None,
web_buttons=None,
ios_category=None,
android_channel_id=None,
existing_android_channel_id=None,
android_background_layout=None,
small_icon=None,
large_icon=None,
adm_small_icon=None,
adm_large_icon=None,
chrome_web_icon=None,
chrome_web_image=None,
chrome_web_badge=None,
firefox_icon=None,
chrome_icon=None,
ios_sound=None,
android_sound=None,
adm_sound=None,
wp_sound=None,
wp_wns_sound=None,
android_led_color=None,
android_accent_color=None,
android_visibility=None,
ios_badge_type=None,
ios_badge_count=None,
collapse_id=None,
apns_alert=None,
send_after=None,
delayed_option=None,
delivery_time_of_day=None,
ttl=None,
priority=None,
android_group=None,
android_group_message=None,
adm_group=None,
adm_group_message=None,
is_ios=None,
is_android=None,
is_any_web=None,
is_email=None,
is_chrome_web=None,
is_firefox=None,
is_safari=None,
is_wp=None,
is_wp_wns=None,
is_adm=None,
is_chrome=None):
self.id = None
self.check_type(contents, "contents", dict)
self.check_type(headings, "headings", dict)
self.check_type(subtitle, "subtitle", dict)
self.check_type(template_id, "template_id", str)
self.check_type(content_available, "content_available", bool)
self.check_type(mutable_content, "mutable_content", bool)
assert contents or content_available is True or template_id, \
("'contents' is required unless content_available=True "
"or template_id is set")
self.content_data = {
"contents": contents,
"headings": headings,
"subtitle": subtitle,
"template_id": template_id,
"content_available": content_available,
"mutable_content": mutable_content,
}
self.check_type(email_body, "email_body", str)
self.check_type(email_subject, "email_subject", str)
self.check_type(email_from_name, "email_from_name", str)
self.check_type(email_from_address, "email_from_address", str)
assert not email_from_address or re.search(
r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)",
email_from_address
), \
"'email_from_address' is not a valid mail address"
self.email_content_data = {
"email_body": email_body,
"email_subject": email_subject,
"email_from_name": email_from_name,
"email_from_address": email_from_address
}
self.check_type(data, "data", dict)
self.check_type(url, "url", str)
self.check_type(web_url, "web_url", str)
self.check_type(app_url, "app_url", str)
self.check_type(ios_attachments, "ios_attachments", dict)
self.check_type(big_picture, "big_picture", str)
        self.check_type(adm_big_picture, "adm_big_picture", str)
        self.check_type(chrome_big_picture, "chrome_big_picture", str)
self.attachments_data = {
"data": data,
"url": url,
"web_url": web_url,
"app_url": app_url,
"ios_attachments": ios_attachments,
"big_picture": big_picture,
"adm_big_picture": adm_big_picture,
"chrome_big_picture": chrome_big_picture
}
self.check_type(buttons, "buttons", list)
self.check_type(web_buttons, "web_buttons", list)
self.check_type(ios_category, "ios_category", str)
self.action_buttons_data = {
"buttons": buttons,
"web_buttons": web_buttons,
"ios_category": ios_category
}
self.check_type(existing_android_channel_id,
"existing_android_channel_id", str)
self.check_type(android_background_layout,
"android_background_layout", dict)
self.check_type(small_icon, "small_icon", str)
self.check_type(large_icon, "large_icon", str)
self.check_type(adm_small_icon, "adm_small_icon", str)
self.check_type(adm_large_icon, "adm_large_icon", str)
self.check_type(firefox_icon, "firefox_icon", str)
self.check_type(chrome_icon, "chrome_icon", str)
self.check_type(ios_sound, "ios_sound", str)
self.check_type(android_sound, "android_sound", str)
self.check_type(adm_sound, "adm_sound", str)
self.check_type(wp_sound, "wp_sound", str)
self.check_type(wp_wns_sound, "wp_wns_sound", str)
self.check_type(android_led_color, "android_led_color", str)
self.check_type(android_accent_color, "android_accent_color", str)
self.check_type(android_visibility, "android_visibility", int)
assert android_visibility in [1, 0, -1] or not android_visibility, \
"'android_visibility' has to 1, 0 or -1"
self.check_type(ios_badge_type, "ios_badge_type", str)
self.check_type(ios_badge_count, "ios_badge_count", int)
self.check_type(collapse_id, "collapse_id", str)
self.check_type(apns_alert, "apns_alert", dict)
self.appearance_data = {
"android_channel_id": android_channel_id,
"existing_android_channel_id": existing_android_channel_id,
"android_background_layout": android_background_layout,
"small_icon": small_icon,
"large_icon": large_icon,
"adm_small_icon": adm_small_icon,
"adm_large_icon": adm_large_icon,
"chrome_web_icon": chrome_web_icon,
"chrome_web_image": chrome_web_image,
"chrome_web_badge": chrome_web_badge,
"firefox_icon": firefox_icon,
"chrome_icon": chrome_icon,
"ios_sound": ios_sound,
"android_sound": android_sound,
"adm_sound": adm_sound,
"wp_sound": wp_sound,
"wp_wns_sound": wp_wns_sound,
"android_led_color": android_led_color,
"android_accent_color": android_accent_color,
"android_visibility": android_visibility,
"ios_badgeType": ios_badge_type,
"ios_badgeCount": ios_badge_count,
"collapse_id": collapse_id,
"apns_alert": apns_alert
}
assert isinstance(send_after, datetime.datetime) or not send_after, \
"'send_after' has to be an instance of datetime.datetime"
if send_after:
send_after = send_after.strftime('%Y-%m-%d %H:%M:%S %Z')
self.check_type(delayed_option, "delayed_option", str)
self.check_type(delivery_time_of_day, "delivery_time_of_day", str)
self.check_type(ttl, "ttl", int)
self.check_type(priority, "priority", int)
self.delivery_data = {
"send_after": send_after,
"delayed_option": delayed_option,
"delivery_time_of_day": delivery_time_of_day,
"ttl": ttl,
"priority": priority
}
self.check_type(android_group, "android_group", str)
self.check_type(android_group_message, "android_group_message", dict)
self.check_type(adm_group, "adm_group", str)
self.check_type(adm_group_message, "adm_group_message", dict)
self.grouping_and_collapsing_data = {
"android_group": android_group,
"android_group_message": android_group_message,
"adm_group": adm_group,
"adm_group_message": adm_group_message
}
self.check_type(is_ios, "is_ios", bool)
self.check_type(is_android, "is_android", bool)
self.check_type(is_any_web, "is_any_web", bool)
self.check_type(is_email, "is_email", bool)
self.check_type(is_chrome_web, "is_chrome_web", bool)
self.check_type(is_firefox, "is_firefox", bool)
self.check_type(is_safari, "is_safari", bool)
self.check_type(is_wp, "is_wp", bool)
self.check_type(is_wp_wns, "is_wp_wns", bool)
self.check_type(is_adm, "is_adm", bool)
self.check_type(is_chrome, "is_chrome", bool)
        self.platform_to_deliver_to_data = {
            "isIos": is_ios,
            "isAndroid": is_android,
            "isAnyWeb": is_any_web,
            "isEmail": is_email,
            "isChromeWeb": is_chrome_web,
            "isFirefox": is_firefox,
            "isSafari": is_safari,
            "isWP": is_wp,
            "isWP_WNS": is_wp_wns,
            "isAdm": is_adm,
            "isChrome": is_chrome
        }
    def check_type(self, variable, variable_string, expected_type):
        if variable is not None:
            assert isinstance(variable, expected_type), \
                "'{}' has to be a {}".format(
                    variable_string, expected_type.__name__)
def get_common_data(self):
return {
**self.content_data,
**self.email_content_data,
**self.attachments_data,
**self.action_buttons_data,
**self.appearance_data,
**self.delivery_data,
**self.grouping_and_collapsing_data,
**self.platform_to_deliver_to_data
}
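# Illustrative usage sketch (not part of the original module): constructing a
# minimal notification; the keyword names follow the parameters listed above.
#
#   notification = Notification(
#       contents={"en": "Hello world"},
#       headings={"en": "Greeting"},
#       is_ios=True,
#       is_android=True,
#   )
#   payload = notification.get_common_data()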
|
from pathlib import Path
import pytest
import shutil
import sys
from tempfile import TemporaryDirectory
from testpath import assert_isdir, MockCommand
from flit import build, common
samples_dir = Path(__file__).parent / 'samples'
LIST_FILES_TEMPLATE = """\
#!{python}
import sys
from os.path import join
if '--deleted' not in sys.argv:
print('pyproject.toml')
print('{module}')
print('EG_README.rst')
"""
def test_build_main():
with TemporaryDirectory() as td:
pyproject = Path(td, 'pyproject.toml')
shutil.copy(str(samples_dir / 'module1-pkg.toml'), str(pyproject))
shutil.copy(str(samples_dir / 'module1.py'), td)
shutil.copy(str(samples_dir / 'EG_README.rst'), td)
Path(td, '.git').mkdir() # Fake a git repo
with MockCommand('git', LIST_FILES_TEMPLATE.format(
python=sys.executable, module='module1.py')):
res = build.main(pyproject)
assert res.wheel.file.suffix == '.whl'
assert res.sdist.file.name.endswith('.tar.gz')
assert_isdir(Path(td, 'dist'))
def test_build_module_no_docstring():
with TemporaryDirectory() as td:
pyproject = Path(td, 'pyproject.toml')
shutil.copy(str(samples_dir / 'no_docstring-pkg.toml'), str(pyproject))
shutil.copy(str(samples_dir / 'no_docstring.py'), td)
shutil.copy(str(samples_dir / 'EG_README.rst'), td)
Path(td, '.git').mkdir() # Fake a git repo
with MockCommand('git', LIST_FILES_TEMPLATE.format(
python=sys.executable, module='no_docstring.py')):
with pytest.raises(common.NoDocstringError) as exc_info:
build.main(pyproject)
assert 'no_docstring.py' in str(exc_info.value)
|
import lab as B
from gpcm.normal import NaturalNormal
from stheno import Normal
from .util import approx
def test_natural_normal():
chol = B.randn(2, 2)
dist = Normal(B.randn(2, 1), B.reg(chol @ chol.T, diag=1e-1))
nat = NaturalNormal.from_normal(dist)
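    # NaturalNormal presumably stores the Gaussian in natural (information-form)
    # parameters, so its derived properties should match the stheno Normal above.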
# Test properties.
assert dist.dtype == nat.dtype
for name in ["dim", "mean", "var", "m2"]:
approx(getattr(dist, name), getattr(nat, name))
# Test sampling.
state = B.create_random_state(dist.dtype, seed=0)
state, sample = nat.sample(state, num=1_000_000)
emp_mean = B.mean(B.dense(sample), axis=1, squeeze=False)
emp_var = (sample - emp_mean) @ (sample - emp_mean).T / 1_000_000
approx(dist.mean, emp_mean, rtol=5e-2)
approx(dist.var, emp_var, rtol=5e-2)
# Test KL.
chol = B.randn(2, 2)
other_dist = Normal(B.randn(2, 1), B.reg(chol @ chol.T, diag=1e-2))
other_nat = NaturalNormal.from_normal(other_dist)
approx(dist.kl(other_dist), nat.kl(other_nat))
# Test log-pdf.
x = B.randn(2, 1)
approx(dist.logpdf(x), nat.logpdf(x))
|
from flask import (Flask,
render_template,
request,
redirect,
url_for,
flash,
jsonify)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from tables import Base, Genre, Band, User
# importing for password security
from flask import session as login_session
import random
import string
# importing for OAuth
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
app = Flask(__name__)
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Music Application"
# Connect to database
engine = create_engine('sqlite:///favoritemusic.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token
@app.route('/login')
def showLogin():
state = ''.join(
random.choice(string.ascii_uppercase + string.digits)
for x in range(32))
login_session['state'] = state
# return "The current session state is %s" % login_session['state']
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code, now compatible with Python3
request.get_data()
code = request.data.decode('utf-8')
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
# Submit request, parse response - Python3 compatible
h = httplib2.Http()
response = h.request(url, 'GET')[1]
str_response = response.decode('utf-8')
result = json.loads(str_response)
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps
('Current user is already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
# see if user exists, if not, make a new one
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius:' \
' 150px;-webkit-border-radius:' \
' 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
# User Helper Functions
def createUser(login_session):
newUser = User(name=login_session['username'], email=login_session[
'email'], picture=login_session['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).one()
return user.id
def getUserInfo(user_id):
user = session.query(User).filter_by(id=user_id).one()
return user
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/gdisconnect')
def gdisconnect():
access_token = login_session.get('access_token')
if access_token is None:
print('Access Token is None')
response = make_response(json.dumps(
'Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
print('In gdisconnect access token is %s' % access_token)
print('User name is: ')
print(login_session['username'])
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \
% login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print('result is ')
print(result)
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
response = make_response(json.dumps(
'Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
response = make_response(
json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
# JSON APIs to view Genre Information #DONE
@app.route('/genres/<int:genre_id>/band/JSON')
def bandItem(genre_id):
genre = session.query(Genre).filter_by(id=genre_id).one()
items = session.query(Band).filter_by(
genre_id=genre_id).all()
return jsonify(BandItems=[i.serialize for i in items])
# ADD JSON ENDPOINT HERE
@app.route('/genres/<int:genre_id>/band/<int:band_id>/JSON')
def bandItemJSON(genre_id, band_id):
bandItem = session.query(Band).filter_by(id=band_id).one()
return jsonify(BandItem=bandItem.serialize)
@app.route('/genres/JSON')
def genresJSON():
genres = session.query(Genre).all()
return jsonify(genres=[g.serialize for g in genres])
# Show all Genres
@app.route('/')
@app.route('/genres')
def homePage():
genre = session.query(Genre).order_by(Genre.name)
if 'username' not in login_session:
return render_template('publicgenres.html', genre=genre)
else:
return render_template('main.html', genre=genre)
# Show bands information in particular genre
@app.route('/genres/<int:genre_id>/')
@app.route('/genres/<int:genre_id>/band/')
def genreItem(genre_id):
genre = session.query(Genre).filter_by(id=genre_id).one()
creator = getUserInfo(genre.user_id)
items = session.query(Band).filter_by(
genre_id=genre_id).all()
if 'username' not in login_session or \
creator.id != login_session['user_id']:
return render_template('band.html',
items=items, genre=genre, creator=creator)
else:
return render_template('band.html',
items=items, genre=genre, creator=creator)
# Create new Genre
@app.route('/genres/new/', methods=['GET', 'POST'])
def newGenre():
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
newGenre = Genre(
name=request.form['name'], user_id=login_session['user_id'])
session.add(newGenre)
flash('New Genre %s Successfully Created' % newGenre.name)
session.commit()
return redirect(url_for('homePage'))
else:
return render_template('newGenres.html')
# Edit a Genre
@app.route('/genres/<int:genre_id>/edit/', methods=['GET', 'POST'])
def editGenre(genre_id):
if 'username' not in login_session:
return redirect('/login')
editedGenre = session.query(
Genre).filter_by(id=genre_id).one()
if editedGenre.user_id != login_session['user_id']:
return redirect('/')
if request.method == 'POST':
if request.form['name']:
editedGenre.name = request.form['name']
return redirect(url_for('homePage', genre_id=genre_id))
else:
return render_template(
'editGenre.html', genre=editedGenre)
# Delete a Genre
@app.route('/genres/<int:genre_id>/delete/', methods=['GET', 'POST'])
def deleteGenre(genre_id):
if 'username' not in login_session:
return redirect('/login')
genreToDelete = session.query(
Genre).filter_by(id=genre_id).one()
if genreToDelete.user_id != login_session['user_id']:
flash("Not Authorized")
return redirect("/")
if request.method == 'POST':
session.delete(genreToDelete)
flash('%s Successfully Deleted' % genreToDelete.name)
session.commit()
return redirect(
url_for('homePage', genre_id=genre_id))
else:
return render_template(
'deleteGenre.html', genre=genreToDelete, genre_id=genre_id)
# Create new band
@app.route('/genres/<int:genre_id>/band/new', methods=['GET', 'POST'])
def newGenreItem(genre_id):
if 'username' not in login_session:
return redirect('/login')
genre = session.query(Genre).filter_by(id=genre_id).one()
if login_session['user_id'] != genre.user_id:
return redirect('/')
if request.method == 'POST':
newItem = Band(name=request.form['name'], description=request.form[
'description'], year=request.form['year'],
genre_id=genre_id, user_id=genre.user_id)
session.add(newItem)
session.commit()
flash('New Band %s Item Successfully Created' % newItem.name)
return redirect(url_for('genreItem', genre_id=genre_id))
else:
return render_template('newgenreitem.html', genre_id=genre_id)
# Edit current band
@app.route('/genres/<int:genre_id>/band/<int:band_id>/edit',
methods=['GET', 'POST'])
def editGenreItem(genre_id, band_id):
if 'username' not in login_session:
return redirect('/login')
editedItem = session.query(Band).filter_by(id=band_id).one()
genre = session.query(Genre).filter_by(id=genre_id).one()
if login_session['user_id'] != genre.user_id:
flash("Not authorized")
return redirect('/')
if request.method == 'POST':
if request.form['name']:
editedItem.name = request.form['name']
if request.form['description']:
editedItem.description = request.form['description']
if request.form['year']:
editedItem.year = request.form['year']
session.add(editedItem)
session.commit()
flash('Band Item Successfully Edited')
return redirect(url_for('genreItem', genre_id=genre_id))
else:
return render_template(
'editgenreitem.html', genre_id=genre_id,
band_id=band_id, item=editedItem)
# Delete a Band
@app.route('/genres/<int:genre_id>/band/<int:band_id>/delete',
methods=['GET', 'POST'])
def deleteGenreItem(genre_id, band_id):
if 'username' not in login_session:
return redirect('/login')
genre = session.query(Genre).filter_by(id=genre_id).one()
itemToDelete = session.query(Band).filter_by(id=band_id).one()
if login_session['user_id'] != genre.user_id:
return redirect('/')
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash('Band Item Successfully Deleted')
return redirect(url_for('genreItem', genre_id=genre_id))
else:
return render_template('deletegenreitem.html', item=itemToDelete)
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=8000)
|
"""
The setup.py script for energy_py
"""
from setuptools import setup, find_packages
setup(name='energy_py',
version='0.2',
description='reinforcement learning for energy systems',
author='Adam Green',
author_email='adam.green@adgefficiency.com',
url='http://adgefficiency.com/',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={'': ['*.csv']},
setup_requires=['pytest-runner'],
tests_require=['pytest'],
install_requires=[]
)
|
from ..slack import SlackClient
import time
from queue import Queue
from queue import Empty as QueueEmpty
from threading import Thread, Event
from ..util.log import getLogger
_log = getLogger('stream.reader')
class Reader(Thread):
def __init__(self, token):
self._client = SlackClient(token) # create slack client
self._output = Queue()
self._exit = Event()
self._read_int = 1
super(Reader, self).__init__()
self.daemon = True # die on process exit
self._log = _log.getChild('reader')
self._id, self._user, = self._retrieve_id()
self._channel_cache = {}
def _handle_event(self, event):
self._log.debug('got event type: %s'%event['type'])
self._output.put(event)
def _retrieve_id(self):
success, resp = self._client.api_call('auth.test')
print(resp)
if not success:
raise Exception('Invalid slack credentials')
return resp['user_id'], resp['user']
def _is_public(self, channel):
if not channel or not isinstance(channel, str):
return True
if not channel in self._channel_cache:
success, resp = self._client.api_call('im.list')
if success:
private = [ch['id'] for ch in resp.get('ims', [])]
self._channel_cache[channel] = not channel in private
return self._channel_cache[channel]
@property
def events(self):
while not self._exit.isSet():
try:
event = self._output.get(True, 5)
if event:
event['public'] = self._is_public(event.get('channel', None))
yield event
except QueueEmpty:
pass
def run(self):
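# Reconnect loop: keep trying to open the RTM connection; while connected, read
# events and hand them to _handle_event; on a failed connection roughly double
# the back-off delay (up to ~32s) before sleeping and retrying.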
delay = 1
self._log.debug('starting reader, initial backoff %i'%delay)
while not self._exit.isSet():
self._log.debug('connecting to slack rtm...')
if self._client.rtm_connect():
self._log.debug('connected, waiting for events...')
delay = 2
while not self._exit.isSet():
event = self._client.rtm_read()
if event:
event['self'] = event.get('user') == self._id
if not 'channel' in event.keys():
event['channel'] = None
self._handle_event(event)
else:
self._log.debug('connection failed')
if delay <= 16:
delay += delay
self._log.debug('increasing backoff to %i'%delay)
time.sleep(delay)
def join(self):
self._exit.set()
self._log.debug('reader exiting...')
self._client.rtm_close()
return super(Reader, self).join() |
import os
import csv
import inspect
import pydoc
import re
from nose.tools import make_decorator
class BaselineRow(object):
def __init__(self, csvrow):
num_fields = 7
padded_row = csvrow + [''] * max(0, num_fields - len(csvrow))
(self.module,
self.test,
self.status,
self.exception,
self.message,
self.priority,
self.notes) = padded_row[:num_fields]
self.other_fields = padded_row[num_fields:]
def __str__(self):
return ', '.join(self.as_csv_row())
def as_csv_row(self):
return [self.module, self.test, self.status, self.exception, self.message, self.priority, self.notes] + self.other_fields
def add_annotations():
if 'S3_CS_BASELINE' not in os.environ:
return
with open(os.environ['S3_CS_BASELINE']) as baseline:
for baseline_row in map(BaselineRow, csv.reader(baseline)):
add_in_baseline(baseline_row)
add_priority(baseline_row)
add_expected_error(baseline_row)
add_result(baseline_row)
def add_in_baseline(row):
test = _get_test(row.module, row.test)
if test:
_set_test_attr(test, 'cs_in_baseline', True)
def add_priority(row):
if row.priority:
test = _get_test(row.module, row.test)
if test:
_set_test_attr(test, 'cs_priority', row.priority)
def add_expected_error(row):
if not row.exception:
return
test = _get_test(row.module, row.test)
if test:
module_or_class = pydoc.locate(row.module)
exception_type = pydoc.locate(row.exception)
decorator = raises_error_with_message(exception_type, row.message)
decorated = decorator(test)
setattr(module_or_class, row.test, decorated)
def add_result(row):
print('add_result %s' % row)
if not row.status:
return
test = _get_test(row.module, row.test)
if test:
print('set cs_status')
_set_test_attr(test, 'cs_status', row.status)
def _set_test_attr(test, attr_name, attr_val):
if inspect.ismethod(test):
setattr(test.__func__, attr_name, attr_val)
else:
setattr(test, attr_name, attr_val)
def _get_test(module_name, test_name):
return pydoc.locate('%s.%s' % (module_name, test_name))
def raises_error_with_message(exception, message_pattern):
"""
Decorator that expects a test to throw an exception with a specific message.
Used to baseline current test behavior. Only activated if S3_CS_EXPECT_FAILURES is set.
"""
def decorate(func):
if 'S3_CS_EXPECT_FAILURES' not in os.environ:
return func
name = func.__name__
def newfunc(*arg, **kw):
try:
func(*arg, **kw)
except exception as e:
assert re.search(message_pattern, str(e)), '%s() threw exception with unexpected message.\nExpected: %s\nGot: %s' % (name, message_pattern, e)
except:
raise
else:
message = "%s() did not raise %s" % (name, exception)
raise AssertionError(message)
newfunc = make_decorator(func)(newfunc)
return newfunc
return decorate
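# Illustrative usage (hypothetical test, not part of the baseline module): with
# S3_CS_EXPECT_FAILURES set, the decorated test only passes when the wrapped
# function raises the expected exception with a message matching the pattern.
#
# @raises_error_with_message(ValueError, r"bad .* value")
# def test_rejects_bad_value():
#     parse_value("bogus")  # hypothetical helper expected to raise ValueError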
|
#coding: utf-8
import math
import numpy as np
import torch
from torch import nn
from torch.nn import Parameter
from .cuda_helper import zeros, Tensor, LongTensor
from .gumbel import gumbel_max
from .storage import Storage
class SequenceBatchNorm(nn.Module):
def __init__(self, num_features):
# seqlen * batch * XXXXX * num_features
super().__init__()
self.num_features = num_features
self.bn = nn.BatchNorm1d(num_features)
def forward(self, incoming, length):
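# Flatten only the valid (non-padded) timesteps of every sequence into one long
# batch, apply BatchNorm1d over the feature dimension, then scatter the
# normalised values back into the original padded (seqlen, batch, ...) layout.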
incoming_shape = incoming.shape
seqlen = incoming_shape[0]
batch_num = incoming_shape[1]
assert self.num_features == incoming_shape[-1]
assert len(length) == incoming_shape[1]
incoming = incoming.reshape(seqlen, batch_num, -1, self.num_features)
arr = []
for i, l in enumerate(length):
arr.append(incoming[:l, i])
alllen = np.sum(length)
incoming = torch.cat(arr, dim=0)
incoming = self.bn(incoming.view(-1, self.num_features)).view(alllen, -1, self.num_features)
#arr = []
now = 0
other_dim = incoming.shape[-2]
res = zeros(seqlen, batch_num, other_dim, self.num_features)
for i, l in enumerate(length):
#arr.append(torch.cat([incoming[now:now+l], zeros(seqlen-l, other_dim, self.num_features)], dim=0))
res[:l, i] = incoming[now:now+l]
now += l
#incoming = torch.stack(arr, 1)
return res.view(*incoming_shape)
|
"""
Functional test
Big Share Editor Epic
Storyboard is defined within the comments of the program itself
"""
import unittest
from flask import url_for
from biblib.views.http_errors import NO_PERMISSION_ERROR
from biblib.tests.stubdata.stub_data import UserShop, LibraryShop
from biblib.tests.base import MockEmailService, MockSolrBigqueryService,\
TestCaseDatabase, MockEndPoint
class TestBigShareEditorEpic(TestCaseDatabase):
"""
Base class used to test the Big Share Editor Epic
"""
def test_big_share_editor(self):
"""
Carries out the epic 'Big Share Editor', where a user creates a library
and wants one other user to have editing permissions, i.e., add and
remove bibcodes from the library.
:return: no return
"""
# Stub data for users, etc.
user_dave = UserShop()
user_mary = UserShop()
library_dave = LibraryShop()
# Librarian Dave makes a big library full of content
url = url_for('userview')
response = self.client.post(
url,
data=library_dave.user_view_post_data_json,
headers=user_dave.headers
)
library_id_dave = response.json['id']
self.assertEqual(response.status_code, 200, response)
# Dave adds content to his library
libraries_added = []
number_of_documents = 20
for i in range(number_of_documents):
# Add document
library = LibraryShop()
url = url_for('documentview', library=library_id_dave)
response = self.client.post(
url,
data=library.document_view_post_data_json('add'),
headers=user_dave.headers
)
self.assertEqual(response.json['number_added'],
len(library.bibcode))
self.assertEqual(response.status_code, 200, response)
libraries_added.append(library)
# Checks they are all in the library
url = url_for('libraryview', library=library_id_dave)
canonical_bibcode = [i.get_bibcodes()[0] for i in libraries_added]
with MockSolrBigqueryService(
canonical_bibcode=canonical_bibcode) as BQ, \
MockEndPoint([user_dave]) as EP:
response = self.client.get(
url,
headers=user_dave.headers
)
self.assertTrue(len(response.json['documents']) == number_of_documents)
# Dave is too busy to do any work on the library and so asks his
# librarian friend Mary to do it. Dave does not realise she cannot
# add without permissions and Mary gets some error messages
url = url_for('documentview', library=library_id_dave)
response = self.client.post(
url,
data=library.document_view_post_data_json('add'),
headers=user_mary.headers
)
self.assertEqual(response.status_code, NO_PERMISSION_ERROR['number'])
self.assertEqual(response.json['error'], NO_PERMISSION_ERROR['body'])
# Dave now adds her account to permissions. She already has an ADS
# account, and so Dave adds her with her e-mail address with read and
# write permissions (but not admin).
url = url_for('permissionview', library=library_id_dave)
with MockEmailService(user_mary):
response = self.client.post(
url,
data=user_mary.permission_view_post_data_json({'read': False, 'write': True, 'admin': False, 'owner': False}),
headers=user_dave.headers
)
self.assertEqual(response.status_code, 200)
# Mary looks at the library
canonical_bibcode = [i.get_bibcodes()[0] for i in libraries_added]
url = url_for('libraryview', library=library_id_dave)
with MockSolrBigqueryService(
canonical_bibcode=canonical_bibcode) as BQ, \
MockEndPoint([user_dave, user_mary]) as EP:
response = self.client.get(
url,
headers=user_mary.headers
)
self.assertEqual(response.status_code, 200)
self.assertTrue(len(response.json['documents']) == number_of_documents)
# Mary removes a few bibcodes and keeps a list of the ones she
# removed just in case
url = url_for('documentview', library=library_id_dave)
libraries_removed = []
for i in range(number_of_documents // 2):
# Remove documents
response = self.client.post(
url,
data=libraries_added[i].document_view_post_data_json('remove'),
headers=user_mary.headers
)
self.assertEqual(response.json['number_removed'],
len(libraries_added[i].bibcode))
self.assertEqual(response.status_code, 200, response)
libraries_removed.append(libraries_added[i])
libraries_added.remove(libraries_added[i])
# She checks that they got removed
canonical_bibcode = [i.get_bibcodes()[0] for i in libraries_added]
url = url_for('libraryview', library=library_id_dave)
with MockSolrBigqueryService(
canonical_bibcode=canonical_bibcode) as BQ, \
MockEndPoint([user_dave, user_mary]) as EP:
response = self.client.get(
url,
headers=user_mary.headers
)
self.assertTrue(
len(response.json['documents']) == number_of_documents // 2
)
# Dave asks Mary to re-add the ones she removed because they were
# actually useful
url = url_for('documentview', library=library_id_dave)
for library in libraries_removed:
# Add documents
response = self.client.post(
url,
data=library.document_view_post_data_json('add'),
headers=user_mary.headers
)
self.assertEqual(response.json['number_added'],
len(library.bibcode))
self.assertEqual(response.status_code, 200, response)
libraries_added.append(library)
canonical_bibcode.extend(library.get_bibcodes())
# She checks that they got added
url = url_for('libraryview', library=library_id_dave)
with MockSolrBigqueryService(canonical_bibcode=canonical_bibcode) \
as BQ, MockEndPoint([user_dave, user_mary]) as EP:
response = self.client.get(
url,
headers=user_mary.headers
)
self.assertTrue(
len(response.json['documents']) == number_of_documents
)
# Sanity check
# Dave removes her permissions and Mary tries to modify the library
# content, but cannot
url = url_for('permissionview', library=library_id_dave)
with MockEmailService(user_mary):
response = self.client.post(
url,
data=user_mary.permission_view_post_data_json({'read': False, 'write': False, 'admin': False, 'owner': False}),
headers=user_dave.headers
)
self.assertEqual(response.status_code, 200)
# Mary tries to add content
url = url_for('documentview', library=library_id_dave)
response = self.client.post(
url,
data=library.document_view_post_data_json('add'),
headers=user_mary.headers
)
self.assertEqual(response.status_code, NO_PERMISSION_ERROR['number'])
self.assertEqual(response.json['error'], NO_PERMISSION_ERROR['body'])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
#test function
def check(y, x, n):
#check rows for same number (n)
for i in range(0,9):
if grid[y][i] == n:
return False
#check column for same number (n)
for i in range(0,9):
if grid[i][x] == n:
return False
#Check Cell for same number (n)
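# (x//3)*3 and (y//3)*3 give the column/row of the top-left corner of the 3x3 box containing (x, y)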
cellx = (x//3)*3
celly = (y//3)*3
for i in range(0,3):
for j in range(0,3):
if grid[celly+i][cellx+j] == n:
return False
#return True if the row, column and cell checks all pass
return True
#Display function
def printgrid(grid):
gridline = "|----------+----------+----------|"
bordline = "----------------------------------"
print(bordline)
for x in range(9):
for y in range(9):
if ((x == 3 or x == 6) and y == 0):
print(gridline)
if (y == 0 or y == 3 or y== 6):
print("|", end=" ")
print(" " + str(grid[x][y]), end=" ")
if (y == 8):
print("|")
print(bordline)
#Backtrack implementation
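# solve() looks for the next empty cell (value 0), tries every candidate 1-9 that
# passes check(), recurses, then resets the cell to 0 so alternative solutions are
# also explored; every fully filled grid reached this way is printed by printgrid().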
def solve():
for y in range(9):
for x in range(9):
if grid[y][x] == 0:
for n in range(1,10):
if check(y,x,n):
grid[y][x] = n
solve()
grid[y][x] = 0
return
printgrid(grid)
#row by row grid
row = 9
grid = []
for block in range(row):
grid.append([int(n) for n in input("Enter Row "+str(block)+": ").split()])
printgrid(grid)
solve()
input("Exit?") |
### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXIPGUI(gxapi_cy.WrapIPGUI):
"""
GXIPGUI class.
This class is used in the `GXIP <geosoft.gxapi.GXIP>` System for `GXGUI <geosoft.gxapi.GXGUI>` functions
such as defining parameters for pseudo-section plots.
"""
def __init__(self, handle=0):
super(GXIPGUI, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXIPGUI <geosoft.gxapi.GXIPGUI>`
:returns: A null `GXIPGUI <geosoft.gxapi.GXIPGUI>`
:rtype: GXIPGUI
"""
return GXIPGUI()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
@classmethod
def modify_job(cls, ip, db, ini, plot_type, page):
"""
Modify parameters for an `GXIP <geosoft.gxapi.GXIP>` plot.
:param ip: `GXDH <geosoft.gxapi.GXDH>` Handle
:param db: `GXDB <geosoft.gxapi.GXDB>` Handle
:param ini: Job Name (``*.inp``)
:param plot_type: Job type :ref:`IP_PLOT`
:param page: Page to open `GXGUI <geosoft.gxapi.GXGUI>` on
:type ip: GXIP
:type db: GXDB
:type ini: str
:type plot_type: int
:type page: int_ref
:returns: 0 - Ok
-1 - User Cancelled
:rtype: int
.. versionadded:: 6.1
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
"""
ret_val, page.value = gxapi_cy.WrapIPGUI._modify_job(GXContext._get_tls_geo(), ip, db, ini.encode(), plot_type, page.value)
return ret_val
@classmethod
def launch_ipqc_tool(cls, db, line, chan):
"""
Launch the In-Line `GXIP <geosoft.gxapi.GXIP>` QC tool on a database.
:param db: Database name
:param line: Current Line (can be blank)
:param chan: Channel to open with (can be blank)
:type db: str
:type line: str
:type chan: str
.. versionadded:: 8.1
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
**Note:** The database should be a currently open database.
"""
gxapi_cy.WrapIPGUI._launch_ipqc_tool(GXContext._get_tls_geo(), db.encode(), line.encode(), chan.encode())
@classmethod
def launch_offset_ipqc_tool(cls, db, line, chan):
"""
Launch the Offset `GXIP <geosoft.gxapi.GXIP>` QC tool on a database.
:param db: Database name
:param line: Current Line (can be blank)
:param chan: Channel to open with (can be blank)
:type db: str
:type line: str
:type chan: str
.. versionadded:: 9.1
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
**Note:** The database should be a currently open database.
"""
gxapi_cy.WrapIPGUI._launch_offset_ipqc_tool(GXContext._get_tls_geo(), db.encode(), line.encode(), chan.encode())
@classmethod
def ipqc_tool_exists(cls):
"""
See if there is an IPQC Tool (Offset or Inline) already open.
:returns: 0 if not open, 1 if open
:rtype: int
.. versionadded:: 8.1
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
**Note:** See if there is an IPQC Tool already open.
"""
ret_val = gxapi_cy.WrapIPGUI._ipqc_tool_exists(GXContext._get_tls_geo())
return ret_val
@classmethod
def launch_remove_contributing_electrodes_ext_tool(cls, db, map):
"""
Launch the Remove Contributing Electrodes dialog.
:param db: Database name
:param map: Current Map
:type db: str
:type map: str
.. versionadded:: 9.4
**License:** `Geosoft Extended End-User License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-ext-end-user-lic>`_
**Note:** This tool removes the `GXEXT <geosoft.gxapi.GXEXT>` object that allows the database-map linking between an IP data base
and IP plan map for plotting contributing electrodes for a given database row of data.
"""
gxapi_cy.WrapIPGUI._launch_remove_contributing_electrodes_ext_tool(GXContext._get_tls_geo(), db.encode(), map.encode())
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetBillingAccountResult',
'AwaitableGetBillingAccountResult',
'get_billing_account',
'get_billing_account_output',
]
@pulumi.output_type
class GetBillingAccountResult:
def __init__(__self__, display_name=None, master_billing_account=None, name=None, open=None):
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if master_billing_account and not isinstance(master_billing_account, str):
raise TypeError("Expected argument 'master_billing_account' to be a str")
pulumi.set(__self__, "master_billing_account", master_billing_account)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if open and not isinstance(open, bool):
raise TypeError("Expected argument 'open' to be a bool")
pulumi.set(__self__, "open", open)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
The display name given to the billing account, such as `My Billing Account`. This name is displayed in the Google Cloud Console.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="masterBillingAccount")
def master_billing_account(self) -> str:
"""
If this account is a [subaccount](https://cloud.google.com/billing/docs/concepts), then this will be the resource name of the parent billing account that it is being resold through. Otherwise this will be empty.
"""
return pulumi.get(self, "master_billing_account")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name of the billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, `billingAccounts/012345-567890-ABCDEF` would be the resource name for billing account `012345-567890-ABCDEF`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def open(self) -> bool:
"""
True if the billing account is open, and will therefore be charged for any usage on associated projects. False if the billing account is closed, and therefore projects associated with it will be unable to use paid services.
"""
return pulumi.get(self, "open")
class AwaitableGetBillingAccountResult(GetBillingAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBillingAccountResult(
display_name=self.display_name,
master_billing_account=self.master_billing_account,
name=self.name,
open=self.open)
def get_billing_account(billing_account_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBillingAccountResult:
"""
Gets information about a billing account. The current authenticated user must be a [viewer of the billing account](https://cloud.google.com/billing/docs/how-to/billing-access).
"""
__args__ = dict()
__args__['billingAccountId'] = billing_account_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:cloudbilling/v1:getBillingAccount', __args__, opts=opts, typ=GetBillingAccountResult).value
return AwaitableGetBillingAccountResult(
display_name=__ret__.display_name,
master_billing_account=__ret__.master_billing_account,
name=__ret__.name,
open=__ret__.open)
@_utilities.lift_output_func(get_billing_account)
def get_billing_account_output(billing_account_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBillingAccountResult]:
"""
Gets information about a billing account. The current authenticated user must be a [viewer of the billing account](https://cloud.google.com/billing/docs/how-to/billing-access).
"""
...
|
#Dependencies
from flask import Flask, render_template, redirect, url_for
import scrape_mars
import pymongo
import pandas as pd
app = Flask(__name__)
# Use pymongo to set up the Mongo connection
conn = "mongodb://localhost:27017/mars_app"
client = pymongo.MongoClient(conn)
#mars_db = client.db.mars
# Root route that queries the Mongo DB and passes the mars data to the HTML template
@app.route("/")
def index():
mars_data = client.db.mars.find_one()
return render_template("index.html", mars = mars_data)
#
@app.route("/scrape")
def scrape():
#Run the scrape function
mars = scrape_mars.scrape_all()
#Update the Mongo database using update and upsert=True
client.db.mars.update({}, mars, upsert = True)
return redirect("/")
if __name__ == "__main__":
app.run(debug=True) |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Max pooling 2D layer."""
# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
from keras.layers.pooling.base_pooling2d import Pooling2D
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.MaxPool2D', 'keras.layers.MaxPooling2D')
class MaxPooling2D(Pooling2D):
"""Max pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the maximum value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output,
when using the `"valid"` padding option, has a spatial shape
(number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
For example, for `strides=(1, 1)` and `padding="valid"`:
>>> x = tf.constant([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = tf.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding='valid')
>>> max_pool_2d(x)
<tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy=
array([[[[5.],
[6.]],
[[8.],
[9.]]]], dtype=float32)>
For example, for `strides=(2, 2)` and `padding="valid"`:
>>> x = tf.constant([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = tf.reshape(x, [1, 3, 4, 1])
>>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(2, 2), padding='valid')
>>> max_pool_2d(x)
<tf.Tensor: shape=(1, 1, 2, 1), dtype=float32, numpy=
array([[[[6.],
[8.]]]], dtype=float32)>
Usage Example:
>>> input_image = tf.constant([[[[1.], [1.], [2.], [4.]],
... [[2.], [2.], [3.], [2.]],
... [[4.], [1.], [1.], [1.]],
... [[2.], [2.], [1.], [4.]]]])
>>> output = tf.constant([[[[1], [0]],
... [[0], [1]]]])
>>> model = tf.keras.models.Sequential()
>>> model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... input_shape=(4, 4, 1)))
>>> model.compile('adam', 'mean_squared_error')
>>> model.predict(input_image, steps=1)
array([[[[2.],
[4.]],
[[4.],
[4.]]]], dtype=float32)
For example, for `strides=(1, 1)` and `padding="same"`:
>>> x = tf.constant([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = tf.reshape(x, [1, 3, 3, 1])
>>> max_pool_2d = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),
... strides=(1, 1), padding='same')
>>> max_pool_2d(x)
<tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy=
array([[[[5.],
[6.],
[6.]],
[[8.],
[9.],
[9.]],
[[8.],
[9.],
[9.]]]], dtype=float32)>
Args:
pool_size: integer or tuple of 2 integers,
window size over which to take the maximum.
`(2, 2)` will take the max value over a 2x2 pooling window.
If only one integer is specified, the same window length
will be used for both dimensions.
strides: Integer, tuple of 2 integers, or None.
Strides values. Specifies how far the pooling window moves
for each pooling step. If None, it will default to `pool_size`.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, rows, cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, rows, cols)`.
Output shape:
- If `data_format='channels_last'`:
4D tensor with shape `(batch_size, pooled_rows, pooled_cols, channels)`.
- If `data_format='channels_first'`:
4D tensor with shape `(batch_size, channels, pooled_rows, pooled_cols)`.
Returns:
A tensor of rank 4 representing the maximum pooled values. See above for
output shape.
"""
def __init__(self,
pool_size=(2, 2),
strides=None,
padding='valid',
data_format=None,
**kwargs):
super(MaxPooling2D, self).__init__(
tf.compat.v1.nn.max_pool,
pool_size=pool_size, strides=strides,
padding=padding, data_format=data_format, **kwargs)
# Alias
MaxPool2D = MaxPooling2D
|
#Copyright 2015 Patrick Porter
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
## http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import xml.etree.ElementTree as ET
import logging
import json
import time
import Levenshtein
from functools import partial
from multiprocessing import Pool
import os
import glob
import subprocess
from operator import itemgetter
import datamodel
localDir = os.path.dirname(__file__)
absDir = os.path.join(os.getcwd(), localDir)
def get_lev_ratio(searchstring, minscore, casecost, comparestring):
"""Uses python-Levenshtein and performs a character-based Levenshtein distance calculation and returns a percentage score
minscore should be a float between 0 and 1; casecost represents the cost of
replacements that merely involve a change in case from lower to upper or vice versa.
A casecost of less than 1 warps results in favor of strings differing only by case.
The algorithm performs two distance calculations: one with the actual strings,
and one with both strings lowercased, calculating the case difference based on the
difference between the two scores.
"""
searchstring = str.strip(searchstring) #don't want to leave spaces and returns at ends
comparestring = str.strip(comparestring) #don't want to leave spaces and returns at ends
sumlen = len(searchstring) + len(comparestring)
d1 = Levenshtein.distance(searchstring, comparestring)
d2 = Levenshtein.distance(str.lower(searchstring), str.lower(comparestring))
#d1 = editdistance.eval(searchstring, comparestring)
#d2 = editdistance.eval(str.lower(searchstring), str.lower(comparestring)) #editdistance library is slightly slower than python-Levenshtein
diff = d1-d2
dresult = d2 + (diff*casecost)
ratio = (sumlen-dresult) / sumlen
if ratio >= minscore:
return tuple([comparestring, ratio])
else:
return
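# Illustrative example (not part of the original module): with casecost=0.2,
# get_lev_ratio("Hello World", 0.5, 0.2, "hello world") computes d1 = 2 (two
# case-only replacements), d2 = 0 after lowercasing, dresult = 0 + 2*0.2 = 0.4,
# and returns ("hello world", (22 - 0.4) / 22) with a ratio of about 0.98,
# whereas two real character edits would score only (22 - 2) / 22, about 0.91.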
class TmProvider(object):
"""Provides methods for searching a set of string data for exact and fuzzy matches,
as well as for loading, deleting, and otherwise maintaining the data"""
def __init__(self, config):
"""cores is the max number of processor cores that will be used for
Levenshtein calculation during search.
use_mysql defaults to False (in which case sqlite is used),
but if set to True will use MySql (DB must be already created/configured)"""
self.num_cores = config['numcores']
self.use_mysql=config['use_mysql']
self.data = {}
self.tms = {}
self.currently_loading = False
self.loaded = False
self.data_mgr = datamodel.TmData(config)
def load_tm_to_memory(self, tm_id):
"""Loads data for a given translation memory document from DB to memory for faster searching"""
tm_id=int(tm_id) #type conversion to int in case not done before passing
#set status indicator variables
self.loaded=False
self.currently_loading=True
#add the tm from the db to this instance's dict of loaded TMs
tm = self.data_mgr.get_tms().get(tm_id)
status = "success"
if tm:
self.tms[tm_id]=tm
#load the TUs to this instance's dict of loaded TUs
#by passing the current dict it adds the new items to the current dict
#otherwise a new dict is created
#test
#size1 = len(self.data)
self.data = self.data_mgr.get_tus(tm_id, self.data)
#test
#size2 = len(self.data)
else:
status = "no TM with ID of '{0}' exists".format(tm_id)
#reset status indicators and return
self.currently_loading=False
self.loaded=True
return {'status' : status}
def list_tms(self, user):
"""Lists the translation memory documents (TMX files) that have been imported into the database
and are available for loading into memory and searching"""
all_tms = self.data_mgr.get_tms()
results = []
for tm in all_tms: #only return the TMs that the user can read and indicate whether read-only
can_read = False
can_write = False
if ((self.data_mgr.get_owner(tm) == user) or
(user in self.data_mgr.get_tm_read_write_group_users(tm)) or
(user in self.data_mgr.get_admin_users())):
can_read = True
can_write = True
if user in self.data_mgr.get_tm_read_group_users(tm):
can_read = True
if can_read or can_write:
result = all_tms[tm]
result['can_read'] = can_read
result['can_write'] = can_write
results.append(result)
return results
def delete_tm_from_db(self, tm_id):
"""Permanently deletes all the data related to a previously-loaded
translation memory document from the DB. Careful..no going back unless
the DB has been backed up."""
deleted_sourcetexts={}
for sourcetext in self.data.keys():
delete_items=[]
for item in self.data[sourcetext]:
if str(item['tm_id'])==tm_id:
delete_items.append(item)
deleted_sourcetexts[sourcetext]=item['tu_id'] #to pop from the dict after iterating, if empty
#now after iterating we can remove the list items
for item in delete_items:
self.data[sourcetext].remove(item) #remove all items whose tm_id matches the one deleted from the DB
#now after iterating we can remove sourcetext keys that have no TU items left
for item in deleted_sourcetexts.keys():
if len(self.data[item])==0:
self.data.pop(item)
#now delete TM from in-memory TM list
self.tms.pop(int(tm_id))
#now delete from disk
self.data_mgr.delete_tm_by_id(tm_id)
self.data_mgr.delete_tus_by_tm_id(tm_id)
return {'status' : 'success'}
def delete_tu(self, tm_id, source, target):
"""Permanently deletes a TU based on sourcetext/targettext pair from the specified TM"""
#delete from memory
if source in self.data: #only do if actually in memory
sourcematches = self.data[source]
for item in sourcematches:
if str(item['tm_id']) == tm_id and item['targettext']==target:
self.data[source].remove(item) #remove all items whose tm_id matches the one deleted from the DB
#now if the sourcetext key has no TU items left, remove the key
if len(self.data[source])==0:
self.data.pop(source)
#now from DB
existing_tus = self.data_mgr.get_tus_from_sourcetext(tm_id, source)
tu_ids = []
if existing_tus:
for item in existing_tus[source]:
if item['targettext']==target:
tu_ids.append(item['tu_id'])
for tu_id in tu_ids:
self.data_mgr.delete_tu_by_tu_id(tu_id)
return {'status' : 'success'}
def add_or_update_tu(self, tm_id, source, target, user, allow_multiple=False, overwrite_with_new=True):
"""Adds a source text/target text pair to the specified translation memory,
in the DB as well as the in-memory TM"""
existing_tus = self.data_mgr.get_tus_from_sourcetext(tm_id, source)
existing_targets = [x['targettext'] for x in existing_tus[source]] if existing_tus else []
if target in existing_targets: #skip if there is a TU with the same source and target
return {'status' : 'tu not added or updated because one with the same source text and target text already exists'}
elif (not existing_tus) or (not allow_multiple):
status = 'TU added'
if existing_tus and overwrite_with_new: #if the source exists and overwrite = true, we are going to delete all existing TUs with that source and add this as new
for i in range(len(existing_tus[source])):
self.data_mgr.delete_tu_by_tu_id(existing_tus[source][i]['tu_id'])
status = 'TU(s) updated'
#now add to DB and in-memory tm
tu_id = self.data_mgr.add_tu(tm_id, source, target, user, user)
self.data[source] = [{'tm_id':tm_id, 'tu_id':tu_id, 'sourcetext':source, 'targettext':target,
'created_by':user, 'changed_by':user, 'created_date':time.strftime("%Y-%m-%d %H:%M:%S"),
'changed_date':time.strftime("%Y-%m-%d %H:%M:%S"), 'last_used_date':time.strftime("%Y-%m-%d %H:%M:%S")}] #store as a list so later lookups can iterate over multiple TUs
return {'status' : status}
elif (allow_multiple and target not in existing_targets): #if tu with the same sourcetext doesn't exist...or if allow multiple and there isn't one already with same source and target...simply add it
tu_id = self.data_mgr.add_tu(tm_id, source, target, user, user)
self.data[source].append({'tm_id':tm_id, 'tu_id':tu_id, 'sourcetext':source, 'targettext':target,
'created_by':user, 'changed_by':user, 'created_date':time.strftime("%Y-%m-%d %H:%M:%S"),
'changed_date':time.strftime("%Y-%m-%d %H:%M:%S"), 'last_used_date':time.time()}) #add new data to memory
return {'status' : 'tu added'}
def create_tm_from_memory(self, tm_name, sourcelang, targetlang, owner, data):
"""Creates a new TM and adds all the TUs in memory to it in the DB,
the 'data' parameter should be a dict whose keys are source texts and values are dicts of TU data"""
tm_id = self.data_mgr.add_tm(tm_name, "from_memory", sourcelang, targetlang, owner)
starttime=time.time()
logging.info("started import from in-memory tm to DB, tm_id: {0}".format(tm_id))
#open a data connection to keep open and send TUs one-by-one...to be committed and closed later when done
cnx = self.data_mgr.get_connection()
num_tus=0
for key in data:
value = data[key]
for i in range(len(value)):
tu = dict(value[i])
self.data_mgr.add_tu(tm_id, tu['sourcetext'], tu['targettext'], owner, owner,
time.strftime("%Y-%m-%d %H:%M:%S"), time.strftime("%Y-%m-%d %H:%M:%S"), time.strftime("%Y-%m-%d %H:%M:%S"), cnx)
num_tus+=1
endtime = time.time()
logging.info("processed {0} TUs\ntime: {1}".format(num_tus, endtime - starttime))
#commit and close
cnx.commit()
cnx.cursor().close()
cnx.close()
return {'status' : 'success. processed {0} TUs'.format(num_tus)}
def normalize_time_tmx_to_iso(self, timestring):
"""deals with differences in handling of iso8601, time...
i.e., the DBs seem to choke with the T and the Z"""
#string e.g.: 20140204T184725Z has to become 2014-02-04 18:47:25
y = timestring[0:4]
mon = timestring[4:6]
d = timestring[6:8]
h = timestring[9:11]
min = timestring[11:13]
s = timestring[13:15]
result = "{0}-{1}-{2} {3}:{4}:{5}".format(y,mon,d,h,min,s)
return result
def load_tmx_to_db(self, tmxfile, tm_name, owner):
"""Takes a previously uploaded TMX file and parses it, adding the translation units
and info about the TM into the DB"""
starttime=time.time()
logging.info("started TMX import...")
#TODO: check if TM already exists and error handling?????
#TODO: check for empty string name and return...i.e. make required
header = "";
srclang=""
num_tus = 0
srclang = None
tgtlang = None
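# The TMX file is parsed twice: this first pass only reads the header and the
# first <tu> to work out the source and target languages, then the second pass
# below inserts every translation unit into the DB.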
parser = ET.iterparse(tmxfile)
for event, element in parser:
if element.tag == 'header':
srclang = element.attrib["srclang"]
if element.tag == 'tu':
tuvs = element.findall("tuv")
#determine which is source and which is target...and put into tm insert
lang0 = tuvs[0].attrib['{http://www.w3.org/XML/1998/namespace}lang']
lang1 = tuvs[1].attrib['{http://www.w3.org/XML/1998/namespace}lang']
if lang0 == srclang:
tgtlang = lang1
else:
tgtlang = lang0
break
#insert a new TM into the DB and return the id of the newly inserted tm
tm_id=self.data_mgr.add_tm(tm_name, tmxfile, srclang, tgtlang, owner)
#now insert TUs for the new TM
#open a data connection to keep open and send TUs one-by-one...to be committed and closed later when done
cnx = self.data_mgr.get_connection()
#parse the rest of the doc
parser = ET.iterparse(tmxfile)
for event, element in parser:
if element.tag == 'tu':
tuvs = element.findall("tuv")
lang0 = tuvs[0].attrib['{http://www.w3.org/XML/1998/namespace}lang']
if lang0 == srclang:
segtext = tuvs[0].find("seg").text
trgtext = tuvs[1].find("seg").text
else:
segtext = tuvs[1].find("seg").text
trgtext = tuvs[0].find("seg").text
created_by = element.attrib['creationid'] if 'creationid' in element.attrib else owner
#strip out the 'T' in tmx datetime stamp b/c mysql doesn't understand it
created_date = self.normalize_time_tmx_to_iso(element.attrib['creationdate']) if 'creationdate' in element.attrib else time.strftime("%Y-%m-%d %H:%M:%S")
changed_by = element.attrib['changeid'] if 'changeid' in element.attrib else owner
changed_date = self.normalize_time_tmx_to_iso(element.attrib['changedate']) if 'changedate' in element.attrib else time.strftime("%Y-%m-%d %H:%M:%S")
last_used_date = self.normalize_time_tmx_to_iso(element.attrib['lastusagedate']) if 'lastusagedate' in element.attrib else time.strftime("%Y-%m-%d %H:%M:%S")
#insert the tu...By passing the current connection, the connection will stay open without the single insert being committed
self.data_mgr.add_tu(tm_id=tm_id, sourcetext=str.strip(segtext),
targettext=str.strip(trgtext),
created_by=created_by, created_date=created_date,
changed_by=changed_by, changed_date=changed_date, last_used_date=last_used_date, connection=cnx)
element.clear()
num_tus+=1
endtime = time.time()
logging.info("processed {0} TUs\ntime: {1}".format(num_tus, endtime - starttime))
#commit and close
cnx.commit()
cnx.cursor().close()
cnx.close()
def import_tmx_file(self, file, tm_name, owner):
"""Starts an upload of a TMX file and then calls the import to DB method"""
#TODO: check for empty strings on args
#TODO: deal with file locking issues here in case 2 people are trying to load the same filename...also prevent overwriting in this case
starttime=time.time()
logging.info("started TMX import...")
size = 0
localfilename="{0}/upload/{1}".format(absDir, file.filename)
localfile = open(localfilename, 'wb')
while True:
data = file.file.read(8192)
localfile.write(data)
if not data:
break
size += len(data)
localfile.close()
endtime = time.time() - starttime
logging.info("finished TMX upload...time elapsed: {0}".format(endtime))
logging.info("started TMX parsing / DB insertion")
self.load_tmx_to_db(localfilename, tm_name, owner)
def export_tmx_file(self, tm_id):
"""Retrieves the specified TM and exports it as a TMX file"""
logging.info("started TMX export...")
#TODO: put in logic to export to TMX
def search(self, searchtext, threshold=.75, maxresults=0, casecost=.2):
"""The whole point...searches for exact and fuzzy matches;
rates and ranks, returning in descending order of match %.
threshold is the minimum match score to return.
maxresults is the maximum number of results to return (0 means no max)
casecost is the cost applied to replacements consisting of merely a case change
in the Levenshtein distance calc. A casecost of less than one warps results in favor
of strings with merely case differences."""
#type convert in case necessary
threshold=float(threshold)
casecost=float(casecost)
maxresults=int(maxresults)
logging.info("searching with Levenshtein...")
lev_start_time = time.time()
sourcelist = list(self.data.keys())
searchresults = {'data':{'matches':[]}}
pre_endtime = time.time()
logging.info("Pre-processing took {0} seconds\n".format(pre_endtime - lev_start_time))
if self.num_cores==0:
p = Pool() #uses max available
else:
p = Pool(self.num_cores)
results = set(p.map(partial(get_lev_ratio, searchtext, threshold, casecost), sourcelist)) #this has to be a list of hashable objects i think??
p.close()
endtime = time.time()
logging.info("Levenshtein lookup took {0} seconds\n".format(endtime - pre_endtime))
results.discard(None) #there may be a 'None' element for strings below the threshold...see get_lev_ratio ..r/t multiprocessing and speed...need to return small set...is it possible to do an intermediate processing step in the map???
results = sorted(results, key=itemgetter(1), reverse=True) #sort results descending by score
count=0
for result in results:
if maxresults !=0:
if count >= maxresults: break
count+=1
sourcetext = result[0]
tus = self.data[sourcetext] #for now this is only going to return one...but we should prob change it to allow multiple source entries
for tu in tus: #if there are multiple tus for a given sourcetext the tu select will return more than one record
#TODO: make option to retrieve editops?
#editops = Levenshtein.editops(str.strip(searchtext),str.strip(sourcetext))
score = result[1]
match = {'sourcetext':sourcetext, 'targettext':tu['targettext'], 'matchscore':score,
'created_by':tu['created_by'], 'created_date':str(tu['created_date']),
'changed_by':tu['changed_by'], 'changed_date':str(tu['changed_date']),
'last_used_date':str(tu['last_used_date'])}
searchresults['data']['matches'].append(match)
logging.info("post-processing took {0} seconds\n".format(time.time() - endtime))
return searchresults
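# Illustrative call (instance name assumed): provider.search("Hello world",
# threshold=0.8, maxresults=5) returns {'data': {'matches': [...]}} with the
# matches sorted by 'matchscore' in descending order.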
#TODO: can we make the lev method faster...i.e. is it something to do with processing the intermediate data??? |
import cocotb
from cocotb.triggers import Timer
from uvm import (UVMTest, run_test)
from uvm.macros import (uvm_component_utils, uvm_fatal)
from master_slave_pkg import env_top
test_dur = 1000 # NS
class master_slave_test(UVMTest):
def __init__(self, name, parent):
super().__init__(name, parent)
def build_phase(self, phase):
self.env = env_top("env_master_slave", self)
async def run_phase(self, phase):
phase.raise_objection(self)
await Timer(test_dur, "NS")
phase.drop_objection(self)
def check_phase(self, phase):
if not self.env.all_ok():
uvm_fatal("ENV_NOT_OK", "There were errors in the env")
uvm_component_utils(master_slave_test)
@cocotb.test()
async def master_slave_top(dut):
await run_test()
|
#******************************************************************************
#
# MantaGen
# Copyright 2018 Steffen Wiewel, Moritz Becher, Nils Thuerey
#
# This program is free software, distributed under the terms of the
# Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
#******************************************************************************
from manta import *
from random import randint
from scenes.scene import Scene
from scenes.volumes import *
from scenes.functions import *
from util.logger import *
def instantiate_scene(**kwargs): # instantiate independent of name , TODO replace?
info(kwargs)
return SmokeSimpleScene(**kwargs)
class SmokeSimpleScene(Scene):
#----------------------------------------------------------------------------------
def __init__(self, **kwargs):
super(SmokeSimpleScene,self).__init__(**kwargs)
# optionally, init more grids etc.
self.max_iter_fac = 2
self.accuracy = 5e-4
self.max_source_count = int(kwargs.get("max_source_count", 2))
self.velocity_scale = float(kwargs.get("velocity_scale", self.resolution.y * 0.05))
self.use_inflow_sources = kwargs.get("use_inflow_sources", "True") == "True"
self.open_bound = kwargs.get("use_open_bound", "True") == "True"
self.sources = []
info("SmokeSimpleScene initialized")
#----------------------------------------------------------------------------------
def set_velocity(self, volume, velocity):
if self.dimension == 2:
velocity.z = 0.0
volume.applyToGrid(solver=self.solver, grid=self.vel, value=velocity)
#----------------------------------------------------------------------------------
def _create_scene(self):
super(SmokeSimpleScene, self)._create_scene()
self.sources = []
self.vel.setConst(vec3(0))
self.flags.initDomain(boundaryWidth=self.boundary)
self.flags.fillGrid()
if self.open_bound:
setOpenBound(self.flags, self.boundary, 'yY', CellType_TypeOutflow|CellType_TypeEmpty)
is3d = (self.dimension > 2)
source_count = randint(1, self.max_source_count)
for i in range(source_count):
volume = random_box(center_min=[0.2, 0.1, 0.2], center_max=[0.8, 0.6, 0.8], size_min=[0.005, 0.005, 0.005], size_max=[0.2, 0.2, 0.2], is3d=is3d)
velo = random_vec3( vmin=[-self.velocity_scale, -self.velocity_scale, -self.velocity_scale],
vmax=[self.velocity_scale, self.velocity_scale, self.velocity_scale])
self.sources.append( (volume, velo) )
# set velocity on startup if needed
if not self.use_inflow_sources:
self.set_velocity( volume, velo )
if self.show_gui:
# central view is more interesting for smoke
self._gui.setPlane( self.resolution.z // 2 )
info("SmokeSimpleScene created with {} sources".format(len(self.sources)))
#==================================================================================
# SIMULATION
#----------------------------------------------------------------------------------
def _compute_simulation_step(self):
advectSemiLagrange(flags=self.flags, vel=self.vel, grid=self.vel, order=2, clampMode=2)
# apply velocity source
if self.use_inflow_sources:
for vol, vel in self.sources:
self.set_velocity( vol, vel )
vorticityConfinement( vel=self.vel, flags=self.flags, strength=0.1 )
setWallBcs(flags=self.flags, vel=self.vel)
solvePressure(flags=self.flags, vel=self.vel, pressure=self.pressure, cgMaxIterFac=self.max_iter_fac, cgAccuracy=self.accuracy) |
# Regular expressions IV
# Match and search
import re
nombre1="Jara Lopez"
nombre2= "Antonio Gomez"
nombre3="Lara Lopez"
if re.match(".ara", nombre1, re.IGNORECASE):
print("Hemos encontrado el nombre")
else:
print("No Hemos encontrado el nombre")
|
"""Sort module."""
from math import floor
def bubble_sort(vals):
"""Sort the given array using bubble sort."""
for i in range(len(vals) - 1):
for j in range(len(vals) - i - 1):
if vals[j] > vals[j + 1]:
vals[j], vals[j + 1] = vals[j + 1], vals[j]
return vals
def selection_sort(vals):
"""Sort the given array using selection sort."""
for i in range(len(vals) - 1):
min_index = i
for j in range(i + 1, len(vals)):
if vals[j] < vals[min_index]:
min_index = j
if min_index != i:
vals[i], vals[min_index] = vals[min_index], vals[i]
return vals
def merge_sort(vals):
"""Sort the given array using merge sort."""
    if len(vals) <= 1:
return vals
mid = floor(len(vals) / 2)
return _merge(merge_sort(vals[:mid]), merge_sort(vals[mid:]))
def _merge(a, b):
"""Used by "merge_sort" function to merge two sorted arrays into one sorted array."""
result = []
while len(a) > 0 and len(b) > 0:
if a[0] < b[0]:
result.append(a[0])
del a[0]
else:
result.append(b[0])
del b[0]
return [*result, *a, *b] |
import datetime
import collections
from ckan.common import OrderedDict, _
from ckanext.report import lib
import ckan.plugins as p
from ckan.plugins import toolkit
def publisher_activity(organization, include_sub_organizations=False):
"""
Contains information about the datasets a specific organization has
released in this and last quarter (calendar year). This is needed by
departments for their quarterly transparency reports.
"""
import datetime
now = datetime.datetime.now()
if organization:
quarters = get_quarter_dates(now)
created, modified = _get_activity(
organization, include_sub_organizations, quarters)
datasets = []
for quarter_name in quarters:
datasets += sorted(created[quarter_name], key=lambda x: x[1])
datasets += sorted(modified[quarter_name], key=lambda x: x[1])
columns = ('Dataset name', 'Dataset title', 'Dataset notes', 'Modified or created', 'Quarter', 'Timestamp', 'Author', 'Published')
quarters_iso = dict(
[(last_or_this, [date_.isoformat() for date_ in q_list])
for last_or_this, q_list in quarters.iteritems()])
datasets_with_title = []
for dataset in datasets:
package_dict = toolkit.get_action('package_show')({}, {'id': dataset[0]})
dataset = (dataset[1], package_dict.get('title_translated')['fi'], dataset[3], dataset[4], dataset[5],
dataset[6], dataset[7], dataset[8])
datasets_with_title.append(dataset)
return {'table': datasets_with_title, 'columns': columns,
'quarters': quarters_iso}
else:
# index
periods = get_quarter_dates_merged(now)
stats_by_org = []
totals = collections.defaultdict(int)
import ckan.model as model
all_orgs = model.Session.query(model.Group). \
filter(model.Group.type=='organization'). \
filter(model.Group.state=='active').order_by('name'). \
all()
for organization in add_progress_bar(all_orgs):
created, modified = _get_activity(
organization.name, include_sub_organizations, periods)
created_names = [dataset[1] for dataset in created.values()[0]]
modified_names = [dataset[1] for dataset in modified.values()[0]]
num_created = len(created_names)
num_modified = len(modified_names)
num_total = len(set(created_names) | set(modified_names))
stats_by_org.append(OrderedDict((
('organization name', organization.name),
('organization title', organization.title),
('num created', num_created),
('num modified', num_modified),
('total', num_total),
)))
if not include_sub_organizations:
totals['num created'] += num_created
totals['num modified'] += num_modified
totals['total'] += num_total
period_iso = [date_.isoformat()
for date_ in periods.values()[0]]
stats_by_org.sort(key=lambda x: -x['total'])
return {'table': stats_by_org,
'totals': totals,
'period': period_iso}
def get_quarter_dates(datetime_now):
'''Returns the dates for this (current) quarter and last quarter. Uses
calendar year, so 1 Jan to 31 Mar etc.'''
now = datetime_now
month_this_q_started = (now.month - 1) // 3 * 3 + 1
this_q_started = datetime.datetime(now.year, month_this_q_started, 1)
this_q_ended = datetime.datetime(now.year, now.month, now.day)
last_q_started = datetime.datetime(
        this_q_started.year + (this_q_started.month-3)//12,
(this_q_started.month-4) % 12 + 1,
1)
last_q_ended = this_q_started - datetime.timedelta(days=1)
return {'this': (this_q_started, this_q_ended),
'last': (last_q_started, last_q_ended)}
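# Worked example (added for illustration): if datetime_now is 20 May 2016 then
# month_this_q_started = 4, so the function returns
#   {'this': (datetime(2016, 4, 1), datetime(2016, 5, 20)),
#    'last': (datetime(2016, 1, 1), datetime(2016, 3, 31))}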
def get_quarter_dates_merged(datetime_now):
'''Returns the dates for the period including this (current) quarter and
the last quarter. Uses calendar year, so 1 Jan to 31 Mar etc.'''
now = datetime_now
month_this_q_started = (now.month - 1) // 3 * 3 + 1
this_q_started = datetime.datetime(now.year, month_this_q_started, 1)
this_q_ended = datetime.datetime(now.year, now.month, now.day)
last_q_started = datetime.datetime(
        this_q_started.year + (this_q_started.month-3)//12,
(this_q_started.month-4) % 12 + 1,
1)
last_q_ended = this_q_started - datetime.timedelta(days=1)
return {'this_and_last': (last_q_started, this_q_ended)}
def _get_activity(organization_name, include_sub_organizations, periods):
import ckan.model as model
from paste.deploy.converters import asbool
created = dict((period_name, []) for period_name in periods)
modified = dict((period_name, []) for period_name in periods)
# These are the authors whose revisions we ignore, as they are trivial
# changes. NB we do want to know about revisions by:
# * harvest (harvested metadata)
# * dgu (NS Stat Hub imports)
# * Fix national indicators
system_authors = ('autotheme', 'co-prod3.dh.bytemark.co.uk',
'Date format tidier', 'current_revision_fixer',
'current_revision_fixer2', 'fix_contact_details.py',
'Repoint 410 Gone to webarchive url',
'Fix duplicate resources',
'fix_secondary_theme.py',
)
system_author_template = 'script%' # "%" is a wildcard
if organization_name:
organization = model.Group.by_name(organization_name)
if not organization:
raise p.toolkit.ObjectNotFound()
if not organization_name:
pkgs = model.Session.query(model.Package) \
.all()
else:
pkgs = model.Session.query(model.Package)
pkgs = lib.filter_by_organizations(pkgs, organization,
include_sub_organizations).all()
for pkg in pkgs:
created_ = model.Session.query(model.PackageRevision) \
.filter(model.PackageRevision.id == pkg.id) \
.order_by("revision_timestamp asc").first()
pr_q = model.Session.query(model.PackageRevision, model.Revision) \
.filter(model.PackageRevision.id == pkg.id) \
.filter_by(state='active') \
.join(model.Revision) \
.filter(~model.Revision.author.in_(system_authors)) \
.filter(~model.Revision.author.like(system_author_template))
rr_q = model.Session.query(model.Package, model.ResourceRevision, model.Revision) \
.filter(model.Package.id == pkg.id) \
.filter_by(state='active') \
.join(model.ResourceRevision,
model.Package.id == model.ResourceRevision.package_id) \
.join(model.Revision) \
.filter(~model.Revision.author.in_(system_authors)) \
.filter(~model.Revision.author.like(system_author_template))
pe_q = model.Session.query(model.Package, model.PackageExtraRevision, model.Revision) \
.filter(model.Package.id == pkg.id) \
.filter_by(state='active') \
.join(model.PackageExtraRevision,
model.Package.id == model.PackageExtraRevision.package_id) \
.join(model.Revision) \
.filter(~model.Revision.author.in_(system_authors)) \
.filter(~model.Revision.author.like(system_author_template))
for period_name in periods:
period = periods[period_name]
# created
if period[0] < created_.revision_timestamp < period[1]:
published = not asbool(pkg.extras.get('unpublished'))
created[period_name].append(
(created_.id, created_.name, created_.title, lib.dataset_notes(pkg),
'created', period_name,
created_.revision_timestamp.isoformat(),
created_.revision.author, published))
# modified
# exclude the creation revision
period_start = max(period[0], created_.revision_timestamp)
prs = pr_q.filter(model.PackageRevision.revision_timestamp > period_start) \
.filter(model.PackageRevision.revision_timestamp < period[1])
rrs = rr_q.filter(model.ResourceRevision.revision_timestamp > period_start) \
.filter(model.ResourceRevision.revision_timestamp < period[1])
pes = pe_q.filter(model.PackageExtraRevision.revision_timestamp > period_start) \
.filter(model.PackageExtraRevision.revision_timestamp < period[1])
authors = ' '.join(set([r[1].author for r in prs] +
[r[2].author for r in rrs] +
[r[2].author for r in pes]))
dates = set([r[1].timestamp.date() for r in prs] +
[r[2].timestamp.date() for r in rrs] +
[r[2].timestamp.date() for r in pes])
dates_formatted = ' '.join([date.isoformat()
for date in sorted(dates)])
if authors:
published = not asbool(pkg.extras.get('unpublished'))
modified[period_name].append(
(pkg.id, pkg.name, pkg.title, lib.dataset_notes(pkg),
'modified', period_name,
dates_formatted, authors, published))
return created, modified
def add_progress_bar(iterable, caption=None):
try:
# Add a progress bar, if it is installed
import progressbar
bar = progressbar.ProgressBar(widgets=[
(caption + ' ') if caption else '',
progressbar.Percentage(), ' ',
progressbar.Bar(), ' ', progressbar.ETA()])
return bar(iterable)
except ImportError:
return iterable
def publisher_activity_combinations():
for org in lib.all_organizations(include_none=True):
for include_sub_organizations in (False, True):
yield {'organization': org,
'include_sub_organizations': include_sub_organizations}
publisher_activity_report_info = {
'name': 'publisher-activity',
'title': 'Publisher activity',
'description': 'A quarterly list of datasets created and edited by a publisher.',
'option_defaults': OrderedDict((('organization', None),
('include_sub_organizations', False),
)),
'option_combinations': publisher_activity_combinations,
'generate': publisher_activity,
'template': 'report/publisher_activity.html',
} |
# coding=utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional distributed datagen and augmentation problem defs."""
import tensorflow as tf
import os
import uuid
import tempfile
import numpy as np
#from pcml.operations import cbt_datagen
from tensor2tensor.utils import registry
from clarify.utils import cbt_utils
#from pcml.operations import extract
from clarify.utils.cfg_utils import Config
TEST_CONFIG = Config()
from clarify.utils.cbt_utils import _lex_index
class TestCBTUtils(tf.test.TestCase):
def setUp(self):
self.project = TEST_CONFIG.get("project")
self.instance = TEST_CONFIG.get("test_cbt_instance")
self.tmpdir = tempfile.mkdtemp()
self.table = "clarify-test-{}-cbt-utils".format(str(uuid.uuid4())[0:8])
def test_helper_models(self):
"""Currently these don't enforce types of the nested objects."""
vm = cbt_utils.VideoMeta(video_length=1,
audio_length=1,
shard_id=0,
video_id=0,
audio_block_size=1000)
vm.as_dict()
sampling_meta00 = {
'frame_skip_size': 0,
'frame_shift': 0,
'frame_sample_bounds': [178, 192],
'audio_sample_bounds': [315056, 341604]
}
abm = {
"min_query_block": 1,
"max_query_block": 3,
"num_query_blocks": 3,
"query_start": 100,
"query_end": 200
}
avcs = cbt_utils.AVCorrespondenceSample(
video=np.array([1, 2, 3]),
audio=np.array([1, 2, 3]),
labels={
"same_video": 1,
"overlap": 0
},
meta={
"video_source": vm,
"audio_source": vm,
"video_sample_meta": sampling_meta00,
"audio_sample_meta": sampling_meta00,
"audio_keys": ["foo"],
"frame_keys": ["foo", "foo"],
"audio_block_meta": abm
})
serialized = avcs.serialize()
vsm = cbt_utils.VideoShardMeta(num_videos=1,
status="started",
shard_id=0,
num_shards=1)
vsm.num_video = 10
vsm.status = "finished"
vsm.as_dict()
def test_set_and_lookup_shard_meta(self):
table_tag = "{}-meta".format(self.table)
prefix = "train"
selection = cbt_utils.RawVideoSelection(project=self.project,
instance=self.instance,
table=table_tag,
prefix=prefix)
sent_meta = cbt_utils.VideoShardMeta(shard_id=0,
num_videos=1,
status="finished",
num_shards=1)
selection.set_shard_meta(sent_meta)
recv_meta = selection.lookup_shard_metadata()
train_meta_key = "train_meta_{}".format(_lex_index(0)).encode()
self.assertTrue(train_meta_key in recv_meta)
self.assertEqual(recv_meta[train_meta_key].as_dict(), sent_meta.as_dict())
"""
def test_generate_av_correspondence_examples(self):
table_tag = "{}-pairs".format(self.table)
prefix = "train"
manifest_path = "gs://clarify-dev/test/extract/manifest.csv"
frames_per_video = 15
downsample_xy_dims = 64
greyscale = True
num_channels = 1
selection = cbt_utils.RawVideoSelection(project=self.project,
instance=self.instance,
table=table_tag,
prefix=prefix)
extract.extract_to_cbt(manifest_path=manifest_path,
shard_id=0,
num_shards=1,
project=self.project,
instance=self.instance,
table=table_tag,
target_prefix=prefix,
tmp_dir=tempfile.mkdtemp(),
downsample_xy_dims=downsample_xy_dims,
greyscale=greyscale,
resample_every=2,
audio_block_size=1000)
selection_meta = selection.lookup_shard_metadata()
train_meta_key = "train_meta_{}".format(_lex_index(0)).encode()
self.assertTrue(selection_meta[train_meta_key].num_videos == 1)
video_meta = selection._get_random_video_meta(selection_meta)
generator = selection.sample_av_correspondence_examples(
frames_per_video=frames_per_video, max_num_samples=1)
sample = generator.__next__()
self.assertTrue(isinstance(sample, dict))
for key, value in sample.items():
cond = isinstance(value, cbt_utils.AVCorrespondenceSample)
self.assertTrue(cond)
positive_same = sample["positive_same"]
negative_same = sample["negative_same"]
#negative_different = sample["negative_different"]
# The expected video and audio shapes
video_shape = (frames_per_video, downsample_xy_dims, downsample_xy_dims,
num_channels)
def _verify(sample, same_video, overlap):
self.assertEqual(sample.labels["same_video"], same_video)
self.assertEqual(sample.labels["overlap"], overlap)
self.assertEqual(type(sample.video), np.ndarray)
self.assertEqual(type(sample.audio), np.ndarray)
reshaped = np.reshape(sample.video, video_shape)
flat = sample.video.flatten().tolist()
self.assertTrue(isinstance(flat[0], int))
_verify(positive_same, 1, 1)
_verify(negative_same, 1, 0)
#_verify(negative_different, 0, 0)
generator = selection.sample_av_correspondence_examples(
frames_per_video=frames_per_video, max_num_samples=1, keys_only=True)
sample = generator.__next__()
serialized = sample["positive_same"].serialize()
def test_tfexampleselection_e2e(self):
table_tag = "{}-tfexe2e".format(self.table)
prefix = "train_"
video_shape = (4, 16, 16, 3)
audio_shape = (1234)
mock_num_examples = 100
selection = cbt_utils.TFExampleSelection(project=self.project,
instance=self.instance,
table=table_tag,
prefix=prefix)
# Check that the test table is empty
self.assertTrue(not selection.rows_at_least(1))
def _dummy_generator(n):
for _ in range(n + 1):
video = np.random.randint(0, 255, video_shape).astype(np.uint8)
audio = np.random.randint(0, 255, audio_shape).astype(np.uint8)
target_label = np.random.randint(0, 2, (1)).astype(np.uint8)
yield {
"audio": audio.tolist(),
"video": video.flatten().tolist(),
"target": target_label.tolist()
}
num_records_loaded = selection.random_load_from_generator(
generator=_dummy_generator(mock_num_examples))
self.assertEqual(num_records_loaded, mock_num_examples)
# In astronomically rare cases this could flake but with an alphabet
# of 26 and a prefix tag length of 4 the probability of having
# more than 50 collisions is low... like < (50/(26^4))^50...
# 9e-199 that's almost 1/(2*googles).
self.assertTrue(selection.rows_at_least(0.5 * mock_num_examples))
example_iterator = selection.iterate_tfexamples()
ex = example_iterator.__next__()
recv_audio = ex.features.feature['audio'].int64_list.value
recv_video = ex.features.feature['video'].int64_list.value
recv_target = ex.features.feature['target'].int64_list.value
_ = np.reshape(recv_audio, audio_shape)
_ = np.reshape(recv_video, video_shape)
_ = np.reshape(recv_target, (1))
def test_e2e_via_problem(self):
table_tag = "{}-prob".format(self.table)
prefix = "train"
manifest_path = "gs://clarify-dev/test/extract/manifest.csv"
frames_per_video = 15
source_table_tag = table_tag + "s"
target_table_tag = table_tag + "t"
source_selection = cbt_utils.RawVideoSelection(project=self.project,
instance=self.instance,
table=source_table_tag,
prefix=prefix)
extract.extract_to_cbt(manifest_path=manifest_path,
shard_id=0,
num_shards=1,
project=self.project,
instance=self.instance,
table=source_table_tag,
target_prefix=prefix,
tmp_dir=tempfile.mkdtemp())
test_problem = registry.problem("cbt_datagen_test_problem")
example_generator = test_problem.sampling_generator(source_selection)
target_selection = cbt_utils.TFExampleSelection(project=self.project,
instance=self.instance,
table=table_tag + "t",
prefix=prefix)
num_records_loaded = target_selection.random_load_from_generator(
generator=example_generator)
self.assertTrue(num_records_loaded > 0)
"""
if __name__ == "__main__":
tf.test.main()
|
import socket
import sys
HOST, PORT = "localhost", 8888
data = " ".join(sys.argv[1:])
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(bytes((data + "\n"),'utf-8'), (HOST, PORT))
received = sock.recv(1024)
print("Sent: {}".format(data))
print("Received: {}".format(received)) |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution as distributions
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.ops import math_ops
_condition_kwargs_dict = {
"bijector_kwargs": ("Python dictionary of arg names/values "
"forwarded to the bijector."),
"distribution_kwargs": ("Python dictionary of arg names/values "
"forwarded to the distribution."),
}
class TransformedDistribution(distributions.Distribution):
"""A Transformed Distribution.
A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
and a deterministic, invertible, differentiable transform, `Y = g(X)`. The
transform is typically an instance of the `Bijector` class and the base
distribution is typically an instance of the `Distribution` class.
A `Bijector` is expected to implement the following functions:
- `forward`,
- `inverse`,
- `inverse_log_det_jacobian`.
The semantics of these functions are outlined in the `Bijector` documentation.
Shapes, type, and reparameterization are taken from the base distribution.
  Write `P(Y=y)` for the cumulative distribution function of random variable (rv) `Y` and
  `p` for its derivative with respect to `y`. Assume that `Y=g(X)` where `g` is
continuous and `X=g^{-1}(Y)`. Write `J` for the Jacobian (of some function).
A `TransformedDistribution` alters the input/outputs of a `Distribution`
associated with rv `X` in the following ways:
* `sample`:
Mathematically:
```none
Y = g(X)
```
Programmatically:
```python
return bijector.forward(distribution.sample(...))
```
* `log_prob`:
Mathematically:
```none
(log o p o g^{-1})(y) + (log o det o J o g^{-1})(y)
```
Programmatically:
```python
    return (bijector.inverse_log_det_jacobian(y) +
            distribution.log_prob(bijector.inverse(y)))
```
* `log_cdf`:
Mathematically:
```none
(log o P o g^{-1})(y)
```
Programmatically:
```python
    return distribution.log_cdf(bijector.inverse(y))
```
* and similarly for: `cdf`, `prob`, `log_survival_function`,
`survival_function`.
A simple example constructing a Log-Normal distribution from a Normal
distribution:
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(mu=mu, sigma=sigma),
bijector=ds.bijector.Exp(),
name="LogNormalTransformedDistribution")
```
A `LogNormal` made from callables:
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(mu=mu, sigma=sigma),
bijector=ds.bijector.Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
inverse_log_det_jacobian_fn=(
        lambda y: -tf.reduce_sum(tf.log(y), reduction_indices=-1))),
    name="LogNormalTransformedDistribution")
```
Another example constructing a Normal from a StandardNormal:
```python
ds = tf.contrib.distributions
normal = ds.TransformedDistribution(
distribution=ds.Normal(mu=0, sigma=1),
bijector=ds.bijector.ScaleAndShift(loc=mu, scale=sigma, event_ndims=0),
name="NormalTransformedDistribution")
```
"""
def __init__(self,
distribution,
bijector,
validate_args=False,
name=None):
"""Construct a Transformed Distribution.
Args:
distribution: The base distribution class to transform. Typically an
instance of `Distribution`.
bijector: The object responsible for calculating the transformation.
Typically an instance of `Bijector`.
validate_args: Python boolean. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
name: The name for the distribution. Default:
`bijector.name + distribution.name`.
"""
parameters = locals()
parameters.pop("self")
name = name or bijector.name + distribution.name
self._distribution = distribution
self._bijector = bijector
super(TransformedDistribution, self).__init__(
dtype=self._distribution.dtype,
is_continuous=self._distribution.is_continuous,
is_reparameterized=self._distribution.is_reparameterized,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
# We let TransformedDistribution access _graph_parents since this class
# is more like a baseclass than derived.
graph_parents=(distribution._graph_parents + # pylint: disable=protected-access
list(bijector.parameters.values())),
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._distribution
@property
def bijector(self):
"""Function transforming x => y."""
return self._bijector
def _event_shape(self):
return self.bijector.forward_event_shape(
self.distribution.event_shape())
def _get_event_shape(self):
return self.bijector.get_forward_event_shape(
self.distribution.get_event_shape())
def _batch_shape(self):
return self.distribution.batch_shape()
def _get_batch_shape(self):
return self.distribution.get_batch_shape()
@distribution_util.AppendDocstring(
"""Samples from the base distribution and then passes through
the bijector's forward transform.""",
condition_kwargs_dict=_condition_kwargs_dict)
def _sample_n(self, n, seed=None,
bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.distribution.sample(sample_shape=n, seed=seed,
**distribution_kwargs)
# Recall that a bijector is named for its forward transform, i.e.,
# `Y = g(X)`,
return self.bijector.forward(x, **bijector_kwargs)
@distribution_util.AppendDocstring(
"""Implements `(log o p o g^{-1})(y) + (log o det o J o g^{-1})(y)`,
where `g^{-1}` is the inverse of `transform`.
Also raises a `ValueError` if `inverse` was not provided to the
distribution and `y` was not returned from `sample`.""",
condition_kwargs_dict=_condition_kwargs_dict)
def _log_prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(
y, **bijector_kwargs)
return ildj + self.distribution.log_prob(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
"""Implements `p(g^{-1}(y)) det|J(g^{-1}(y))|`, where `g^{-1}` is the
inverse of `transform`.
Also raises a `ValueError` if `inverse` was not provided to the
distribution and `y` was not returned from `sample`.""",
condition_kwargs_dict=_condition_kwargs_dict)
def _prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(
y, **bijector_kwargs)
return math_ops.exp(ildj) * self.distribution.prob(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _log_cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
    return self.distribution.log_cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _log_survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_survival_function(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.survival_function(x, **distribution_kwargs)
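# Illustrative usage sketch (added; follows the contrib API shown in the class
# docstring above, values are placeholders):
#
#   ds = tf.contrib.distributions
#   log_normal = ds.TransformedDistribution(
#       distribution=ds.Normal(mu=0., sigma=1.),
#       bijector=ds.bijector.Exp())
#   y = log_normal.sample(5)      # forward-transform of base-Normal samples
#   lp = log_normal.log_prob(y)   # base log_prob plus inverse log-det-Jacobian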
|
import unittest
from katas.kyu_7.ninja_vs_samurai_strike import Warrior
class WarriorTestCase(unittest.TestCase):
def setUp(self):
self.ninja = Warrior('Ninja')
self.samurai = Warrior('Samurai')
def test_equals(self):
self.samurai.strike(self.ninja, 3)
self.assertEqual(self.ninja.health, 70)
|
CHEPRANOJ=''' Cau
+-----------X-------------------------XX Chuong
| Giay Duong
|
Ha Dong Nong |
Ho Chi XX---------------X------------------------X--------------------------X----+
Minh Dong Anh Nghiep
Potoční
+-------------------------X---------------------XX Chebská II.
|
|
|
Národní Vozovna Jana Koh-i-noor
Bílá Hora XX-----------------+ +----------X-------------------------Střešice--------X-----------------X-------------------XX Nádraží
| | Divadlo | Masaryka Hostivař
| | |
| | |
X Vyšehrad |
| | |
| | |
Koželužská | | Chebský |
Chebská I.XX-------X---------+ +---------------------------X-----------+
hrad
Lothlórien
+------------X
Ered Luin Mordor | |
Middle XX-----------------X--------------------------X-----------+ |
|
|
| Bruinen
+----------------------X---------------------XX Earth
'''
|
"""
Basic definition for a file probe, for inspecting the fields.
"""
class MediaProbe(object):
def __init__(self, filename):
object.__init__(self)
self.__filename = filename
self.__tags = {}
self.sample_rate = None
self.bit_rate = None
self.channels = None
self.codec = None
@property
def filename(self):
return self.__filename
def tag(self, name):
"""
        Return the value of the named tag if probing discovered it, otherwise None.
"""
if name in self.__tags:
return self.__tags[name]
return None
@property
def tag_keys(self):
return self.__tags.keys()
def set_tag(self, name, value):
assert isinstance(name, str)
if value is None:
return
if not isinstance(value, str):
print("bad name/value: {0}={1}".format(repr(name), repr(value)))
assert isinstance(value, str)
value = value.strip()
        if len(value) > 0:
self.__tags[name] = value
def get_tags(self):
return dict(self.__tags)
def transcode(self, tofile, sample_rate=44100, bit_rate=0, channels=2, codec=None, verbose=False):
raise NotImplementedError()
class ProbeFactory(object):
"""
Probes files.
"""
def is_supported(self, filename):
"""
Returns True if the filename is supported by this prober.
"""
raise NotImplementedError()
def probe(self, filename):
"""
Returns a MediaProbe for the filename.
"""
raise NotImplementedError()
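# Hypothetical sketch (not part of the original module) of a concrete prober
# built on this interface; "WavProbeFactory" and its use of the standard
# library "wave" module are assumptions for illustration only.
import wave

class WavProbeFactory(ProbeFactory):
    def is_supported(self, filename):
        return filename.lower().endswith(".wav")

    def probe(self, filename):
        media = MediaProbe(filename)
        with wave.open(filename, "rb") as wav:
            media.sample_rate = wav.getframerate()
            media.channels = wav.getnchannels()
            media.codec = "pcm"
        return media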
|
######################################################################
# Author: Morgan Benningfield
# Username: benningfieldm
#
# Assignment: A03: A Pair of Fully Functional Gitty Psychedelic Robotic Turtles
#
# Purpose: Draws some triangles and is supposed to look pretty. Totally not a reference to a Nintendo franchise.
#
# https://docs.google.com/document/d/1gZcK3zgjuXRP_axcvjqHHNq6AYC5Y-zop1d9dtkubIg/edit?usp=sharing
######################################################################
# Acknowledgements:
# You two for making the header outline.
# Rebekah Whitford for reexplaining some of the terms to me and referencing older assignments to look at.
# http://www.wolframalpha.com/widgets/gallery/view.jsp?id=c0abe9808671bca189c7e6a560739ae4 To find hex codes
# https://en.wikipedia.org/wiki/Equilateral_triangle Because I'm bad at geometry.
# https://cdn.shopify.com/s/files/1/0941/8552/products/Legend_of_Zelda_-_Triforce.jpg?v=1530023743 Okay fine I *really did*
# make something from a game
# Lovell's branch for reference on how to make words
####################################################################################
import turtle
def triangles(x, y):
"""
Draws some triangles. HYAH!
:param x: x coordinate of starting point
:param y: same as x but y
:return: None
"""
hyah = turtle.Turtle()
hyah.hideturtle()
hyah.color("#C8B400") # A golden color. Or mustard. Whelp.
hyah.penup()
hyah.setposition(x, y)
hyah.pendown()
hyah.begin_fill()
for side in range(3):
hyah.forward(200)
hyah.left(120)
hyah.end_fill()
def text(t, w):
"""
Uses a turtle to write some words.
:param t: Turtle used
:param w: Words
"""
t.goto(-20, -200)
t.pendown()
t.write(w, move=False, align='center', font=("Courier New", 29))
def main():
"""
The main function. Sets the background color and also chooses where these triangles go.
"""
window = turtle.Screen()
window.bgcolor("green") # Green is not a creative color
triangles(25, -100) # First triangle
triangles(-175, -100) # Second triangle
triangles(-75, 75) # THIRD TRIANGLE
    # PLOT TWIST: THERE'S AN INVISIBLE FOURTH TRIANGLE. WHOOOAAA
    tortoise = turtle.Turtle()  # Not a turtle but a tortoise. You shall write words.
tortoise.hideturtle()
tortoise.penup()
words = "This is the TRIBORCE" # Not the Triforce. The Triborce. Obviously better than the "original"
text(tortoise, words)
window.exitonclick() # Makes it so I can exit on command rather than it trying to kick us out.
main() # CALL THAT MAIN
|
#!/usr/bin/env python3
import i3ipc
import argparse
parser = argparse.ArgumentParser(description='un-fullscreen current container, '
'fullscreen next container, focus on new fullscreen')
parser.add_argument('direction', type=int, help='0: backwards, 1: forwards')
parser.add_argument('--times', '-t', type=int, default=1, help='how many '
'times')
args = parser.parse_args()
i3 = i3ipc.Connection()
root = i3.get_tree()
focused = root.find_focused()
workspace = focused.workspace()
fullscreen = workspace.find_fullscreen()
leaves = workspace.leaves()
number_of_leaves = len(leaves)
times = args.times % number_of_leaves
def next_index(direction=1):
    # Locate the currently fullscreen container among the workspace leaves,
    # then step `times` positions in the requested direction (wrapping around).
    old_index = -1
    for i in range(number_of_leaves):
        if leaves[i].id == fullscreen[0].id:
            old_index = i
    if old_index == -1:
        raise Exception("This shouldn't happen")
    return (old_index + direction * times) % number_of_leaves
if len(fullscreen) == 1 and number_of_leaves > 1 and times > 0:
    command = ""
    #command += "[con_id=%s] fullscreen toggle;" % (fullscreen[0].id)
    if args.direction == 0:
        new_index = next_index(-1)
    else:
        new_index = next_index(1)
    command += "[con_id=%s] fullscreen toggle;" % (leaves[new_index].id)
    command += "[con_id=%s] focus;" % (leaves[new_index].id)
    i3.command(command)
|
"""Tests to make sure tests given in the documentation work"""
import os
import shutil
import sys
import unittest
import atsim.potentials
import atsim.potentials.config
from atsim.potentials import EAMPotential, Potential
import py.path
import pytest
from ._rundlpoly import extractDLPOLYEnergy, needsDLPOLY, runDLPoly
from ._rungulp import gulp_uo2_energy_fixture, needsGULP
from ._runlammps import extractLAMMPSEnergy, needsLAMMPS, runLAMMPS
from ._tempfiletestcase import TempfileTestCase
def _getDocsDirectory():
"""Returns absolute path to docs/ directory"""
docsdir = os.path.join("docs", "potentials")
return os.path.abspath(docsdir)
def _get_user_guide_directory():
"""Returns absolute path to docs/ directory"""
docsdir = os.path.join("docs", "user_guide")
return os.path.abspath(docsdir)
def _getLAMMPSResourceDirectory():
return os.path.join(os.path.dirname(__file__), 'lammps_resources')
def _getDLPolyResourceDirectory():
return os.path.join(os.path.dirname(__file__), 'dl_poly_resources')
if sys.version_info.major == 3 and sys.version_info.minor >= 5:
import importlib.util
def _loadModule(scriptname):
name = os.path.basename(scriptname)
name = os.path.splitext(name)[0]
spec = importlib.util.spec_from_file_location(name, scriptname)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
elif sys.version_info.major == 2:
import imp
def _loadModule(scriptname):
name = os.path.basename(scriptname)
name = os.path.splitext(name)[0]
with open(scriptname) as infile:
mod = imp.load_module(name, infile, scriptname, ('.py', 'U', 1))
# mod = importlib.load_module(name,infile, scriptname, ('.py', 'U', 1))
return mod
else:
raise Exception(
"No implementation of _loadModule for python version {}".format(sys.version))
# basak_tabulate.py
class basak_tabulateTestCase(TempfileTestCase):
"""Test docs/potentials/basak_tabulate.py"""
test_name = os.path.join(
_getDocsDirectory(), os.pardir, "user_guide", "basak_tabulate.py")
@needsDLPOLY
def testExample(self):
"""Test example basak_tabulate.py"""
from atsim.potentials import potentialforms
import itertools
exampleModule = _loadModule(self.test_name)
dldir = _getDLPolyResourceDirectory()
oldpwd = os.getcwd()
try:
os.chdir(self.tempdir)
shutil.copyfile(os.path.join(dldir, "CONTROL_pair"), "CONTROL")
exampleModule.main()
def dotest(d, testfunc):
# exampleModule.main()
pobjs = exampleModule.makePotentialObjects()
species = [d["speciesA"], d["speciesB"]]
spairs = [
tuple(sorted(p)) for p in itertools.combinations_with_replacement(species, 2)]
species = set(spairs)
pobjs = [p for p in pobjs if tuple(
sorted([p.speciesA, p.speciesB])) in species]
with open('TABLE', 'w') as outfile:
atsim.potentials.writePotentials(
'DL_POLY',
pobjs,
6.5, 6500,
out=outfile)
d = dict(d)
for i in range(4):
r = float(i)
r += 1.0
expect = testfunc(r)
d['Ax'] = r
with open(os.path.join(dldir, "CONFIG_pair.in"), 'r') as infile:
with open("CONFIG", "w") as outfile:
outfile.write(infile.read() % d)
from io import StringIO
sio = StringIO()
print(u"vdw %d" % len(pobjs), file=sio)
for p in pobjs:
print(u"%s %s tab" %
(p.speciesA, p.speciesB), file=sio)
d["potDef"] = sio.getvalue()
with open(os.path.join(dldir, "FIELD_pair.in"), 'r') as infile:
with open("FIELD", "w") as outfile:
outfile.write(infile.read() % d)
runDLPoly()
dlenergy = extractDLPOLYEnergy()
self.assertAlmostEqual(expect, dlenergy, places=4)
os.remove("STATIS")
os.remove("CONFIG")
os.remove("FIELD")
d = dict(speciesA="O", speciesB="O",
Ax=0.0, Ay=0.0, Az=0.0,
Bx=0.0, By=0.0, Bz=0.0)
testfunc = potentialforms.buck(1633.00510, 0.327022, 3.948790)
dotest(d, testfunc)
d = dict(speciesA="U", speciesB="U",
Ax=0.0, Ay=0.0, Az=0.0,
Bx=0.0, By=0.0, Bz=0.0)
testfunc = potentialforms.buck(294.640000, 0.327022, 0.0)
dotest(d, testfunc)
d = dict(speciesA="O", speciesB="U",
Ax=0.0, Ay=0.0, Az=0.0,
Bx=0.0, By=0.0, Bz=0.0)
buck_OU = potentialforms.buck(693.648700, 0.327022, 0.0)
morse_OU = potentialforms.morse(1.6500, 2.36900, 0.577190)
testfunc = atsim.potentials.plus(buck_OU, morse_OU)
dotest(d, testfunc)
finally:
os.chdir(oldpwd)
def testLAMMPSExample(self):
"""Test doumentation example Quick-Start: LAMMPS."""
test_name = os.path.join(
_get_user_guide_directory(), "basak_tabulate_lammps.py")
oldpwd = os.getcwd()
try:
os.chdir(self.tempdir)
exampleModule = _loadModule(test_name)
exampleModule.main()
finally:
os.chdir(oldpwd)
# eam_tabulate_example1.py
class eam_tabulate_example1TestCase(TempfileTestCase):
"""Test docs/potentials/eam_example1.py"""
test_name = os.path.join(_getDocsDirectory(), "eam_example1.py")
@needsLAMMPS
def testExample(self):
"""Test example eam_example1.py"""
exampleModule = _loadModule(self.test_name)
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "doc_example1_Ag_fcc.lmpstruct"), os.path.join(self.tempdir, "structure.lmpstruct"))
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "calc_energy.lmpin"), os.path.join(self.tempdir, "calc_energy.lmpin"))
oldpwd = os.getcwd()
os.chdir(self.tempdir)
try:
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam/alloy\n")
potfile.write("pair_coeff * * Ag.eam.alloy Ag\n")
# Run the main method
exampleModule.main()
runLAMMPS()
energy = extractLAMMPSEnergy()
self.assertAlmostEqual(-8.23982879, energy, places=5)
finally:
os.chdir(oldpwd)
class eam_tabulate_example1_procedural_TestCase(TempfileTestCase):
"""Test docs/potentials/eam_tabulate_example1.py"""
test_name = os.path.join(_getDocsDirectory(), "eam_tabulate_example1.py")
@needsLAMMPS
def testExample(self):
"""Test example eam_tabulate_example1.py"""
exampleModule = _loadModule(self.test_name)
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "doc_example1_Ag_fcc.lmpstruct"), os.path.join(self.tempdir, "structure.lmpstruct"))
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "calc_energy.lmpin"), os.path.join(self.tempdir, "calc_energy.lmpin"))
oldpwd = os.getcwd()
os.chdir(self.tempdir)
try:
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam\n")
potfile.write("pair_coeff 1 1 Ag.eam\n")
# Run the main method
exampleModule.main()
runLAMMPS()
energy = extractLAMMPSEnergy()
self.assertAlmostEqual(-8.23982879, energy, places=5)
finally:
os.chdir(oldpwd)
class eam_tabulate_example2TestCase(TempfileTestCase):
"""Test docs/potentials/eam_tabulate_example2a.py and docs/potentials/eam_tabulate_example2b.py"""
test_nameA = os.path.join(_getDocsDirectory(), "eam_tabulate_example2a.py")
test_nameA_obj = os.path.join(_getDocsDirectory(), "eam_example2a.py")
test_nameB = os.path.join(_getDocsDirectory(), "eam_tabulate_example2b.py")
test_nameB_obj = os.path.join(_getDocsDirectory(), "eam_example2b.py")
@needsLAMMPS
def testExampleA_Pair(self):
"""Test pair-potentials correctly defined for EAM tabulation documentation example 2a"""
exampleModule = _loadModule(self.test_nameA)
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "setfl_pair.lmpstruct"), os.path.join(self.tempdir, "structure.lmpstruct"))
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "calc_energy.lmpin"), os.path.join(self.tempdir, "calc_energy.lmpin"))
inputExpect = [
(1.24246478E-02, "Al Al"), # Al Al
(-0.121863537, "Al Cu"), # Al Cu
(-0.179150283, "Cu Cu") # Cu Cu
]
oldpwd = os.getcwd()
os.chdir(self.tempdir)
try:
_eampotentials, pairPotentials = exampleModule.makePotentialObjects()
eamPotentials = None
# Create EAMPotential
def density(r):
return 0.0
def embed(rho):
return 0.0
eamPotentials = [
EAMPotential("Cu", 29, 63.55, embed, density),
EAMPotential("Al", 13, 26.98, embed, density)]
nrho = 50000
drho = 0.001
nr = 12000
dr = 0.001
from atsim.potentials import writeSetFL
with open("table.set", 'w') as outfile:
writeSetFL(
nrho, drho,
nr, dr,
eamPotentials,
pairPotentials,
comments=['Zhou Al Cu', "", ""],
out=outfile)
for expect, potmap in inputExpect:
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam/alloy\n")
potfile.write("pair_coeff * * table.set "+potmap+"\n")
runLAMMPS()
energy = extractLAMMPSEnergy()
self.assertAlmostEqual(expect, energy, msg=potmap)
finally:
os.chdir(oldpwd)
@needsLAMMPS
def testExampleA_Density(self):
"""Test density functions correctly defined for EAM tabulation documentation example 2a"""
exampleModule = _loadModule(self.test_nameA)
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "setfl_pair.lmpstruct"), os.path.join(self.tempdir, "structure.lmpstruct"))
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "calc_energy.lmpin"), os.path.join(self.tempdir, "calc_energy.lmpin"))
oldpwd = os.getcwd()
os.chdir(self.tempdir)
try:
def nullfunc(r):
return 0.0
eamPotentials, pairPotentials = exampleModule.makePotentialObjects()
pairPotentials = None
pairPotentials = [
Potential("Cu", "Cu", nullfunc),
Potential("Al", "Al", nullfunc),
Potential("Al", "Cu", nullfunc)
]
# Create EAMPotential
def embed(rho):
return rho
for epot in eamPotentials:
epot.embeddingFunction = embed
nrho = 50000
drho = 0.001
nr = 12000
dr = 0.001
from atsim.potentials import writeSetFL
with open("table.set", 'w') as outfile:
writeSetFL(
nrho, drho,
nr, dr,
eamPotentials,
pairPotentials,
comments=['Zhou Al Cu', "", ""],
out=outfile)
inputExpect = [
(2.0*1.218017211, "Cu Cu"), # Cu Cu
(2.0*1.716990097, "Al Al"), # Al Al
(1.218017211+1.716990097, "Al Cu") # Al Cu
]
for expect, potmap in inputExpect:
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam/alloy\n")
potfile.write("pair_coeff * * table.set "+potmap+"\n")
runLAMMPS()
energy = extractLAMMPSEnergy()
self.assertAlmostEqual(expect, energy, msg=potmap)
# Now repeat for triplet of atoms
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "setfl_triplet.lmpstruct"), os.path.join(self.tempdir, "structure.lmpstruct"))
dens_Cu = [
p.electronDensityFunction for p in eamPotentials if p.species == "Cu"][0]
dens_Al = [
p.electronDensityFunction for p in eamPotentials if p.species == "Al"][0]
hyp = 3.818376618407357
inputExpect = [
(4 * dens_Cu(2.7) + 2 * dens_Cu(hyp), "Cu Cu"), # Cu Cu
(4 * dens_Al(2.7) + 2 * dens_Al(hyp), "Al Al"), # Al Al
(2 * dens_Cu(2.7) + 2 * dens_Cu(hyp) + \
2*dens_Al(2.7), "Al Cu"), # Al Cu
(2 * dens_Al(2.7) + 2 * dens_Al(hyp) + \
2*dens_Cu(2.7), "Cu Al") # Cu Al
]
for expect, potmap in inputExpect:
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam/alloy\n")
potfile.write("pair_coeff * * table.set "+potmap+"\n")
runLAMMPS()
energy = extractLAMMPSEnergy()
self.assertAlmostEqual(expect, energy, msg=potmap)
finally:
os.chdir(oldpwd)
def testWriteSetFLEmbedCu(self):
"""Test Cu embedding function correctly defined for EAM tabulation documentation example 2"""
exampleModule = _loadModule(self.test_nameA)
eamPotentials, _pairPotentials = exampleModule.makePotentialObjects()
embed_Cu = [
p.embeddingFunction for p in eamPotentials if p.species == "Cu"][0]
self.assertAlmostEqual(-1.76619128240398, embed_Cu(10.0))
self.assertAlmostEqual(-2.18790796129658, embed_Cu(20.0))
self.assertAlmostEqual(-2.17281697911785, embed_Cu(30.0))
self.assertAlmostEqual(-2.13787794765212, embed_Cu(40.0))
def testWriteSetFLEmbedAl(self):
"""Test Al embedding function correctly defined for EAM tabulation documentation example 2"""
exampleModule = _loadModule(self.test_nameA)
eamPotentials, _pairPotentials = exampleModule.makePotentialObjects()
embed_Al = [
p.embeddingFunction for p in eamPotentials if p.species == "Al"][0]
self.assertAlmostEqual(-2.35881750559297, embed_Al(10.0))
self.assertAlmostEqual(-2.82971758138417, embed_Al(20.0))
self.assertAlmostEqual(-2.75841139984064, embed_Al(30.0))
self.assertAlmostEqual(-2.47821972143384, embed_Al(40.0))
@needsLAMMPS
def testExampleA(self):
"""Test example eam_tabulate_example2a.py"""
exampleModule = _loadModule(self.test_nameA)
oldpwd = os.getcwd()
os.chdir(self.tempdir)
try:
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "calc_energy.lmpin"), os.path.join(self.tempdir, "calc_energy.lmpin"))
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "setfl_triplet.lmpstruct"), os.path.join(self.tempdir, "structure.lmpstruct"))
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "Zhou_AlCu.setfl"), os.path.join(self.tempdir, "table.setfl"))
potmaps = ["Cu Cu", "Al Al", "Al Cu", "Cu Al"]
expect = []
# Run the Zhou tabulation created using tools from http://www.ctcms.nist.gov/potentials/Zhou04.html
for potmap in potmaps:
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam/alloy\n")
potfile.write("pair_coeff * * table.setfl "+potmap+"\n")
runLAMMPS()
energy = extractLAMMPSEnergy()
                self.assertIsNotNone(energy)
expect.append(energy)
# Make tabulation
exampleModule.main()
for potmap, expectEnergy in zip(potmaps, expect):
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam/alloy\n")
potfile.write(
"pair_coeff * * Zhou_AlCu.eam.alloy "+potmap+"\n")
runLAMMPS()
energy = extractLAMMPSEnergy()
self.assertAlmostEqual(
expectEnergy, energy, places=4, msg=potmap)
finally:
os.chdir(oldpwd)
def testExampleA_obj(self):
"""Test example eam_example2a.py"""
exampleModule = _loadModule(self.test_nameA_obj)
oldpwd = os.getcwd()
os.chdir(self.tempdir)
try:
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "calc_energy.lmpin"), os.path.join(self.tempdir, "calc_energy.lmpin"))
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "setfl_triplet.lmpstruct"), os.path.join(self.tempdir, "structure.lmpstruct"))
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "Zhou_AlCu.setfl"), os.path.join(self.tempdir, "table.setfl"))
potmaps = ["Cu Cu", "Al Al", "Al Cu", "Cu Al"]
expect = []
# Run the Zhou tabulation created using tools from http://www.ctcms.nist.gov/potentials/Zhou04.html
for potmap in potmaps:
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam/alloy\n")
potfile.write("pair_coeff * * table.setfl "+potmap+"\n")
runLAMMPS()
energy = extractLAMMPSEnergy()
                self.assertIsNotNone(energy)
expect.append(energy)
exampleModule.main()
for potmap, expectEnergy in zip(potmaps, expect):
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam/alloy\n")
potfile.write(
"pair_coeff * * Zhou_AlCu.eam.alloy "+potmap+"\n")
runLAMMPS()
energy = extractLAMMPSEnergy()
self.assertAlmostEqual(
expectEnergy, energy, places=4, msg=potmap)
finally:
os.chdir(oldpwd)
@needsLAMMPS
@needsDLPOLY
def testExample2b(self):
exampleModuleB = _loadModule(self.test_nameB)
self._crossCheckLAMMPS_DLPOLY(exampleModuleB)
@needsLAMMPS
@needsDLPOLY
def testExample2b_obj(self):
exampleModuleB = _loadModule(self.test_nameB_obj)
self._crossCheckLAMMPS_DLPOLY(exampleModuleB)
def _crossCheckLAMMPS_DLPOLY(self, exampleModuleB):
"""Check that models tabulated for LAMMPS and DL_POLY give the same result (cross check example 2a and 2b)."""
exampleModuleA = _loadModule(self.test_nameA)
# exampleModuleB = _loadModule(self.test_nameB)
oldpwd = os.getcwd()
os.chdir(self.tempdir)
try:
# DL_POLY Tabulation
shutil.copyfile(os.path.join(_getDLPolyResourceDirectory(
), "CONTROL_random_Al_Cu"), os.path.join(self.tempdir, "CONTROL"))
shutil.copyfile(os.path.join(_getDLPolyResourceDirectory(
), "CONFIG_random_Al_Cu"), os.path.join(self.tempdir, "CONFIG"))
shutil.copyfile(os.path.join(_getDLPolyResourceDirectory(
), "FIELD_random_Al_Cu"), os.path.join(self.tempdir, "FIELD"))
# Create TABEAM
exampleModuleB.main()
runDLPoly()
# import pdb;pdb.set_trace()
dlpolyenergy = extractDLPOLYEnergy()
# LAMMPS Tabulation
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "calc_energy.lmpin"), os.path.join(self.tempdir, "calc_energy.lmpin"))
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "random_Al_Cu.lmpstruct"), os.path.join(self.tempdir, "structure.lmpstruct"))
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam/alloy\n")
potfile.write("pair_coeff * * Zhou_AlCu.eam.alloy Al Cu\n")
# Create the table files
exampleModuleA.main()
# import pdb;pdb.set_trace()
runLAMMPS()
lammpsenergy = extractLAMMPSEnergy()
self.assertAlmostEqual(lammpsenergy, dlpolyenergy, places=4)
finally:
os.chdir(oldpwd)
# eam_tabulate_example3a.py
# class eam_tabulate_example3TestCase(TempfileTestCase):
# """Test docs/potentials/eam_tabulate_example3a.py and eam_tabulate_example3b.py"""
# test_nameA = os.path.join(_getDocsDirectory(), "eam_tabulate_example3a.py")
# test_nameA_obj = os.path.join(_getDocsDirectory(), "eam_example3a.py")
# test_nameB = os.path.join(_getDocsDirectory(), "eam_tabulate_example3b.py")
# test_nameB_obj = os.path.join(_getDocsDirectory(), "eam_example3b.py")
def test_eam_tabulate_example3_ExampleA(tmpdir):
"""Test example eam_tabulate_example3a.py"""
test_nameA = os.path.join(_getDocsDirectory(), "eam_tabulate_example3a.py")
exampleModule = _loadModule(test_nameA)
oldpwd = os.getcwd()
os.chdir(tmpdir.strpath)
try:
exampleModule.main()
finally:
os.chdir(oldpwd)
def test_eam_tabulate_example3_ExampleB(tmpdir):
"""Test example eam_tabulate_example3b.py"""
test_nameB = os.path.join(_getDocsDirectory(), "eam_tabulate_example3b.py")
exampleModule = _loadModule(test_nameB)
oldpwd = os.getcwd()
os.chdir(tmpdir.strpath)
try:
exampleModule.main()
finally:
os.chdir(oldpwd)
@needsLAMMPS
@needsDLPOLY
@pytest.mark.parametrize(("test_name_A", "test_name_B"), [
("eam_tabulate_example3a.py", "eam_tabulate_example3b.py"),
("eam_example3a.py", "eam_example3b.py"),
("eam_example3a.py", "eam_tabulate_example3b.py"),
("eam_tabulate_example3a.py", "eam_example3b.py")
])
def test_eam_tabulate_example3_cross_check_LAMMPS_DLPOLY(test_name_A, test_name_B, tmpdir):
test_name_A = os.path.join(_getDocsDirectory(), test_name_A)
test_name_B = os.path.join(_getDocsDirectory(), test_name_B)
exampleModuleA = _loadModule(test_name_A)
exampleModuleB = _loadModule(test_name_B)
_crossCheckLAMMPS_DLPOLY(tmpdir, exampleModuleA, exampleModuleB)
def _crossCheckLAMMPS_DLPOLY(tmpdir, exampleModuleA, exampleModuleB):
"""Check that models tabulated for LAMMPS and DL_POLY give the same result"""
tmpdir = tmpdir.strpath
oldpwd = os.getcwd()
os.chdir(tmpdir)
try:
# DL_POLY Tabulation
shutil.copyfile(os.path.join(_getDLPolyResourceDirectory(
), "CONTROL_random_Al_Fe"), os.path.join(tmpdir, "CONTROL"))
shutil.copyfile(os.path.join(_getDLPolyResourceDirectory(
), "CONFIG_random_Al_Fe"), os.path.join(tmpdir, "CONFIG"))
shutil.copyfile(os.path.join(_getDLPolyResourceDirectory(
), "FIELD_random_Al_Fe"), os.path.join(tmpdir, "FIELD"))
# Create TABEAM
exampleModuleB.main()
runDLPoly()
dlpolyenergy = extractDLPOLYEnergy()
# LAMMPS Tabulation
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "calc_energy.lmpin"), os.path.join(tmpdir, "calc_energy.lmpin"))
shutil.copyfile(os.path.join(_getLAMMPSResourceDirectory(
), "random_Al_Fe.lmpstruct"), os.path.join(tmpdir, "structure.lmpstruct"))
with open("potentials.lmpinc", "w") as potfile:
potfile.write("pair_style eam/fs\n")
potfile.write("pair_coeff * * Mendelev_Al_Fe.eam.fs Al Fe\n")
# Create the table files
exampleModuleA.main()
# import pdb;pdb.set_trace()
runLAMMPS()
lammpsenergy = extractLAMMPSEnergy()
assert lammpsenergy == pytest.approx(dlpolyenergy)
finally:
os.chdir(oldpwd)
try:
import numpy
NUMPY_AVAILABLE = True
except ImportError:
NUMPY_AVAILABLE = False
# zbl_spline.py
class zbl_splineTestCase(TempfileTestCase):
"""Test docs/potentials/zbl_spline.py"""
test_name = os.path.join(_getDocsDirectory(), "zbl_spline.py")
@unittest.skipIf(not NUMPY_AVAILABLE, "numpy is not installed")
def testExample(self):
"""Test example zbl_spline.py"""
exampleModule = _loadModule(self.test_name)
oldpwd = os.getcwd()
os.chdir(self.tempdir)
try:
exampleModule.main()
output_path = os.path.join(self.tempdir, "bks_buck.dat")
assert os.path.exists(output_path)
with open(output_path) as infile:
line = next(infile)
tokens = line.split()
r = float(tokens[0])
assert pytest.approx(0.1) == r
line = next(infile)
tokens = line.split()
r = float(tokens[0])
assert pytest.approx(0.1 + (10.0-0.1)/5000.0) == r
finally:
os.chdir(oldpwd)
basak_aspot_files = py.path.local(__file__).dirpath(
"..", "docs", "user_guide", "example_files").listdir("basak*.aspot")
@needsLAMMPS
@pytest.mark.parametrize("aspotfile", basak_aspot_files)
def test_basak_files(tmpdir, aspotfile):
# Copy files in from example directory
srcdir = py.path.local(__file__).dirpath(
"..", "docs", "quick_start", "basak_tabulate_lammps")
srcdir.join("UO2.lmpstruct").copy(tmpdir.join("UO2.lmpstruct"))
if aspotfile.basename == "basak_table_form.aspot":
input_file = py.path.local(_getLAMMPSResourceDirectory()).join(
"basak_energy_table_form.lmpin")
expect_e = pytest.approx(-172.839, abs=1e-3)
else:
input_file = py.path.local(
_getLAMMPSResourceDirectory()).join("basak_energy.lmpin")
expect_e = pytest.approx(-172.924, abs=1e-3)
input_file.copy(tmpdir.join("calc_energy.lmpin"))
# Generate table file
config = atsim.potentials.config.Configuration()
tabulation = config.read(aspotfile.open())
with tmpdir.join("Basak.lmptab").open("w") as outfile:
tabulation.write(outfile)
runLAMMPS(cwd=tmpdir.strpath)
actual_e = extractLAMMPSEnergy(cwd=tmpdir.strpath)
assert expect_e == actual_e
morelon_files = py.path.local(__file__).dirpath(
"..", "docs", "user_guide", "example_files").listdir("morelon*.aspot")
@needsGULP
@pytest.mark.parametrize("charges", [[-1.613626, 3.227252]])
@pytest.mark.parametrize("aspot", morelon_files)
def test_morelon_files(aspot, gulp_uo2_energy_fixture):
tmpdir = gulp_uo2_energy_fixture
# aspot = py.path.local(__file__).dirpath("..", "docs", "user_guide", "example_files").join("morelon.aspot")
CPT = atsim.potentials.config.ConfigParserOverrideTuple
# overrides = [ CPT("Tabulation", "target", "GULP"), CPT("Tabulation", "cutoff", "10.0"), CPT("Tabulation", "nr", "1001")]
overrides = [CPT("Tabulation", "target", "GULP")]
# import io
with aspot.open('r', encoding='utf-8') as infile:
cp = atsim.potentials.config.ConfigParser(infile, overrides=overrides)
tabulation = atsim.potentials.config.Configuration().read_from_parser(cp)
with tmpdir.join("potentials.lib").open("w", encoding='utf-8') as tabfile:
tabulation.write(tabfile)
expect = pytest.approx(-263.60598484)
actual_energy = gulp_uo2_energy_fixture.energy()
assert expect == actual_energy
@needsLAMMPS
@pytest.mark.parametrize(("evaluate_lmpin", "aspot_filename", "tab_filename", "expect_e", "a_flag", "b_flag"),
[
("standard_evaluate.lmpin", "standard_eam.aspot",
"standard_eam.eam", 24.00, True, False),
("standard_evaluate.lmpin", "standard_eam.aspot",
"standard_eam.eam", 131.8822, False, True),
("standard_evaluate.lmpin", "standard_eam.aspot",
"standard_eam.eam", 24.00 + 131.8822, True, True),
("finnis_sinclair_evaluate.lmpin", "finnis_sinclair_eam.aspot",
"finnis_sinclair.eam.fs", 24.00, True, False),
("finnis_sinclair_evaluate.lmpin", "finnis_sinclair_eam.aspot",
"finnis_sinclair.eam.fs", 209.137, False, True),
("finnis_sinclair_evaluate.lmpin", "finnis_sinclair_eam.aspot",
"finnis_sinclair.eam.fs", 24.00 + 209.137, True, True)
])
def test_user_guide_eam(tmpdir, evaluate_lmpin, aspot_filename, tab_filename, expect_e, a_flag, b_flag):
# Copy files in from example directory
srcdir = py.path.local(__file__).dirpath(
"..", "docs", "user_guide", "example_files")
srcdir.join("toy_structure.lmpstruct").copy(
tmpdir.join("toy_structure.lmpstruct"))
input_file = srcdir.join(evaluate_lmpin)
input_file.copy(tmpdir.join("calc_energy.lmpin"))
# Generate table file
config = atsim.potentials.config.Configuration()
aspotfile = srcdir.join(aspot_filename)
a_embed = 'as.zero'
b_embed = 'as.zero'
if a_flag:
a_embed = 'as.polynomial 0 1'
if b_flag:
b_embed = 'as.polynomial 0 1'
overrides = [
atsim.potentials.config.ConfigParserOverrideTuple(
u'EAM-Embed', 'A', a_embed),
atsim.potentials.config.ConfigParserOverrideTuple(
u'EAM-Embed', 'B', b_embed)
]
cp = atsim.potentials.config.ConfigParser(aspotfile.open(), overrides)
tabulation = config.read_from_parser(cp)
# tabulation = config.read(aspotfile.open())
with tmpdir.join(tab_filename).open("w") as outfile:
tabulation.write(outfile)
expect_e = pytest.approx(expect_e, abs=1e-3)
runLAMMPS(cwd=tmpdir.strpath)
actual_e = extractLAMMPSEnergy(cwd=tmpdir.strpath)
assert expect_e == actual_e
|
# Generated by Django 3.0.8 on 2020-09-16 03:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modulector', '0002_oldrefseqmapping'),
]
operations = [
migrations.CreateModel(
name='GeneSymbolMapping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('refseq', models.CharField(max_length=40, unique=True)),
('symbol', models.CharField(max_length=20)),
],
),
]
|
import unittest
import requests
from assertpy import *
from unittest.mock import *
from src.main import Insurance, Login
import json
class TestMainInsurance(unittest.TestCase):
def setUp(self):
self.insurance = Insurance()
self.login = Login()
# Tests Add_new_car
def test_add_new_car_passed_ints(self):
assert_that(self.insurance.add_new_car).raises(ValueError).when_called_with(123, 234, 123, 21, 222, 13, 43)
def test_add_new_car_passed_floats(self):
assert_that(self.insurance.add_new_car).raises(ValueError).when_called_with(1.23, 2.34, 12.3, 2.1, 0.222, 1.3,
4.3)
def test_add_new_car_passed_strings_doesnt_raise_ValueError(self):
assert_that(
self.insurance.add_new_car("bmw", "z4", "NO2222", "Fafik", "Fontanna", "2021", "5.0 Benzyna")).is_equal_to(
'Error occurred in adding new car :( No connection to the server')
def test_add_new_car_mocked_response_code_success(self):
with patch.object(requests, 'post') as post_mock:
post_mock.return_value.status_code = 200
post_mock.return_value.json = "success"
assert self.insurance.add_new_car("bmw", "z4", "NO2222", "Fafik", "Fontanna", "2021",
"5.0 Benzyna") == post_mock.return_value.json
def test_add_new_car_mocked_response_code_400(self):
with patch.object(requests, 'post') as post_mock:
post_mock.return_value.status_code = 400
result = "Check your passed information (that car exists in database)"
assert self.insurance.add_new_car("audi", "a4", "WX32131", "Malik", "Montana", "1998",
"1.0 hybryda diesel") == result
def test_add_new_car_mocked_response_code_failure(self):
with patch.object(requests, 'post') as post_mock:
post_mock.return_value.status_code = 404
result = "Error occurred in adding new car :( No connection to the server"
assert self.insurance.add_new_car("aston martin", "db11", "NO3961U", "Wiktor", "Morawski", "2017",
"5.2 Benzyna") == result
# Tests remove car
def test_remove_car_CarID_int(self):
assert_that(self.insurance.remove_car).raises(ValueError).when_called_with(1111111111143)
def test_remove_car_CarID_float(self):
assert_that(self.insurance.remove_car).raises(ValueError).when_called_with(11.321143)
def test_remove_car_passed_strings_doesnt_raise_ValueError(self):
assert_that(self.insurance.remove_car("2021213123213")).is_equal_to(
'Error occurred in removing :( No connection to the server')
"""
@patch('src.main.requests.delete')
def test_remove_car(self, mocked_request):
mocked_request = Mock()
expected_response = {'deleted': '2021213123213'}
mocked_request.return_value.json.return_value = expected_response
mocked_request.return_value.status_code.return_value = 200
self.assertEqual(self.insurance.remove_car('2021213123213'), mocked_request.return_value.json.return_value)
"""
# Tests update Plates
def test_update_plates_CarID_int(self):
assert_that(self.insurance.update_plates).raises(ValueError).when_called_with(1111111111143, 'str')
def test_update_plates_CarID_float(self):
assert_that(self.insurance.update_plates).raises(ValueError).when_called_with(11.321143, 'str')
def test_update_plates_plates_int(self):
assert_that(self.insurance.update_plates).raises(ValueError).when_called_with('str', 1111111111143)
def test_update_plates_plates_float(self):
assert_that(self.insurance.update_plates).raises(ValueError).when_called_with('str', 11.321143)
def test_update_plates_passed_strings_doesnt_raise_ValueError(self):
assert_that(self.insurance.update_plates("2021213123213", "WX12345")).is_equal_to(
"Error occurred in updating car's plates :( No connection to the server")
def test_update_plates_mocked_response_code_success(self):
with patch.object(requests, 'put') as put_mock:
put_mock.return_value.status_code = 200
put_mock.return_value.json = "success"
assert self.insurance.update_plates("12314124", "NO2222") == put_mock.return_value.json
def test_update_plates_mocked_response_code_403_CarID_not_found(self):
with patch.object(requests, 'put') as put_mock:
put_mock.return_value.status_code = 403
result = "Not found car with that CarID"
assert self.insurance.update_plates("68749", "WX32131") == result
def test_update_plates_mocked_response_code_422_plates_not_found(self):
with patch.object(requests, 'put') as put_mock:
put_mock.return_value.status_code = 422
result = "Incorrect plates"
assert self.insurance.update_plates("232133", "WX32131") == result
def test_update_plates_mocked_response_code_failure(self):
with patch.object(requests, 'post') as put_mock:
put_mock.return_value.status_code = 404
result = "Error occurred in updating car's plates :( No connection to the server"
assert self.insurance.update_plates("845987", "NO3961U") == result
# Tests update_owner
def test_update_owner_CarID_int(self):
assert_that(self.insurance.update_owner).raises(ValueError).when_called_with(1111111111143, 'filip',
'szczescniak')
def test_update_owner_CarID_float(self):
assert_that(self.insurance.update_owner).raises(ValueError).when_called_with(11.321143, 'andzej', 'golota')
def test_update_owner_owners_name_int(self):
assert_that(self.insurance.update_owner).raises(ValueError).when_called_with('1231445', 1111111111143, 'Falcon')
def test_update_owner_owners_name_float(self):
assert_that(self.insurance.update_owner).raises(ValueError).when_called_with('2313322', 11.321143,
'krzysiowiak')
def test_update_owner_owners_surname_int(self):
assert_that(self.insurance.update_owner).raises(ValueError).when_called_with('1235555', 'kuba', 1143)
def test_update_owner_owners_surname_float(self):
assert_that(self.insurance.update_owner).raises(ValueError).when_called_with('3123124', 'baran', 11.321143)
def test_update_owner_mocked_response_success(self):
self.insurance.update_owner = Mock(name="put_response")
self.insurance.update_owner.return_value = 'Success'
result = self.insurance.update_owner('1232134', 'Wiktor', 'Morawski')
self.assertEqual('Success', result, 'return from update_owner is incorrect')
# Tests get_all_client_cars
    def test_get_all_client_cars_name_not_str(self):
        assert_that(self.insurance.get_all_client_cars).raises(ValueError).when_called_with(1111111111143,
                                                                                             'szczescniak')
    def test_get_all_client_cars_surname_not_str(self):
        assert_that(self.insurance.get_all_client_cars).raises(ValueError).when_called_with('golota', 11.321143)
@patch('src.main.Insurance.get_all_cars_info')
def test_get_all_client_cars_mocked_server_response(self, mocked_response):
mocked_response.return_value = [{"CarID": '3123331222', "brand": "BMW",
"model": "Series 3 F10",
"plates": "NO1234",
"owners_name": "Faik",
"owners_surname": "Malik",
"ending": '21-09-2021',
"Year": '2012',
"Engine": "3.0 Diesel"
}, {"CarID": '12312333',
"brand": "AUDI",
"model": "A3",
"plates": "NO22234",
"owners_name": "Fafik",
"owners_surname": "Malik",
"ending": '22-08-2022',
"Year": '2022',
"Engine": "1.0 Diesel"}]
expected = [{"CarID": '12312333',
"brand": "AUDI",
"model": "A3",
"plates": "NO22234",
"owners_name": "Fafik",
"owners_surname": "Malik",
"ending": '22-08-2022',
"Year": '2022',
"Engine": "1.0 Diesel"}]
result = self.insurance.get_all_client_cars('Fafik', 'Malik')
self.assertEqual(expected, result, "Not equal with fafik malik")
@patch('src.main.Insurance.get_all_cars_info')
def test_get_all_client_cars_mocked_server_Error_response(self, mocked_response):
mocked_response.return_value = 'Error occurred in connecting :( \n Try again later please'
assert_that(self.insurance.get_all_client_cars).raises(TypeError).when_called_with('Wiktor', 'Morawski')
class TestMainLogin(unittest.TestCase):
    def setUp(self):
self.insurance = Insurance()
self.login = Login()
# Tests welcome_menu
def test_welcome_menu_call_login_to_db(self):
pass
def test_menu_call_insurance_methods(self):
pass
|
class Guild:
def __init__(
self,
id,
name,
icon,
splash,
discovery_splash,
owner_id,
region,
afk_channel_id,
afk_timeout,
verification_level,
default_message_notifications,
explicit_content_filter,
roles,
emojis,
features,
mfa_level,
system_channel_id,
system_channel_flags,
rules_channel_id,
vanity_url_code,
description,
banner,
premium_tier,
preferred_locale,
public_updates_channel_id,
):
self.id = id
self.name = name
self.icon = icon
self.splash = splash
self.discovery_splash = discovery_splash
self.owner_id = owner_id
self.region = region
self.afk_channel_id = afk_channel_id
self.afk_timeout = afk_timeout
self.verification_level = verification_level
self.default_message_notifications = default_message_notifications
self.explicit_content_filter = explicit_content_filter
self.roles = roles
self.emojis = emojis
self.features = features
self.mfa_level = mfa_level
self.system_channel_id = system_channel_id
self.system_channel_flags = system_channel_flags
self.rules_channel_id = rules_channel_id
self.vanity_url_code = vanity_url_code
self.description = description
self.banner = banner
self.premium_tier = premium_tier
self.preferred_locale = preferred_locale
        self.public_updates_channel_id = public_updates_channel_id
|
#! /usr/bin/python
import psycopg2
import pandas as pd
from Tushare.python.SQLMap import *
import pdb
##################### Establish the connection to SQL database
connect = psycopg2.connect(database="",user="",password="",host="127.0.0.1",port="5432")
print "Database on-line..."
def FormulateList(inlist,header):
fstr = "("
for i, item in enumerate(inlist):
if header[i] == 'ts_code' and item != 'ts_code':
fstr += "'"+str(item)+"'"+","
else:
fstr += str(item) + ","
fstr = fstr.rstrip(',') + ")"
return fstr
def CreateForm( header, form, cursor):
create_cmd = "create table if not exists " + form
create_cmd += "("
for i,title in enumerate(header):
# create_cmd += title + " " + SQLType[title] + ( " primary key not null, " if i == 0 else ",")
create_cmd += title + " " + SQLType[title] + ","
create_cmd = create_cmd.rstrip(',')+ ");"
cursor.execute(create_cmd)
connect.commit()
# pdb.set_trace()
def InsertDataToSQL(dataframe, form):
cursor = connect.cursor()
header = list(dataframe.columns.values)
CreateForm(header, form, cursor)
#### Loop over DataFrame and insert the data
for index,row in dataframe.iterrows():
insert_cmd ="insert into "+form
insert_cmd += FormulateList(header,header)
insert_cmd += " values "
insert_cmd += FormulateList(map(lambda x: row[x],header),header )
insert_cmd += ";"
        # print(insert_cmd)
cursor.execute(insert_cmd)
connect.commit()
connect.close()
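# Illustrative usage sketch (kept as comments, not executed; the table name and
# columns below are assumed examples, and every column must have an entry in SQLType):
#
#     daily = pd.DataFrame({'ts_code': ['000001.SZ'], 'close': [10.5]})
#     InsertDataToSQL(daily, 'daily_price')
#
# Note: InsertDataToSQL closes the shared connection at the end, so it can only
# be called once per run of this script.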
|
# -*- encoding: utf-8 -*-
# pylint: disable=E0203,E1101,C0111
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRun
from ..shape_object import ShapeObject
class ZipMapDictionary(dict):
"""
    Custom dictionary class, much faster for this runtime;
    it implements a subset of the usual dict methods.
"""
__slots__ = ['_rev_keys', '_values', '_mat']
@staticmethod
def build_rev_keys(keys):
res = {}
for i, k in enumerate(keys):
res[k] = i
return res
def __init__(self, rev_keys, values, mat=None):
"""
        @param      rev_keys        returned by @see me build_rev_keys,
*{keys: column index}*
@param values values
@param mat matrix if values is a row index,
one or two dimensions
"""
if mat is not None:
if not isinstance(mat, numpy.ndarray):
raise TypeError( # pragma: no cover
'matrix is expected, got {}.'.format(type(mat)))
if len(mat.shape) not in (2, 3):
raise ValueError( # pragma: no cover
"matrix must have two or three dimensions but got {}"
".".format(mat.shape))
dict.__init__(self)
self._rev_keys = rev_keys
self._values = values
self._mat = mat
def __getstate__(self):
"""
For pickle.
"""
return dict(_rev_keys=self._rev_keys,
_values=self._values,
_mat=self._mat)
def __setstate__(self, state):
"""
For pickle.
"""
if isinstance(state, tuple):
state = state[1]
self._rev_keys = state['_rev_keys']
self._values = state['_values']
self._mat = state['_mat']
def __getitem__(self, key):
"""
Returns the item mapped to keys.
"""
if self._mat is None:
return self._values[self._rev_keys[key]]
return self._mat[self._values, self._rev_keys[key]]
def __setitem__(self, pos, value):
"unused but used by pickle"
pass
def __len__(self):
"""
Returns the number of items.
"""
return len(self._values) if self._mat is None else self._mat.shape[1]
def __iter__(self):
for k in self._rev_keys:
yield k
def __contains__(self, key):
return key in self._rev_keys
def items(self):
if self._mat is None:
for k, v in self._rev_keys.items():
yield k, self._values[v]
else:
for k, v in self._rev_keys.items():
yield k, self._mat[self._values, v]
def keys(self):
for k in self._rev_keys.keys():
yield k
def values(self):
if self._mat is None:
for v in self._values:
yield v
else:
for v in self._mat[self._values]:
yield v
def asdict(self):
res = {}
for k, v in self.items():
res[k] = v
return res
def __str__(self):
return "ZipMap(%r)" % str(self.asdict())
class ArrayZipMapDictionary(list):
"""
Mocks an array without changing the data it receives.
    Notebook :ref:`onnxnodetimerst` illustrates the weaknesses
    and the strengths of this class compared to a list
    of dictionaries.
.. index:: ZipMap
"""
def __init__(self, rev_keys, mat):
"""
@param rev_keys dictionary *{keys: column index}*
@param mat matrix if values is a row index,
one or two dimensions
"""
if mat is not None:
if not isinstance(mat, numpy.ndarray):
raise TypeError( # pragma: no cover
'matrix is expected, got {}.'.format(type(mat)))
if len(mat.shape) not in (2, 3):
raise ValueError( # pragma: no cover
"matrix must have two or three dimensions but got {}"
".".format(mat.shape))
list.__init__(self)
self._rev_keys = rev_keys
self._mat = mat
@property
def dtype(self):
return self._mat.dtype
def __len__(self):
return self._mat.shape[0]
def __iter__(self):
for i in range(len(self)):
yield self[i]
def __getitem__(self, i):
return ZipMapDictionary(self._rev_keys, i, self._mat)
def __setitem__(self, pos, value):
raise RuntimeError(
"Changing an element is not supported (pos=[{}]).".format(pos))
@property
def values(self):
"""
Equivalent to ``DataFrame(self).values``.
"""
if len(self._mat.shape) == 3:
return self._mat.reshape((self._mat.shape[1], -1))
return self._mat
@property
def columns(self):
"""
Equivalent to ``DataFrame(self).columns``.
"""
res = [(v, k) for k, v in self._rev_keys.items()]
if len(res) == 0:
if len(self._mat.shape) == 2:
res = [(i, 'c%d' % i) for i in range(self._mat.shape[1])]
elif len(self._mat.shape) == 3:
# multiclass
res = [(i, 'c%d' % i)
for i in range(self._mat.shape[0] * self._mat.shape[2])]
else:
raise RuntimeError( # pragma: no cover
"Unable to guess the right number of columns for "
"shapes: {}".format(self._mat.shape))
else:
res.sort()
return [_[1] for _ in res]
@property
def is_zip_map(self):
return True
def __str__(self):
return 'ZipMaps[%s]' % ', '.join(map(str, self))
class ZipMap(OpRun):
"""
The class does not output a dictionary as
specified in :epkg:`ONNX` specifications
but a @see cl ArrayZipMapDictionary which
    is a wrapper on the input so that it does not
get copied.
"""
atts = {'classlabels_int64s': [], 'classlabels_strings': []}
def __init__(self, onnx_node, desc=None, **options):
OpRun.__init__(self, onnx_node, desc=desc,
expected_attributes=ZipMap.atts,
**options)
if hasattr(self, 'classlabels_int64s') and len(self.classlabels_int64s) > 0:
self.rev_keys_ = ZipMapDictionary.build_rev_keys(
self.classlabels_int64s)
elif hasattr(self, 'classlabels_strings') and len(self.classlabels_strings) > 0:
self.rev_keys_ = ZipMapDictionary.build_rev_keys(
self.classlabels_strings)
else:
self.rev_keys_ = {}
def _run(self, x): # pylint: disable=W0221
res = ArrayZipMapDictionary(self.rev_keys_, x)
return (res, )
def _infer_shapes(self, x): # pylint: disable=W0221
return (ShapeObject((x[0], ), dtype='map'), )
def _infer_types(self, x): # pylint: disable=W0221
"""
        Returns the output type, which is always 'map'.
"""
return ('map', )
|
"""
# TODO jstejska: Package description
"""
from .client import Client
from ..message import Message
class Sender(Client):
"""Abstract class of client's senders."""
def __init__(self, message_buffer=False):
"""Init
:param message_buffer: # TODO jstejska: description
:type message_buffer: # TODO jstejska: type
"""
super(Sender, self).__init__()
# Sender settings
self.message_buffer = message_buffer
self.messages = []
self.sent_messages = 0
@property
def last_message(self):
"""Method for pickup sent last message.
:return: message
:rtype: # TODO jstejska: type
"""
return self.messages[-1] if self.messages else None
def send_message(self, message: Message, **kwargs):
"""Method for send message.
:param message: # TODO jstejska: descritpion
:type: # TODO jstejska: type
:return: # TODO jstejska: description
:rtype # TODO jstejska: type
"""
if self.message_buffer:
self.messages.append(message)
else:
self.messages = [message]
self.sent_messages += 1
self._add_message(**kwargs)
self._send_message(**kwargs)
self.sent_messages += 1
def _send_message(self, **kwargs):
"""Method for yield unsoported send method.
:return: # TODO jstejska: description
:rtype: # TODO jstejska: type
"""
yield self._not_supported()
def _add_message(self, **kwargs):
"""Method for get message from arguments.
:param kwargs: dict with arguments
:type kwargs: # TODO jstejska: type
:return: # TODO jstejska: description
:rtype: # TODO jstejska: type
"""
message = ""
if "msg_content" in kwargs:
message = kwargs["msg_content"]
if self.message_buffer:
self.messages.append(message)
else:
self.messages = [message]
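# Minimal sketch of a concrete sender (kept as comments only; it assumes the
# parent Client class provides _not_supported() and that Message() can be built
# with no arguments -- both are assumptions, not guarantees of this package):
#
#     class PrintSender(Sender):
#         def _send_message(self, **kwargs):
#             print(self.last_message)
#
#     sender = PrintSender(message_buffer=True)
#     sender.send_message(Message(), msg_content="hello")   # prints "hello"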
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Adapted from https://www.byte-by-byte.com/dpbook/
# Copyright (c) 2019 Charles Joscelyne
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Given an integer representing an amount of change, write a
function to compute the minimum number of coins required
to make that amount of change. You can assume that there is
always a 1¢ coin.
eg. (assuming US coins: 1, 5, 10, and 25 cents)
makeChange(1) = 1 (1)
makeChange(6) = 2 (5 + 1)
makeChange(49) = 7 (25 + 10 + 10 + 1 + 1 + 1 + 1)
Naive Making Change solution
Brute force solution. Go through every
combination of coins that sum up to c to
find the minimum number
"""
import math
import time
coins = [10, 6, 1]
def makeChange(c):
if (c == 0):
return 0;
minCoins = math.inf; #set minCoins to infinity
# Try removing each coin from the total and
# see how many more coins are required
for coin in coins:
        # Skip a coin if its value is greater
# than the amount remaining
if (c - coin) >= 0:
currMinCoins = makeChange(c - coin)
if (currMinCoins < minCoins):
minCoins = currMinCoins
# Add back the coin removed recursively
return minCoins + 1
# Top-down dynamic programming solution. Cache the values as we compute them.
# Uses only O(c) space, even with the recursive stack.
# The time complexity is O(c * number of coins).
def makeChange_Cache(c):
# Initialize cache with values as -1
cache = [-1]*(c+1)
cache[0] = 0
return makeChange_helper(c, cache)
# Recursive helper function
def makeChange_helper(c, cache):
# Return the value if it’s in the cache
if (cache[c] >= 0):
return cache[c]
minCoins = math.inf; #set minCoins to infinity
# Find the best coin
for coin in coins:
if (c - coin) >= 0:
currMinCoins = makeChange_helper(c - coin, cache)
if (currMinCoins < minCoins):
minCoins = currMinCoins
# Save the value into the cache
cache[c] = minCoins + 1
return cache[c]
# Bottom-up dynamic programming solution.
# Iteratively compute the number of coins for
# larger and larger amounts of change
def makeChange_Bottom_Up(c):
cache = [0]*(c+1) #create cache
for i in range(1, c+1):
minCoins = math.inf
for coin in coins:
if (i - coin) >= 0:
currCoins = cache[i-coin] + 1
if (currCoins < minCoins):
minCoins = currCoins;
cache[i] = minCoins
return cache[c]
#TEST HARNESS
print("This test harness computes the mininum number of coins need")
print("for a given amount using coins: 10, 6, 1 for the")
print("three above methods along with their run times.")
print("Suggestion: please start small." )
n = int(input('Enter a non zero amount:'))
t0 = time.time()
f = makeChange(n)
t1 = time.time()
print("Test naive make change method")
print("Mininum of coins for amount",n,"=",f,"coins, time:",(t1-t0),"secs")
t0 = time.time()
f = makeChange_Cache(n)
t1 = time.time()
print("Test Cache change method")
print("Mininum of coins for amount",n,"=",f,"coins, time:",(t1-t0),"secs")
t0 = time.time()
f = makeChange_Bottom_Up(n)
t1 = time.time()
print("Test bottom up change method")
print("Mininum of coins for amount",n,"=",f,"coins, time:",(t1-t0),"secs")
"""
**** Sample Output ****
This test harness computes the minimum number of coins needed
for a given amount using coins: 10, 6, 1 for the
three methods above, along with their run times.
Suggestion: please start small.
Enter a non zero amount:57
Test naive make change method
Minimum number of coins for amount 57 = 7 coins, time: 6.475013017654419 secs
Test Cache change method
Minimum number of coins for amount 57 = 7 coins, time: 6.604194641113281e-05 secs
Test bottom up change method
Minimum number of coins for amount 57 = 7 coins, time: 4.38690185546875e-05 secs
"""
|
# -*- coding: utf-8 -*-
from arc.http import session_read, session_write
from arc.http.session.jwe import _create_jwe, _parse_jwe
def test_jwe_read_write():
payload = {"foo": {"bar": 123}, "yak": None}
token = _create_jwe(payload)
parsed = _parse_jwe(token)
del parsed["iat"] # delete issued at timestamp
assert parsed == payload
def test_jwe_cookies(monkeypatch):
monkeypatch.setenv("SESSION_TABLE_NAME", "jwe")
cookie = session_write({"count": 0})
mock = {
"headers": {
"cookie": cookie,
},
}
session = session_read(mock)
assert "count" in session
assert session["count"] == 0
|
import re
from pathlib import Path
from openpecha.utils import download_pecha
from pedurma.pecha import PedurmaNoteEdit
from pedurma.texts import get_durchen, get_hfml_text, get_link, get_vol_meta
from pedurma.utils import from_yaml
def get_durchen_pages(vol_text):
durchen_pages = {}
pages = re.split(r"(〔[𰵀-]?\d+〕)", vol_text)
pg_ann = ""
for i, page in enumerate(pages[1:]):
if i % 2 == 0:
pg_ann = page
else:
durchen_pages[pg_ann] = page
return durchen_pages
def get_page_num(page_ann):
pg_pat = re.search(r"(\d+)", page_ann)
if pg_pat:
pg_num = pg_pat.group(1)
else:
pg_num = None
return pg_num
def rm_annotations(text, annotations):
clean_text = text
for ann in annotations:
clean_text = re.sub(ann, "", clean_text)
return clean_text
def get_num(line):
tib_num = re.sub(r"\W", "", line)
tib_num = re.sub(r"(\d+?)r", "", tib_num)
table = tib_num.maketrans("༡༢༣༤༥༦༧༨༩༠", "1234567890", "<r>")
eng_num = int(tib_num.translate(table))
return eng_num
def get_durchen_pg_num(clean_page):
pg_num = 0
try:
page_ann = re.findall(r"<p\d+-(\d+)\>", clean_page)
pg_num = page_ann[-1]
except Exception:
pass
return pg_num
def get_page_refs(page_content):
refs = re.findall(r"<r.+?>", page_content)
if refs:
if len(refs) > 2:
refs[0] = get_num(refs[0])
refs[-1] = get_num(refs[-1])
return (refs[0], refs[-1])
else:
refs[0] = get_num(refs[0])
return (refs[0], "0")
else:
return ("0", "0")
def process_page(page_ann, page_content, vol_meta):
durchen_image_num = get_page_num(page_ann)
pg_link = get_link(durchen_image_num, vol_meta)
unwanted_annotations = [r"〔[𰵀-]?\d+〕", r"\[\w+\.\d+\]", r"<d", r"d>"]
page_content = rm_annotations(page_content, unwanted_annotations)
durchen_pg_num = get_durchen_pg_num(page_content)
pg_ref_first, pg_ref_last = get_page_refs(page_content)
page_obj = PedurmaNoteEdit(
image_link=pg_link,
image_no=durchen_image_num,
page_no=durchen_pg_num,
ref_start_page_no=pg_ref_first,
ref_end_page_no=pg_ref_last,
vol=vol_meta["volume_number"],
)
return page_obj
def get_pages_to_edit(durchen_pages, vol_meta):
pages_to_edit = []
for page_ann, page_content in durchen_pages.items():
pages_to_edit.append(process_page(page_ann, page_content, vol_meta))
return pages_to_edit
def get_pedurma_edit_notes(hfml_text, text_meta):
pedurma_edit_notes = []
for vol, text_content in hfml_text.items():
vol_meta = get_vol_meta(vol, text_meta)
durchen = get_durchen(text_content)
durchen_pages = get_durchen_pages(durchen)
pedurma_edit_notes += get_pages_to_edit(durchen_pages, vol_meta)
return pedurma_edit_notes
def get_pedurma_text_edit_notes(pecha_id):
pecha_path = download_pecha(pecha_id, needs_update=False)
meta_data = from_yaml(Path(f"{pecha_path}/{pecha_id}.opf/meta.yml"))
hfmls = get_hfml_text(pecha_path, pecha_id)
pedurma_edit_notes = get_pedurma_edit_notes(hfmls, meta_data)
return pedurma_edit_notes
|
import argparse
from RC5 import RC5
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser._action_groups.pop()
required = parser.add_argument_group('Required arguments')
optional = parser.add_argument_group('Optional arguments')
required.add_argument('-i', '--input-file', help='Path to encrypted file', required=True)
required.add_argument('-k', '--key-file', help='Path to key file', required=True)
required.add_argument('-o', '--output-file', help='Path to result file.', required=True)
optional.add_argument('-w', '--block-size', help='RC5 block size. (32, 64 or 128 bits)', default=32)
optional.add_argument('-r', '--round-size', help='RC5 round count. (0 to 255)', default=12)
args = parser.parse_args()
with open(args.key_file, 'rb') as key_file:
key = key_file.read()
rc5 = RC5(args.block_size, args.round_size, key, strip_extra_nulls=False)
rc5.decryptFile(args.input_file, args.output_file)
|
import random
import json
class Player:
def __init__(self, _name, _x, _y, _health, _armour, _mobility, _weapon):
self.name = _name
self.x = _x
self.y = _y
self.health = _health
self.armour = _armour
self.mobility = _mobility
self.weapon = _weapon
#--General Information Methods
def get_player_name(self):
return self.name
def get_current_position(self):
return self.x, self.y
def get_current_health(self):
return self.health
def get_current_armour(self):
return self.armour
def get_current_mobility(self):
return self.mobility
def get_current_weapon(self):
return self.weapon
#--Identify Methods
def identify_new_information(self, tile_map):
"""
        Method to get the contents of the surrounding tiles, the player's current health, combat information, etc. that will
be factored into the decision made by the player each time it is their turn to perform an action
:param tile_map: 2D array containing information on each tile in the map
:return: json object of all necessary information to be factored into the player's decision for what action to
carry out on their turn
# TODO: Add functionality to include the following in the JSON object: Combat Information, Current Weapon & Equipment
"""
info_json = "{\"surr_tiles\": " + str(self.check_surroundings(tile_map)) +\
", \"curr_health\": " + str(self.get_current_health()) + \
"}"
return json.loads(info_json)
def update_surrounding_tile_object_arrays(self, d_zone_array, enemy_array, tile, tile_pos):
"""
        Function that checks a given tile's objects based on the enum class 'tile object type' and updates the respective
        list with its position in relation to the player's current position
:param d_zone_array: list containing the index of the danger zone in relation to the player's current position
:param enemy_array: list containing the index of the enemy position(s) in relation to the player's current position
:param tile: the tile that is being checked
:param tile_pos: the index of the tile in relation to the current position of the player
        :return: the updated danger_zone and enemy lists
# TODO: Add functionality to include the following in the surround check: Buildings
"""
for t_object in tile.object:
if t_object.value == 1:
d_zone_array.append(tile_pos)
if t_object.value == 2:
enemy_array.append(tile_pos)
if t_object.value == 3:
#building_array.append(tile_pos)
continue
return d_zone_array, enemy_array
def check_surroundings(self, tile_map):
"""
Method to return information on the player's surrounding tiles, that will be factored into the decision made on
the player's turn
:param tile_map: 2D array containing information on each tile in the map
:return: json string of lists containing the index of their respective danger in relation to the player
# TODO: Clean up to make more efficient, once the logic has been figured out
"""
danger_zone_coords = []
enemy_zone_coords = []
# building_zone_coords = []
# Tile the player is currently in
self.update_surrounding_tile_object_arrays(danger_zone_coords, enemy_zone_coords, tile_map[self.x][self.y], 0)
# Tile north of player
if not self.x - 1 < 0:
self.update_surrounding_tile_object_arrays(danger_zone_coords, enemy_zone_coords, tile_map[self.x-1][self.y], 1)
else:
danger_zone_coords.append(1)
# Tile east of player
try:
self.update_surrounding_tile_object_arrays(danger_zone_coords, enemy_zone_coords, tile_map[self.x][self.y+1], 2)
except IndexError:
danger_zone_coords.append(2)
# Tile south of player
try:
self.update_surrounding_tile_object_arrays(danger_zone_coords, enemy_zone_coords, tile_map[self.x+1][self.y], 3)
except IndexError:
danger_zone_coords.append(3)
# Tile west of player
if not self.y - 1 < 0:
self.update_surrounding_tile_object_arrays(danger_zone_coords, enemy_zone_coords, tile_map[self.x][self.y-1], 4)
else:
danger_zone_coords.append(4)
# Removes one instance of a player object from the player's current tile, as that is the player themselves
enemy_zone_coords.remove(0)
return "{\"danger_zone\": " + str(danger_zone_coords)+ ", \"enemy\": " + str(enemy_zone_coords) + "}"
#--Assessment Methods
def assess_information(self, _json_info):
"""
Based on the info_json object, decisions are made and formatted into a decision_json object
Index 1 - Danger Above - Move X+1 (1 row down)
Index 2 - Danger Right - Move Y-1 (1 column left)
Index 3 - Danger Below - Move X-1 (1 row up)
Index 4 - Danger Left - Move Y+1 (1 column right)
:param _json_info: json object containing information on surrounding tiles
:return: all possible decisions the player can make, formatted as json object
TODO: Clean up to make more efficient, once the logic has been figured out
"""
decision_json = "[]"
# If all surrounding tiles are danger_zone, then the player can no longer move
if 0 < len(_json_info['surr_tiles']['danger_zone']) < 4:
curr_x, curr_y = self.get_current_position()
danger_index = _json_info['surr_tiles']['danger_zone'][0]
move_decision = "{\"move_player\": "
if danger_index == 1:
move_decision = move_decision + str([[curr_x, curr_y], [curr_x + 1, curr_y]]) + "}"
if danger_index == 2:
move_decision = move_decision + str([[curr_x, curr_y], [curr_x, curr_y - 1]]) + "}"
if danger_index == 3:
move_decision = move_decision + str([[curr_x, curr_y], [curr_x - 1, curr_y]]) + "}"
if danger_index == 4:
move_decision = move_decision + str([[curr_x, curr_y], [curr_x, curr_y + 1]]) + "}"
decision_json = decision_json[:-1] + move_decision + "]"
elif len(_json_info['surr_tiles']['enemy']) > 0:
curr_x, curr_y = self.get_current_position()
enemy_index = _json_info['surr_tiles']['enemy'][0]
if enemy_index == 0:
decision_json = decision_json[:-1] + "{\"attack_enemy\": []} ]"
print(decision_json)
return json.loads(decision_json)
move_decision = "{\"move_player\": "
if enemy_index == 1:
move_decision = move_decision + str([[curr_x, curr_y], [curr_x + 1, curr_y]]) + "}"
if enemy_index == 2:
move_decision = move_decision + str([[curr_x, curr_y], [curr_x, curr_y - 1]]) + "}"
if enemy_index == 3:
move_decision = move_decision + str([[curr_x, curr_y], [curr_x - 1, curr_y]]) + "}"
if enemy_index == 4:
move_decision = move_decision + str([[curr_x, curr_y], [curr_x, curr_y + 1]]) + "}"
decision_json = decision_json[:-1] + move_decision + "]"
return json.loads(decision_json)
def make_decision(self, _info_json):
"""
Method to make a decision on behalf of the player based on info JSON object passed in, which is subsequently
passed to the assess_information method, for a list of possible decisions that the player should make
:param _info_json: json object containing information on surrounding tiles
:return: json object containing information on the move decided by the player
        # TODO: Clean up to make more efficient, once the logic has been figured out. Add decision-making for combat, looting etc.
# TODO: Add functionality to include the following in the surround check: Combat, Looting, Healing
"""
decision_json = self.assess_information(_info_json)
print(decision_json)
if len(decision_json) > 0:
for key in decision_json[0]:
if key == "move_player":
return key, decision_json[0][key]
if key == "attack_enemy":
return key, decision_json[0][key]
else:
return "no_moves", []
#--Attempt Methods
def attempt_action(self):
"""
Method that essentially rolls a D20 dice, representing how successful the player's attempt at performing an
action
:return: a random integer from 1 - 20
"""
return random.randrange(1, 21)
def move_player(self, new_x, new_y):
"""
Method to move the player to a new position on the map
:param new_x: New X Coordinate of the player (Row)
:param new_y: New Y Coordinate of the player (Column)
"""
print("Old Position: " + str(self.get_current_position()))
self.x = new_x
self.y = new_y
print("New Position: " + str(self.get_current_position()))
#--Combat Methods
def take_damage(self, hit_value):
"""
Method to take damage to the player's health when in combat situation. If the player's health drops below 0, the
player's health is reset to 0, as that is the lowest it can potentially be
:param hit_value: The value of the damage taken from the player's health
"""
self.health = self.health - hit_value
if self.health < 0:
self.health = 0
def take_armour_damage(self, value):
"""
Method to take damage to the player's armour when in combat situation. If the player's armour drops below 1, the
player's armour is reset to 1, as that is the lowest it can potentially be
:param value: The value of the damage taken from the player's armour
"""
self.armour = self.armour - value
if self.armour < 1:
self.armour = 1
def update_mobility(self, value):
"""
Updates the player's mobility. This value may rise or fall based on the total weight of the equipment the player
is carrying (weapon, armour etc.)
:param value: The value that player's mobility will update by (can be a negative value)
"""
        self.mobility = self.mobility + value
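# Minimal demo of the decision flow described in assess_information (index 1
# means danger in the tile above, so the player moves one row down). The player
# attributes and the info dict below are assumed example values; this block
# runs only when the module is executed directly.
if __name__ == "__main__":
    demo_player = Player("demo", 2, 3, 100, 1, 5, "fists")
    info = {"surr_tiles": {"danger_zone": [1], "enemy": []}, "curr_health": 100}
    action, coords = demo_player.make_decision(info)
    print(action, coords)  # expected: move_player [[2, 3], [3, 3]]
|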
from __future__ import absolute_import
from collectdutil.utils import ParsedConfig
import pytest
from unit.conftest import plugin_config
from kong.reporter import Reporter
from kong.config import Config
http_scoped_metrics = ('request_latency', 'kong_latency', 'upstream_latency', 'request_size',
'response_size', 'response_count')
status_code_scoped_metrics = ('response_count', 'upstream_latency', 'request_size', 'response_size')
@pytest.fixture()
def reporter(kong_state):
reporter = Reporter()
reporter.kong_state = kong_state
return reporter
def test_response_count_without_scoping(reporter):
reporter.config = plugin_config(report_id=False, report_name=False, report_route_id=False,
report_http_method=False, report_status_code=False)
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_http_method_scope_metrics('response_count')
assert len(metrics) == 1
expected = sum([v['response_count'] for v in reporter.kong_state.resource_metrics.values()])
calculated = metrics.pop()
assert calculated.value == expected
assert calculated.dimensions == {}
@pytest.mark.parametrize('metric', http_scoped_metrics)
def test_calculate_http_scope_metrics_without_scoping(reporter, metric):
reporter.config = plugin_config(report_id=False, report_name=False, report_route_id=False,
report_http_method=False, report_status_code=False)
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_http_method_scope_metrics(metric)
assert len(metrics) == 1
calculated = metrics.pop()
expected = sum([v[metric] for v in reporter.kong_state.resource_metrics.values()])
assert calculated.value == expected
assert calculated.dimensions == {}
@pytest.mark.parametrize('metric', http_scoped_metrics)
def test_calculate_http_scope_metrics_with_http_scoping(reporter, metric):
reporter.config = plugin_config(report_id=False, report_name=False, report_route_id=False,
report_http_method=True, report_status_code=False)
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_http_method_scope_metrics(metric)
assert set([m.dimensions['http_method'] for m in metrics]) == set(reporter.kong_state.http_methods)
for calculated in metrics:
http_method = calculated.dimensions['http_method']
group_members = reporter.kong_state.http_methods[http_method]
expected = sum([reporter.kong_state.resource_metrics[ctx_hash][metric] for ctx_hash in group_members])
assert calculated.value == expected
assert calculated.dimensions == dict(http_method=http_method)
@pytest.mark.parametrize('metric', status_code_scoped_metrics)
def test_calculate_status_code_scope_metrics_with_status_code_scoping(reporter, metric):
reporter.config = plugin_config(report_id=False, report_name=False, report_route_id=False,
report_http_method=False, report_status_code=True)
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_status_code_scope_metrics(metric)
assert set([m.dimensions['status_code'] for m in metrics]) == set(reporter.kong_state.status_codes)
expected = []
for calculated in metrics:
status_code = calculated.dimensions['status_code']
group_members = reporter.kong_state.status_codes[status_code]
exp = sum([reporter.kong_state.resource_metrics[ctx_hash]['status_codes'].get(status_code, {metric: 0})[metric]
for ctx_hash in group_members])
expected.append(exp)
assert calculated.dimensions == dict(status_code=status_code)
assert [m.value for m in metrics] == expected
def to_metric_dimensions(context, status_code=None):
dimensions = {}
if status_code:
dimensions['status_code'] = status_code
for k in ('api_id', 'api_name', 'service_id', 'service_name', 'route_id', 'http_method'):
if context.get(k):
dimensions[k] = context[k]
return dimensions
@pytest.mark.parametrize('metric', status_code_scoped_metrics)
def test_calculate_status_code_scope_metrics_with_full_scoping(reporter, metric):
reporter.config = plugin_config(resource_types=['api', 'service'], report_id=True, report_name=True,
report_route_id=True, report_http_method=True, report_status_code=True)
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_status_code_scope_metrics(metric)
metric_dimensions = [m.dimensions for m in metrics]
all_dimensions = []
for ctx_hash, ctx in reporter.kong_state.resource_metrics.items():
for sc in ctx['status_codes']:
all_dimensions.append(to_metric_dimensions(ctx, sc))
for dim in metric_dimensions:
assert dim in all_dimensions
for dim in all_dimensions:
assert dim in metric_dimensions
assert len(metric_dimensions) == len(all_dimensions)
@pytest.mark.parametrize('metric', status_code_scoped_metrics)
def test_calculate_status_code_scope_metrics_with_partial_scoping(reporter, metric):
reporter.config = plugin_config(report_api_id=False, report_api_name=False, report_service_id=True,
report_service_name=True, report_route_id=True, report_http_method=True,
report_status_code=True)
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_status_code_scope_metrics(metric)
metric_dimensions = [m.dimensions for m in metrics]
all_dimensions = []
for ctx_hash, ctx in reporter.kong_state.resource_metrics.items():
if ctx['api_id']:
continue
if not ctx['api_id'] and not ctx['service_id']:
continue # unscoped metrics will roll up with api metrics
for sc in ctx['status_codes']:
all_dimensions.append(to_metric_dimensions(ctx, sc))
api_member_dimensions = []
for api_id in [_id for _id in reporter.kong_state.api_ids if _id]:
for ctx_hash in reporter.kong_state.api_ids[api_id]:
ctx = reporter.kong_state.resource_metrics[ctx_hash]
for sc in ctx['status_codes']:
dimensions = dict(http_method=ctx['http_method'], status_code=sc)
if dimensions not in api_member_dimensions:
api_member_dimensions.append(dimensions)
all_dimensions.extend(api_member_dimensions)
for dim in metric_dimensions:
assert dim in all_dimensions
for dim in all_dimensions:
assert dim in metric_dimensions
assert len(metric_dimensions) == len(all_dimensions)
@pytest.mark.parametrize('metric', status_code_scoped_metrics)
def test_calculate_status_code_scope_metrics_with_full_blacklist(reporter, metric):
reporter.config = plugin_config(report_api_id=True, report_api_name=True, report_service_id=True,
report_service_name=True, report_route_id=True, report_http_method=True,
report_status_code=True, status_code_blacklist=['*'])
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_status_code_scope_metrics(metric)
metric_dimensions = [m.dimensions for m in metrics]
all_dimensions = []
for ctx_hash, ctx in reporter.kong_state.resource_metrics.items():
all_dimensions.append(to_metric_dimensions(ctx))
for dim in metric_dimensions:
assert dim in all_dimensions
for dim in all_dimensions:
assert dim in metric_dimensions
assert len(metric_dimensions) == len(all_dimensions)
@pytest.mark.parametrize('metric', status_code_scoped_metrics)
def test_calculate_status_code_scope_metrics_with_partial_blacklist(reporter, metric):
blacklist = ['200', '201', '202', '203']
reporter.config = plugin_config(resource_types=['api', 'service'], report_id=True, report_name=True,
report_route_id=True, report_http_method=True, report_status_code=True,
status_code_blacklist=blacklist)
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_status_code_scope_metrics(metric)
metric_dimensions = [m.dimensions for m in metrics]
all_dimensions = []
for ctx_hash, ctx in reporter.kong_state.resource_metrics.items():
for sc in ctx['status_codes']:
if sc in blacklist:
sc = None
dimensions = to_metric_dimensions(ctx, sc)
if dimensions not in all_dimensions:
all_dimensions.append(dimensions)
for dim in metric_dimensions:
assert dim in all_dimensions
for dim in all_dimensions:
assert dim in metric_dimensions
assert len(metric_dimensions) == len(all_dimensions)
@pytest.mark.parametrize('metric', status_code_scoped_metrics)
def test_calculate_status_code_scope_metrics_with_full_scoping_and_status_code_groups(reporter, metric):
reporter.config = plugin_config(resource_types=['api', 'service'], report_id=True, report_name=True,
report_route_id=True, report_http_method=True, report_status_code_group=True)
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_status_code_scope_metrics(metric)
metric_dimensions = [m.dimensions for m in metrics]
all_dimensions = []
for ctx_hash, ctx in reporter.kong_state.resource_metrics.items():
for sc in ctx['status_codes']:
sc = '{0}xx'.format(sc[0])
dimension = to_metric_dimensions(ctx, sc)
if dimension not in all_dimensions:
all_dimensions.append(dimension)
for dim in metric_dimensions:
assert dim in all_dimensions
for dim in all_dimensions:
assert dim in metric_dimensions
assert len(metric_dimensions) == len(all_dimensions)
@pytest.mark.parametrize('metric', status_code_scoped_metrics)
def test_calculate_status_code_scope_metrics_with_no_scoping_and_status_code_groups(reporter, metric):
reporter.config = plugin_config(report_status_code_group=True)
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_status_code_scope_metrics(metric)
metric_dimensions = [m.dimensions for m in metrics]
all_dimensions = []
for sc in reporter.kong_state.status_codes:
sc = '{0}xx'.format(sc[0])
dimension = to_metric_dimensions({}, sc)
if dimension not in all_dimensions:
all_dimensions.append(dimension)
for dim in metric_dimensions:
assert dim in all_dimensions
for dim in all_dimensions:
assert dim in metric_dimensions
assert len(metric_dimensions) == len(all_dimensions)
@pytest.mark.parametrize('metric', status_code_scoped_metrics)
def test_confirm_metrics_source_extra_dimensions(reporter, metric):
cfg_str = '''
ExtraDimension "test_dimension" "test_val"
ExtraDimension "another_dimension" "another_val"
'''
reporter.config = Config(ParsedConfig(cfg_str))
reporter.update_http_method_scope_groups()
metrics = reporter.calculate_status_code_scope_metrics(metric)
assert metrics
for met in metrics:
assert met.dimensions['test_dimension'] == 'test_val'
assert met.dimensions['another_dimension'] == 'another_val'
|
import itertools
from typing import Dict, List
from exasol_integration_test_docker_environment.abstract_method_exception import AbstractMethodException
from exasol_integration_test_docker_environment.lib.base.base_task import BaseTask
from exasol_integration_test_docker_environment.lib.docker.images.create.docker_image_create_task import \
DockerCreateImageTask, DockerCreateImageTaskWithDeps
from exasol_integration_test_docker_environment.lib.docker.images.required_task_info import RequiredTaskInfo
class TaskCreatorFromBuildTasks:
def create_tasks_for_build_tasks(self, build_tasks: Dict[str, DockerCreateImageTask]) \
-> List[BaseTask]:
tasks_per_goal = [self._create_tasks_for_build_task(build_task)
for goal, build_task in build_tasks.items()]
return list(itertools.chain.from_iterable(tasks_per_goal))
def _create_tasks_for_build_task(self, build_task: DockerCreateImageTask) \
-> List[BaseTask]:
if isinstance(build_task, DockerCreateImageTaskWithDeps):
tasks = self.create_tasks_for_build_tasks(build_task.required_tasks)
task = self._create_task(build_task)
return [task] + tasks
else:
task = self._create_task(build_task)
return [task]
def _create_task(self, build_task):
required_task_info = self._create_required_task_info(build_task)
task = self.create_task_with_required_tasks(build_task, required_task_info)
return task
def _create_required_task_info(self, build_task: DockerCreateImageTask):
required_task_info = \
RequiredTaskInfo(module_name=build_task.__module__,
class_name=build_task.__class__.__name__,
params=build_task.param_kwargs)
return required_task_info
def create_task_with_required_tasks(self, build_task, required_task_info) -> BaseTask:
raise AbstractMethodException()
|
# Generated by Django 3.2.11 on 2022-01-16 00:05
from django.db import migrations
# pylint: disable=unused-argument
def add_roles(apps, schema_editor):
"""
Add the default roles for users
:param apps: The configuration of installed applications
:type apps: ~django.apps.registry.Apps
:param schema_editor: The database abstraction layer that creates actual SQL code
:type schema_editor: ~django.db.backends.base.schema.BaseDatabaseSchemaEditor
"""
    # We can't import the Group and Permission models directly as they may be
    # newer versions than this migration expects. We use the historical versions.
Group = apps.get_model("auth", "Group")
Permission = apps.get_model("auth", "Permission")
management_group = Group.objects.get(name="MANAGEMENT")
delete_imprint_permission = Permission.objects.get(codename="delete_imprintpage")
management_group.permissions.add(delete_imprint_permission)
# pylint: disable=unused-argument
def remove_roles(apps, schema_editor):
"""
Remove the default roles for users
:param apps: The configuration of installed applications
:type apps: ~django.apps.registry.Apps
:param schema_editor: The database abstraction layer that creates actual SQL code
:type schema_editor: ~django.db.backends.base.schema.BaseDatabaseSchemaEditor
"""
    # We can't import the Group and Permission models directly as they may be
    # newer versions than this migration expects. We use the historical versions.
Group = apps.get_model("auth", "Group")
Permission = apps.get_model("auth", "Permission")
management_group = Group.objects.get(name="MANAGEMENT")
delete_imprint_permission = Permission.objects.get(codename="delete_imprintpage")
management_group.permissions.remove(delete_imprint_permission)
class Migration(migrations.Migration):
"""
Migration file to grant the imprint deletion permission to the management role
"""
dependencies = [
("cms", "0004_alter_model_ordering"),
]
operations = [
migrations.RunPython(add_roles, remove_roles),
]
|
import logging
from ..core import Core
__LOGGER__ = logging.getLogger(__name__)
class GetSearchableMailboxes(Core):
'''Identifies all searchable mailboxes based on the provided UserConfiguration object's permissions
Example:
To use any service class you must provide a UserConfiguration object first.
        You can acquire one as follows:
```python
from pyews import UserConfiguration
from pyews import GetSearchableMailboxes
userconfig = UserConfiguration(
'first.last@company.com',
'mypassword123'
)
searchable_mailboxes = GetSearchableMailboxes(userconfig).run()
```
    If you want to use a property from this object with another class then you can iterate through the list of mailbox properties.
For example, if used in conjunction with the :doc:`searchmailboxes` you first need to create a list of mailbox reference_ids.
```python
id_list = []
    for id in searchable_mailboxes:
id_list.append(id.get('reference_id'))
searchResults = SearchMailboxes(userconfig).run('subject:"Phishing Email Subject"', id_list)
```
Args:
userconfiguration (UserConfiguration): A UserConfiguration object created using the UserConfiguration class
'''
def __init__(self, userconfiguration, stop_on_error=False):
super().__init__(userconfiguration, stop_on_error)
def __parse_response(self, value):
        '''Parses the raw SOAP response and returns a list of searchable mailbox dictionaries
Args:
value (str): The raw response from a SOAP request
'''
if not value:
raise ValueError("Empty response")
return_list = []
if value.find('ResponseCode').string == 'NoError':
for item in value.find_all('SearchableMailbox'):
return_list.append({
'reference_id': item.ReferenceId.string,
'primary_smtp_address': item.PrimarySmtpAddress.string,
'display_name': item.DisplayName.string,
'is_membership_group': item.IsMembershipGroup.string,
'is_external_mailbox': item.IsExternalMailbox.string,
'external_email_address': item.ExternalEmailAddress.string,
'guid': item.Guid.string
})
return return_list
def run(self):
soap_message = self.soap()
self.raw_xml = self.invoke(soap_message)
return self.__parse_response(self.raw_xml)
def soap(self):
'''Creates the SOAP XML message body
Returns:
str: Returns the SOAP XML request body
'''
if self.userconfiguration.impersonation:
impersonation_header = self.userconfiguration.impersonation.header
else:
impersonation_header = ''
return '''<?xml version="1.0" encoding="UTF-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"
xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages">
<soap:Header>
<t:RequestServerVersion Version="{version}" />
{header}
</soap:Header>
<soap:Body >
<m:GetSearchableMailboxes>
<m:ExpandGroupMembership>true</m:ExpandGroupMembership>
</m:GetSearchableMailboxes>
</soap:Body>
</soap:Envelope>'''.format(
version=self.userconfiguration.exchange_version,
header=impersonation_header)
|
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import json
import os
from subprocess import run
import shutil
import pytest
from ote_cli.registry import Registry
from tests.ote_cli.common import collect_env_vars, get_some_vars, create_venv, patch_demo_py
args = {
'--train-ann-file': 'data/airport/annotation_example_train.json',
'--train-data-roots': 'data/airport/train',
'--val-ann-file': 'data/airport/annotation_example_train.json',
'--val-data-roots': 'data/airport/train',
'--test-ann-files': 'data/airport/annotation_example_train.json',
'--test-data-roots': 'data/airport/train',
}
root = '/tmp/ote_cli/'
ote_dir = os.getcwd()
templates = Registry('external').filter(task_type='DETECTION').templates
templates_ids = [template.model_template_id for template in templates]
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ote_train(template):
work_dir, template_work_dir, algo_backend_dir = get_some_vars(template, root)
create_venv(algo_backend_dir, work_dir, template_work_dir)
command_line = ['ote',
'train',
template.model_template_id,
'--train-ann-file',
f'{os.path.join(ote_dir, args["--train-ann-file"])}',
'--train-data-roots',
f'{os.path.join(ote_dir, args["--train-data-roots"])}',
'--val-ann-file',
f'{os.path.join(ote_dir, args["--val-ann-file"])}',
'--val-data-roots',
f'{os.path.join(ote_dir, args["--val-data-roots"])}',
'--save-model-to',
f'{template_work_dir}/trained_{template.model_template_id}',
'params',
'--learning_parameters.num_iters',
'2',
'--learning_parameters.batch_size',
'2']
assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0
assert os.path.exists(f'{template_work_dir}/trained_{template.model_template_id}/weights.pth')
assert os.path.exists(f'{template_work_dir}/trained_{template.model_template_id}/label_schema.json')
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ote_export(template):
work_dir, template_work_dir, _ = get_some_vars(template, root)
command_line = ['ote',
'export',
template.model_template_id,
'--load-weights',
f'{template_work_dir}/trained_{template.model_template_id}/weights.pth',
f'--save-model-to',
f'{template_work_dir}/exported_{template.model_template_id}']
assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0
assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml')
assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/openvino.bin')
assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/label_schema.json')
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ote_eval(template):
work_dir, template_work_dir, _ = get_some_vars(template, root)
command_line = ['ote',
'eval',
template.model_template_id,
'--test-ann-file',
f'{os.path.join(ote_dir, args["--test-ann-files"])}',
'--test-data-roots',
f'{os.path.join(ote_dir, args["--test-data-roots"])}',
'--load-weights',
f'{template_work_dir}/trained_{template.model_template_id}/weights.pth',
'--save-performance',
f'{template_work_dir}/trained_{template.model_template_id}/performance.json']
assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0
assert os.path.exists(f'{template_work_dir}/trained_{template.model_template_id}/performance.json')
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ote_eval_openvino(template):
work_dir, template_work_dir, _ = get_some_vars(template, root)
command_line = ['ote',
'eval',
template.model_template_id,
'--test-ann-file',
f'{os.path.join(ote_dir, args["--test-ann-files"])}',
'--test-data-roots',
f'{os.path.join(ote_dir, args["--test-data-roots"])}',
'--load-weights',
f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml',
'--save-performance',
f'{template_work_dir}/exported_{template.model_template_id}/performance.json']
assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0
assert os.path.exists(f'{template_work_dir}/exported_{template.model_template_id}/performance.json')
with open(f'{template_work_dir}/trained_{template.model_template_id}/performance.json') as read_file:
trained_performance = json.load(read_file)
with open(f'{template_work_dir}/exported_{template.model_template_id}/performance.json') as read_file:
exported_performance = json.load(read_file)
for k in trained_performance.keys():
assert abs(trained_performance[k] - exported_performance[k]) / trained_performance[k] <= 0.00, f"{trained_performance[k]=}, {exported_performance[k]=}"
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ote_demo(template):
work_dir, template_work_dir, _ = get_some_vars(template, root)
command_line = ['ote',
'demo',
template.model_template_id,
'--load-weights',
f'{template_work_dir}/trained_{template.model_template_id}/weights.pth',
'--input',
f'{os.path.join(ote_dir, args["--test-data-roots"])}',
'--delay',
'-1']
assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ote_demo_openvino(template):
work_dir, template_work_dir, _ = get_some_vars(template, root)
command_line = ['ote',
'demo',
template.model_template_id,
'--load-weights',
f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml',
'--input',
f'{os.path.join(ote_dir, args["--test-data-roots"])}',
'--delay',
'-1']
assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ote_deploy_openvino(template):
work_dir, template_work_dir, _ = get_some_vars(template, root)
deployment_dir = f'{template_work_dir}/deployed_{template.model_template_id}'
command_line = ['ote',
'deploy',
template.model_template_id,
'--load-weights',
f'{template_work_dir}/exported_{template.model_template_id}/openvino.xml',
f'--save-model-to',
deployment_dir]
assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0
assert run(['unzip', 'openvino.zip'],
cwd=deployment_dir).returncode == 0
assert run(['python3', '-m', 'venv', 'venv'],
cwd=os.path.join(deployment_dir, 'python')).returncode == 0
assert run(['python3', '-m', 'pip', 'install', 'wheel'],
cwd=os.path.join(deployment_dir, 'python'),
env=collect_env_vars(os.path.join(deployment_dir, 'python'))).returncode == 0
assert run(['python3', '-m', 'pip', 'install', 'demo_package-0.0-py3-none-any.whl'],
cwd=os.path.join(deployment_dir, 'python'),
env=collect_env_vars(os.path.join(deployment_dir, 'python'))).returncode == 0
patch_demo_py(os.path.join(deployment_dir, 'python', 'demo.py'),
os.path.join(deployment_dir, 'python', 'demo_patched.py'))
assert run(['python3', 'demo_patched.py', '-m', '../model/model.xml', '-i', f'{os.path.join(ote_dir, args["--test-data-roots"])}'],
cwd=os.path.join(deployment_dir, 'python'),
env=collect_env_vars(os.path.join(deployment_dir, 'python'))).returncode == 0
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ote_hpo(template):
work_dir, template_work_dir, _ = get_some_vars(template, root)
if os.path.exists(f"{template_work_dir}/hpo"):
shutil.rmtree(f"{template_work_dir}/hpo")
command_line = ['ote',
'train',
template.model_template_id,
'--train-ann-file',
f'{os.path.join(ote_dir, args["--train-ann-file"])}',
'--train-data-roots',
f'{os.path.join(ote_dir, args["--train-data-roots"])}',
'--val-ann-file',
f'{os.path.join(ote_dir, args["--val-ann-file"])}',
'--val-data-roots',
f'{os.path.join(ote_dir, args["--val-data-roots"])}',
'--save-model-to',
f'{template_work_dir}/hpo_trained_{template.model_template_id}',
'--enable-hpo',
'--hpo-time-ratio',
'1',
'params',
'--learning_parameters.num_iters',
'2',
'--learning_parameters.batch_size',
'2']
assert run(command_line, env=collect_env_vars(work_dir)).returncode == 0
assert os.path.exists(f"{template_work_dir}/hpo/hpopt_status.json")
with open(f"{template_work_dir}/hpo/hpopt_status.json", "r") as f:
assert json.load(f).get('best_config_id', None) is not None
assert os.path.exists(f'{template_work_dir}/hpo_trained_{template.model_template_id}/weights.pth')
assert os.path.exists(f'{template_work_dir}/hpo_trained_{template.model_template_id}/label_schema.json')
def test_notebook():
work_dir = os.path.join(root, 'DETECTION')
assert run(['pytest', '--nbmake', 'ote_cli/notebooks/train.ipynb', '-v'], env=collect_env_vars(work_dir)).returncode == 0
|
import argparse
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--data_root", type=str, default="/home/ubuntu/temps")
parser.add_argument("--checkpoints", type=str, default="../../checkpoints")
parser.add_argument("--temps", type=str, default="./temps")
parser.add_argument("--device", type=str, default="cuda")
parser.add_argument("--dataset", type=str, default="celeba")
parser.add_argument("--input_height", type=int, default=None)
parser.add_argument("--input_width", type=int, default=None)
parser.add_argument("--input_channel", type=int, default=None)
parser.add_argument("--num_classes", type=int, default=10)
parser.add_argument("--bs", type=int, default=100)
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--attack_mode", type=str, default="all2one", help="all2one or all2all")
parser.add_argument("--target_label", type=int, default=0)
parser.add_argument("--outfile", type=str, default="./results.txt")
parser.add_argument("--k", type=int, default=4)
parser.add_argument("--s", type=float, default=0.5)
parser.add_argument("--grid_rescale", type=float, default=1)
return parser
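# Usage sketch (added for illustration): the function above returns the parser
# rather than parsing, so callers typically do get_arguments().parse_args().
# The small demo below only echoes a few of the declared defaults.
if __name__ == "__main__":
    opt = get_arguments().parse_args()
    print(opt.dataset, opt.bs, opt.attack_mode, opt.target_label)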
|
import grpc
from SuperSonic.service import schedule_pb2
from SuperSonic.service import schedule_pb2_grpc
# Because the RPC service is expected to run for a long time, a concurrency
# library is also used for performance reasons.
import time  # used to set system delays
from concurrent import futures
_ONE_DAY_IN_SECONDS = 60 * 60 * 24  # default length of time the server keeps running
# add by zc
from multiprocessing import Lock
import numpy as np
# synchronization locks
import threading
lock = threading.Lock()
lock_s = threading.Lock()
maxLen_start = 4000
init_space = np.array([1 for _ in range(4000)])
from copy import deepcopy
from gensim.models.doc2vec import Doc2Vec, LabeledSentence
import gym
import compiler_gym
from gym.spaces import Discrete, Dict, Box
from gym import spaces
import logging
import random
DEBUG = False
# end by zc grpc
logger = logging.getLogger(__name__)
# NeuroVectorizer RL Environment
# client
class autotvm_rl(gym.Env):
def __init__(self, env_config):
self.env = gym.make(
"Tvm-v0",
state_function=env_config.get("state_function"),
action_function=env_config.get("action_function"),
reward_function=env_config.get("reward_function"),
)
self.maxLen = 200
self.interleave_action_meaning = [
_ for _ in range(maxLen_start)
        ]  # TODO: change the action space as needed
self.action_space = Discrete(
maxLen_start
) # action_len
        print(self.action_space, "\n=================action space=======\n")
self.observation_space = Dict(
{
"obs": self.env.observation_space,
"action_mask": Box(low=0, high=1, shape=(self.action_space.n,)),
}
)
self.running_reward = 0
def reset(self):
self.running_reward = 0
return {"obs": self.env.reset(), "action_mask": init_space}
# self.action_space.n/2
def step(self, action):
with grpc.insecure_channel('localhost:50061') as channel:
stub = schedule_pb2_grpc.ScheduleServiceStub(channel)
response = stub.GetTvm(
schedule_pb2.TvmRequest(action=action)
)
obs, rew, done, info = self.env.step(
action, response.state, response.reward
)
self.running_reward += rew
score = self.running_reward if done else 0
if self.maxLen != response.maxLen:
self.maxLen = response.maxLen
for _ in range(self.maxLen):
init_space[_] = 1
for _ in range(self.maxLen, maxLen_start):
init_space[_] = 0
init_space[action] = 0
return (
{"obs": obs, "action_mask": init_space},
score,
done,
info,
)
def set_state(self, state):
self.running_reward = state[1]
self.env = deepcopy(state[0])
obs = np.array(list(self.env.unwrapped.state))
return {"obs": obs, "action_mask": init_space}
def get_state(self):
return deepcopy(self.env), self.running_reward
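# Illustrative usage sketch (kept as comments): instantiating this environment
# assumes a registered "Tvm-v0" gym environment and a schedule gRPC server
# listening on localhost:50061, as step() above expects; the callables passed
# in env_config are placeholders.
#
#   env = autotvm_rl({"state_function": state_fn,
#                     "action_function": action_fn,
#                     "reward_function": reward_fn})
#   obs = env.reset()
#   obs, score, done, info = env.step(env.action_space.sample())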
|
import math
import numpy as np
import torch
def bitreversal_po2(n):
m = int(math.log(n)/math.log(2))
perm = np.arange(n).reshape(n,1)
for i in range(m):
n1 = perm.shape[0]//2
perm = np.hstack((perm[:n1],perm[n1:]))
return perm.squeeze(0)
def bitreversal_permutation(n):
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
perm = bitreversal_po2(N)
return np.extract(perm < n, perm)
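# Worked example (for reference): bitreversal_permutation(8) reverses the 3-bit
# binary representation of each index, giving array([0, 4, 2, 6, 1, 5, 3, 7]);
# for a non-power-of-two n the permutation of the next power of two is computed
# and indices >= n are dropped.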
# For language modeling
# Adapted from https://github.com/salesforce/awd-lstm-lm/blob/master/utils.py
def repackage_hidden(h):
"""Wraps hidden states in new Tensors,
to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
return data
def get_batch(source, i, seq_len):
seq_len = min(seq_len, len(source) - 1 - i)
data = source[i:i+seq_len].t()
target = source[i+1:i+1+seq_len].t().reshape(-1)
return data, target
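# Usage sketch (illustrative): batchify a toy corpus of 26 token ids into 4
# columns (trimming the 2 leftover tokens) and pull the first mini-batch.
if __name__ == '__main__':
    corpus = torch.arange(26)
    source = batchify(corpus, 4)            # shape (6, 4)
    data, target = get_batch(source, 0, 3)  # data: (4, 3), target: (12,)
    print(data.shape, target.shape)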
|
"""
Module that interacts with the orchestrator CLI.
Provide the interfaces to ceph orch and in turn manage the orchestration engine.
"""
import logging
from datetime import datetime, timedelta
from json import loads
from time import sleep
from typing import List
from ceph.ceph import ResourceNotFoundError
from .ceph import CephCLI
from .ls import LSMixin
from .ps import PSMixin
from .remove import RemoveMixin
LOG = logging.getLogger()
class Orch(LSMixin, PSMixin, RemoveMixin, CephCLI):
"""Represent ceph orch command."""
direct_calls = ["ls", "ps"]
def check_service_exists(
self, service_name: str, ids: List[str], timeout: int = 300, interval: int = 5
) -> bool:
"""
Verify the provided service is running for the given list of ids.
Args:
service_name: The name of the service to be checked.
ids: The list of daemons to be checked for that service.
timeout: In seconds, the maximum allowed time. By default 5 minutes
interval: In seconds, the polling interval time.
Returns:
True if the service and the list of daemons are running else False.
"""
end_time = datetime.now() + timedelta(seconds=timeout)
while end_time > datetime.now():
sleep(interval)
out, err = self.ps({"base_cmd_args": {"format": "json"}})
out = loads(out)
daemons = [d for d in out if d.get("daemon_type") == service_name]
count = 0
for _id in ids:
for daemon in daemons:
if (
_id in daemon["daemon_id"]
and daemon["status_desc"] == "running"
):
count += 1
LOG.info("%s/%s %s daemon(s) up... retrying", count, len(ids), service_name)
if count == len(ids):
return True
# Identify the failure
out, err = self.ls({"base_cmd_args": {"format": "json"}})
for item in loads(out):
if (
service_name in item.get("service_type")
and item["status"].get("running") == 0
):
LOG.error("Service status(es): %s", item)
LOG.error("Service event(s): %s", item["events"])
return False
def get_role_service(self, service_name: str) -> str:
"""
Get service info by name.
Args:
service_name: service name
Returns:
service
Raises:
            ResourceNotFoundError: when no resource matching the provided name is found.
"""
out, _ = self.ls()
for svc in loads(out):
if service_name in svc.get("service_name"):
return svc
raise ResourceNotFoundError(f"No service names matched {service_name}")
def check_service(
self, service_name: str, timeout: int = 300, interval: int = 5, exist=True
) -> bool:
"""
        Check for the service's existence based on the exist parameter:
        if exist is True, validate the service's presence; otherwise,
        wait for its removal.
        Args:
            service_name: service name
            timeout: timeout in seconds
            interval: polling interval in seconds
            exist: boolean
        Returns:
            True when the expected state is reached within the timeout, else False
"""
end_time = datetime.now() + timedelta(seconds=timeout)
while end_time > datetime.now():
sleep(interval)
out, err = self.ls({"base_cmd_args": {"format": "json"}})
out = loads(out)
service = [d for d in out if d.get("service_name") == service_name]
            if not service and not exist:
                return True
            elif service and exist:
                return True
LOG.info("[%s] check for existence: %s, retrying" % (service_name, exist))
return False
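# Usage sketch (illustrative; the constructor arguments are whatever the
# CephCLI base class expects in this framework, abbreviated here as `**kw`):
#
#   orch = Orch(**kw)
#   orch.check_service_exists(service_name="mon", ids=["node1", "node2"])
#   orch.check_service(service_name="rgw.india", exist=False, timeout=600)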
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _TencentCloud
class _Application(_TencentCloud):
_type = "application"
_icon_dir = "resources/tencentcloud/application"
class CloudMessageQueue(_Application):
_icon = "cloud-message-queue.png"
class LoadMaster(_Application):
_icon = "load-master.png"
class LogService(_Application):
_icon = "log-service.png"
# Aliases
CLS = LogService
CMQ = CloudMessageQueue
LM = LoadMaster |
from django.conf.urls import include, url
from .views import ProductDetailView, ProductListView, VariationListView
urlpatterns = [
url(r'^$', ProductListView.as_view(), name="list"),
url(r'^(?P<pk>\d+)/$', ProductDetailView.as_view(), name="detail"),
url(r'^(?P<pk>\d+)/inventory/$', VariationListView.as_view(), name="variation_list"),
] |
#! /usr/bin/python
import numpy as np
def compute_reach_coefficent(distances: np.ndarray, time: float):
tmp = distances <= time
return tmp.astype(np.int8)
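# Worked example (illustrative): points whose distance can be covered within
# the time budget get coefficient 1, the rest 0.
if __name__ == '__main__':
    print(compute_reach_coefficent(np.array([1.0, 5.0, 10.0]), 5.0))  # [1 1 0]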
|
from .na_missfortune_top import *
from .na_missfortune_jng import *
from .na_missfortune_mid import *
from .na_missfortune_bot import *
from .na_missfortune_sup import *
|
import unittest
from ietfparse import errors, headers
class ForwardedHeaderParsingTests(unittest.TestCase):
def test_that_whitespace_is_irrelevant(self):
# RFC7239. sec 7.1
self.assertEqual(
headers.parse_forwarded('for=192.0.2.43,'
'for="[2001:db8:cafe::17]",for=unknown'),
headers.parse_forwarded('for=192.0.2.43, '
'for="[2001:db8:cafe::17]", for=unknown'))
def test_that_order_is_preserved(self):
parsed = headers.parse_forwarded('for=192.0.2.43,'
'for="[2001:db8:cafe::17]",'
'for=unknown')
self.assertEqual(len(parsed), 3)
self.assertEqual(parsed[0], {'for': '192.0.2.43'})
self.assertEqual(parsed[1], {'for': '[2001:db8:cafe::17]'})
self.assertEqual(parsed[2], {'for': 'unknown'})
def test_that_param_names_are_normalized(self):
parsed = headers.parse_forwarded('For="[2001:db8:cafe::17]:4711"')
self.assertEqual(parsed, [{'for': '[2001:db8:cafe::17]:4711'}])
def test_parsing_full_header(self):
parsed = headers.parse_forwarded(
'for=192.0.2.60;proto=http;'
'by=203.0.113.43;host=example.com',
only_standard_parameters=True)
self.assertEqual(parsed[0]['for'], '192.0.2.60')
self.assertEqual(parsed[0]['proto'], 'http')
self.assertEqual(parsed[0]['by'], '203.0.113.43')
self.assertEqual(parsed[0]['host'], 'example.com')
def test_that_non_standard_parameters_are_parsed(self):
parsed = headers.parse_forwarded('for=127.0.0.1;one=two')
self.assertEqual(parsed[0]['one'], 'two')
def test_that_non_standard_parameters_can_be_prohibited(self):
with self.assertRaises(errors.StrictHeaderParsingFailure) as context:
headers.parse_forwarded('for=127.0.0.1;one=2',
only_standard_parameters=True)
self.assertEqual(context.exception.header_name, 'Forwarded')
self.assertEqual(context.exception.header_value, 'for=127.0.0.1;one=2')
|
'''
Created on 2 Dec 2017
@author: julianporter
'''
from setuptools import Command
import shutil
import os
class Cleaner(Command) :
user_options=[]
def __init__(self,dist,**kwargs):
super(Cleaner,self).__init__(dist,**kwargs)
def initialize_options(self):
self.directories=[]
self.files=[]
def finalize_options(self):
self.directories=['build','dist','wav2mp3.egg-info']
self.files=[]
def run(self):
for directory in self.directories:
try:
shutil.rmtree(directory)
print("{} deleted".format(directory))
except FileNotFoundError:
print("{} does not exist, so not deleted...".format(directory))
except Exception as e:
print("{} : {}".format(e.__class__.__name__,e))
for file in self.files:
try:
os.remove(file)
print("{} deleted".format(directory))
except FileNotFoundError:
print("{} does not exist, so not deleted...".format(directory))
except Exception as e:
print("{} : {}".format(e.__class__.__name__,e))
|
"""Sensor for the Štamper Peludna Prognoza."""
from datetime import timedelta, datetime, date
import logging
import voluptuous as vol
import requests
import lxml
from bs4 import BeautifulSoup
from homeassistant.const import (
CONF_NAME,
CONF_LATITUDE,
CONF_LONGITUDE,
__version__,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import SensorEntity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_ALLERGENS = "allergens"
ATTR_NAME = "name"
ATTR_DATE = "date"
ATTR_LEVEL = "level"
ATTR_STATION = "station"
ATTR_UPDATED = "updated"
ATTR_DESCRIPTION = "description"
ATTR_IMAGE = "image"
ATTR_FORECAST = "forecast"
ATTRIBUTION = "Data provided by stampar.hr"
DEFAULT_NAME = "Peludna prognoza"
CONF_STATION_ID = "station_id"
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=60)
SENSOR_TYPES = {
2 : { "name": "Lijeska" , "name_clean": "lijeska" , "lat": "Corylus sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
3 : { "name": "Joha" , "name_clean": "joha" , "lat": "Alnus sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
4 : { "name": "Čempresi" , "name_clean": "cempresi" , "lat": "Cupressaceae" , "type": "Drveće", "icon": "mdi:flower-pollen" },
5 : { "name": "Jasen" , "name_clean": "jasen" , "lat": "Fraxinus sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
6 : { "name": "Breza" , "name_clean": "breza" , "lat": "Betula sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
7 : { "name": "Grab" , "name_clean": "grab" , "lat": "Carpinus sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
8 : { "name": "Hrast" , "name_clean": "hrast" , "lat": "Quercus sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
9 : { "name": "Platana" , "name_clean": "platana" , "lat": "Platanus sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
11: { "name": "Koprive" , "name_clean": "koprive" , "lat": "Urticaceae" , "type": "Korovi", "icon": "mdi:flower-pollen" },
12: { "name": "Pitomi kesten" , "name_clean": "pitomi_kesten" , "lat": "Castanea sativa", "type": "Drveće", "icon": "mdi:flower-pollen" },
13: { "name": "Ambrozija" , "name_clean": "ambrozija" , "lat": "Ambrosia sp." , "type": "Korovi", "icon": "mdi:flower-pollen" },
14: { "name": "Pelin" , "name_clean": "pelin" , "lat": "Artemisia sp." , "type": "Korovi", "icon": "mdi:flower-pollen" },
15: { "name": "Loboda" , "name_clean": "loboda" , "lat": "Chenopodiaceae" , "type": "Korovi", "icon": "mdi:flower-pollen" },
16: { "name": "Maslina" , "name_clean": "maslina" , "lat": "Olea sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
17: { "name": "Bor" , "name_clean": "bor" , "lat": "Pinus sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
18: { "name": "Hrast crnika" , "name_clean": "hrast_crnika" , "lat": "Quercus ilex" , "type": "Drveće", "icon": "mdi:flower-pollen" },
19: { "name": "Crkvina" , "name_clean": "crkvina" , "lat": "Parietaria sp." , "type": "Korovi", "icon": "mdi:flower-pollen" },
20: { "name": "Vrba" , "name_clean": "vrba" , "lat": "Salix sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
21: { "name": "Trputac" , "name_clean": "trputac" , "lat": "Plantago sp." , "type": "Korovi", "icon": "mdi:flower-pollen" },
22: { "name": "Topola" , "name_clean": "topola" , "lat": "Populus sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
25: { "name": "Brijest" , "name_clean": "brijest" , "lat": "Ulmus sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
26: { "name": "Lipa" , "name_clean": "lipa" , "lat": "Tilia sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
27: { "name": "Orah" , "name_clean": "orah" , "lat": "Juglans sp." , "type": "Drveće", "icon": "mdi:flower-pollen" },
28: { "name": "Kiselica" , "name_clean": "kiselica" , "lat": "Rumex sp." , "type": "Korovi", "icon": "mdi:flower-pollen" },
90: { "name": "Drveće" , "name_clean": "drvece" , "lat": "Drveće" , "type": "Drveće", "icon": "mdi:flower-pollen" },
10: { "name": "Trave" , "name_clean": "trave" , "lat": "Poaceae" , "type": "Trave" , "icon": "mdi:flower-pollen" },
92: { "name": "Korovi" , "name_clean": "korovi" , "lat": "Korovi" , "type": "Korovi", "icon": "mdi:flower-pollen" },
}
STATIONS = {
"2" : { "name":"Beli Manastir" , "lat": 45.7729, "long": 18.6108 },
"3" : { "name":"Dubrovnik" , "lat": 42.6507, "long": 18.0944 },
"4" : { "name":"Đakovo" , "lat": 45.3100, "long": 18.4098 },
"5" : { "name":"Karlovac" , "lat": 45.4929, "long": 15.5553 },
"6" : { "name":"Koprivnica" , "lat": 46.1639, "long": 16.8335 },
"7" : { "name":"Kutina" , "lat": 45.4793, "long": 16.7763 },
"8" : { "name":"Labin" , "lat": 45.0916, "long": 14.1238 },
"9" : { "name":"Metković" , "lat": 43.0533, "long": 17.6493 },
"10": { "name":"Našice" , "lat": 45.4947, "long": 18.0951 },
"11": { "name":"Osijek" , "lat": 45.5550, "long": 18.6955 },
"12": { "name":"Pazin" , "lat": 45.2398, "long": 13.9373 },
"13": { "name":"Popovača" , "lat": 45.5713, "long": 16.6274 },
"14": { "name":"Poreč" , "lat": 45.2272, "long": 13.5947 },
"15": { "name":"Pula" , "lat": 44.8666, "long": 13.8496 },
"16": { "name":"Rijeka" , "lat": 45.3271, "long": 14.4422 },
"17": { "name":"Šibenik" , "lat": 43.7350, "long": 15.8952 },
"18": { "name":"Sisak" , "lat": 45.4851, "long": 16.3731 },
"19": { "name":"Slavonski Brod" , "lat": 45.1631, "long": 18.0116 },
"20": { "name":"Split" , "lat": 43.5147, "long": 16.4435 },
"21": { "name":"Varaždin" , "lat": 46.3057, "long": 16.3366 },
"22": { "name":"Virovitica" , "lat": 45.8316, "long": 17.3855 },
"23": { "name":"Zadar" , "lat": 44.1194, "long": 15.2314 },
"24": { "name":"Zagreb" , "lat": 45.8150, "long": 15.9819 },
}
LEVELS = {
"0": { "from": 0 , "to": 0 , "medium": "0" , "title": "nema peludi" , "title_eng": "none" , "color": "gray" },
"1": { "from": 0 , "to": 2 , "medium": "1" , "title": "niska" , "title_eng": "low" , "color": "green" },
"2": { "from": 2 , "to": 6 , "medium": "4" , "title": "umjerena" , "title_eng": "medium" , "color": "yellow" },
"3": { "from": 6 , "to": 12, "medium": "9" , "title": "visoka" , "title_eng": "high" , "color": "orange" },
"4": { "from": 12, "to": 99, "medium": "15", "title": "jako visoka" , "title_eng": "veryhigh" , "color": "red" },
}
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_ALLERGENS, default=list(SENSOR_TYPES) ): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_STATION_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Štampar Pelud sensor platform."""
name = config.get(CONF_NAME)
station_id = config.get(CONF_STATION_ID)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
if station_id:
_LOGGER.debug("Configuration station_id: %s", station_id)
if station_id not in STATIONS:
_LOGGER.error("Configuration %s: %s , is not known", CONF_STATION_ID, station_id)
return False
else:
station_id = closest_station(latitude, longitude)
_LOGGER.debug("Found closest station_id: %s", station_id)
station_name = STATIONS[station_id]["name"]
_LOGGER.debug("Determined station name: %s", station_name)
probe = StamparPeludData(station_id=station_id, station_name=station_name)
try:
probe.update()
except (ValueError, TypeError) as err:
_LOGGER.error("Received error from stampar.hr: %s", err)
return False
add_entities(
[
StamparPeludSensor(probe, variable, name, station_id, station_name)
for variable in config[CONF_ALLERGENS]
],
True,
)
class StamparPeludSensor(SensorEntity):
"""Implementation of a Štampar Pelud sensor."""
def __init__(self, probe, variable, name, station_id, station_name):
"""Initialize the sensor."""
self.probe = probe
self.client_name = name
self.variable = variable
self.station_id = station_id
self.station_name = station_name
_LOGGER.debug("Initializing: %s", variable)
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {SENSOR_TYPES[self.variable]['name_clean']}"
@property
def icon(self):
"""Return the name of the sensor."""
return SENSOR_TYPES[self.variable]["icon"]
@property
def state(self):
"""Return the state of the sensor."""
ret = None
l_data = self.probe.get_data(SENSOR_TYPES[self.variable]["name"])
#_LOGGER.debug("Found mapped data: %s", l_data)
if l_data:
if len(l_data["measurements"]) > 0:
first_measurement = l_data["measurements"][0]
if first_measurement:
if first_measurement["value"] != "":
ret = float(first_measurement["value"])
else:
ret = first_measurement["level"]
return ret
@property
def state_class(self):
"""Return the state_class of this entity, if any."""
return "measurement"
@property
def extra_state_attributes(self):
"""Return the state attributes."""
ret = {
ATTR_NAME: SENSOR_TYPES[self.variable]["name"],
ATTR_STATION: self.probe.get_location(),
ATTR_UPDATED: self.probe._last_update,
}
l_data = self.probe.get_data(SENSOR_TYPES[self.variable]["name"])
if l_data:
if len(l_data["measurements"]) > 0:
level_measurement = None
date_measurement = None
level_measurement_en = None
forecast = {}
first_measurement = l_data["measurements"][0]
if first_measurement:
level_measurement = first_measurement["level"]
date_measurement = first_measurement["date"]
for measurement in l_data["measurements"][1:]:
forecast[measurement["date"]] = measurement["level"]
ret = {
ATTR_NAME: SENSOR_TYPES[self.variable]["name"],
ATTR_LEVEL: level_measurement,
ATTR_DATE: date_measurement,
ATTR_STATION: self.probe.get_location(),
ATTR_UPDATED: self.probe._last_update,
ATTR_DESCRIPTION: l_data.get("description"),
ATTR_IMAGE: l_data.get("image"),
ATTR_FORECAST: forecast,
}
return ret
@property
def entity_picture(self):
ret = None
l_data = self.probe.get_data(SENSOR_TYPES[self.variable]["name"])
if l_data:
ret = l_data.get("small_image")
# ret = "/local/stampar_icons/" + SENSOR_TYPES[self.variable]["name"] + ".svg"
return ret
def update(self):
"""Delegate update to probe."""
self.probe.update()
class StamparPeludData:
"""The class for handling the data retrieval."""
_station_name = ""
_station_id = ""
_data = {}
_last_update = None
def __init__(self, station_id, station_name):
"""Initialize the probe."""
self._station_name = station_name
self._station_id = station_id
self._data = {}
_LOGGER.debug("Initialized sensor data: %s, %s", station_id, station_name)
def current_situation(self):
"""Fetch and parse the latest XML data."""
try:
# Return structure with received parsed data
# title - station title as received from API
# min_date - minimum date of measurements
# max_first_date - maximum first measurement date
# plants: {
# plant_key: { - HR name of plant, from API
# title - name fo plant, from API
# small_image - small image URL
# image - big image URL
# description - long plant description
# measurements: [
# date - date in noted format (first date is measurement, others are forecasts)
# level - levels: nema peludi, niska, umjerena, visoka, jako visoka
            # value - measured value, or medium if only a level is given
# ]
# }
# }
elem = {
"title": "",
"min_date": None,
"max_first_date": None,
"plants": {},
}
_LOGGER.debug("Refreshing current_situation")
l_headers = {
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"User-Agent": "Mozilla/5.0",
"Origin": "https://stampar.hr",
"Referer": "https://stampar.hr/hr/peludna-prognoza"
}
l_data = {
"title": self._station_id,
"view_name": "peludna_prognoza",
"view_display_id": "block_1",
}
r = requests.post("https://stampar.hr/hr/views/ajax?_wrapper_format=drupal_ajax", headers=l_headers, data=l_data)
l_response = r.json()
for i_response in l_response:
if "data" in i_response:
if "view-peludna-prognoza" in i_response["data"]:
l_data = i_response["data"]
#_LOGGER.debug("Received data response: %s", l_data)
s = BeautifulSoup(l_data, "lxml")
l_title = ""
if s.find("div", class_="views-field-title"):
l_title = s.find("div", class_="views-field-title").get_text("", strip=True)
_LOGGER.debug("Response title: %s", l_title)
elem["title"] = l_title
for plant in s.find_all("div", class_="paragraph--type--biljka-grupa"):
plant_title = plant.find("div", class_="biljka-naslov").get_text("", strip=True)
plant_key = plant_title.partition("(")[0].strip()
_LOGGER.debug("Found plant: %s, key: %s", plant_title, plant_key)
plant_big_img = ""
plant_small_img = ""
plant_desc = ""
if plant.img:
plant_big_img = "https://stampar.hr" + plant.img['src']
s_plant_desc = plant.find("div", class_="opis-biljke")
if s_plant_desc:
if s_plant_desc.img:
plant_small_img = "https://stampar.hr" + s_plant_desc.img["src"]
s_plant_desc_text = s_plant_desc.find("div", class_="field-type-text-with-summary")
if s_plant_desc_text:
plant_desc = s_plant_desc_text.get_text("", strip=True)
_LOGGER.debug("Plant small image: %s, image: %s, and descritpion: %s", plant_small_img, plant_big_img, plant_desc)
d_measurements = []
d_first_measurement = None
for measurement in plant.find_all("div", class_="mjerenje-container"):
date_measurement = measurement.find("div", class_="field-field-datum-mjerenja").find("div", class_="field-item").get_text("", strip=True)
_LOGGER.debug("Found measurement of date: %s", date_measurement)
d_date_measurement = datetime.strptime(date_measurement, '%d.%m.%Y.').date()
                                ## logic excluded, let sensor store all fetched data; filtering to only future dates should be done in the frontend
# if d_date_measurement < date.today():
# _LOGGER.debug("Found measurement date is in past, ignoring it: %s < %s", d_date_measurement , date.today())
# else:
level_measurement = ""
s_level_measurement = measurement.find("div", class_="field-field-vrijednost-tekst")
if s_level_measurement:
level_measurement = s_level_measurement.find("div", class_="field-item").get_text("", strip=True)
value_measurement = ""
s_value = measurement.find("div", class_="field-field-vrijednost")
if s_value:
value_measurement = s_value.find("div", class_="field-item").get_text("", strip=True)
_LOGGER.debug("Original level: %s and value: %s", level_measurement, value_measurement)
if level_measurement == "" and value_measurement != "" and not value_measurement.replace(".","").isdigit():
level_measurement = value_measurement
value_measurement = ""
_LOGGER.debug("Aligned level: %s and value: %s", level_measurement, value_measurement)
d_measurements.append( {
"date": date_measurement,
"level": level_measurement,
"value": value_measurement
} )
if not d_first_measurement:
d_first_measurement = d_date_measurement
if not elem["max_first_date"] or d_first_measurement > elem["max_first_date"]:
elem["max_first_date"] = d_first_measurement
if not elem["min_date"] or d_date_measurement < elem["min_date"]:
elem["min_date"] = d_date_measurement
elem["plants"][plant_key] = {
"title": plant_title,
"small_image": plant_small_img,
"image": plant_big_img,
"description": plant_desc,
"measurements": d_measurements
}
s_major = plant.find("div", class_="prevladava-container")
if s_major:
for major in s_major.find_all("article"):
major_plant = major.find("span").get_text("", strip=True)
major_plant_key = major_plant.partition("(")[0].strip()
_LOGGER.debug("Found major plant: %s, key: %s", major_plant, major_plant_key)
s_plant_desc_text = major.find("div", class_="field-type-text-with-summary")
if s_plant_desc_text:
major_plant_desc = s_plant_desc_text.get_text("", strip=True)
if major.find("img"):
major_plant_small_img = "https://stampar.hr" + major.img["src"]
_LOGGER.debug("Major plant image: %s, and description: %s", major_plant_small_img, major_plant_desc)
elem["plants"][major_plant_key] = {
"title": major_plant,
"small_image": major_plant_small_img,
"image": "",
"description": major_plant_desc,
"measurements": d_measurements
}
return elem
        except requests.ConnectionError as err:
            _LOGGER.error("Requests Connection error: %s", err)
        except requests.HTTPError as err:
            _LOGGER.error("Requests HTTP error: %s", err)
        except requests.Timeout as err:
            _LOGGER.error("Requests Timeout error: %s", err)
        except requests.RequestException as err:
            _LOGGER.error("Requests exception: %s", err)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Štampar Peludna."""
if self._last_update and ( self._last_update + timedelta(hours=1) > datetime.now() ):
_LOGGER.debug("Skipping sensor data update, last_update was: %s", self._last_update)
return
        ## logic excluded, seems like data can be updated more than once per day
# if self._data.get("min_date") and self._data.get("min_date") >= date.today():
# _LOGGER.debug("Skipping sensor data update, already have data for today: %s >= %s", self._data.get("min_date"), date.today() )
# return
_LOGGER.debug("Doing sensor data update, last_update was: %s", self._last_update)
s_current_data = self.current_situation()
if s_current_data:
self._data = s_current_data
_LOGGER.debug("Sensor, current data: %s", self._data)
self._last_update = datetime.now()
_LOGGER.debug("Setting last_update to: %s", self._last_update)
_LOGGER.debug("Updating - finished.")
def get_data(self, variable):
"""Get the data."""
return self._data["plants"].get(variable)
def get_location(self):
"""Get the location title."""
return self._data["title"]
def closest_station(lat, lon):
"""Return the ID of the closest station to our lat/lon."""
_LOGGER.debug("Closest station, lat: %s, lon: %s", lat, lon)
if lat is None or lon is None:
return
def comparable_dist(stat_id):
"""Calculate the pseudo-distance from lat/lon."""
station_lon = STATIONS[stat_id]["long"]
station_lat = STATIONS[stat_id]["lat"]
return (lat - station_lat) ** 2 + (lon - station_lon) ** 2
return min(STATIONS, key=comparable_dist)
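# Worked example (for reference): closest_station(45.8150, 15.9819) returns
# "24" (Zagreb), the station with the smallest squared lat/long offset from the
# given coordinates; if either coordinate is None the function returns None.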
|
"""
User group management
"""
from future.utils import python_2_unicode_compatible
from ...utils import shell
from ..base import State
@python_2_unicode_compatible
class Group(State):
"""
Group state
"""
# States
EXISTS = 'exist'
ABSENT = 'absent'
def __init__(self, name, state=EXISTS, gid=None, **kwargs):
super(Group, self).__init__(**kwargs)
self.name = name
self.state = state
self.gid = gid
def __str__(self):
return '{name} ({gid})'.format(
name=self.name,
gid=self.gid or self.get_gid() or '-',
)
def get_gid(self):
ent = shell('getent group {}'.format(self.name), expect_errors=True)
if not ent:
return None
return int(ent.split(':')[-2])
def check(self):
self.report.debug('checking')
gid = self.get_gid()
self.report.debug('gid {}'.format(gid))
if gid:
if self.state == self.EXISTS:
self.report.debug('Already exists')
if self.gid and self.gid != gid:
raise ValueError('Group gid does not match system')
return True
return False
else:
if self.state == self.ABSENT:
self.report.debug('Does not exist')
return True
self.report.debug('Exists but should not')
# TODO: Check that the group is empty - can't delete without
return False
def apply(self):
gid = self.get_gid()
if gid:
if self.state == self.ABSENT:
self.report.info('Removing')
shell(['delgroup', self.name])
else:
if self.state == self.EXISTS:
self.report.info('Creating')
cmd = ['addgroup']
if self.gid:
cmd.extend(['--gid', self.gid])
cmd.append(self.name)
shell(cmd)
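# Usage sketch (kept as comments; the base State class is assumed to provide
# the report/shell plumbing used above):
#
#   group = Group('media', gid=2001)
#   if not group.check():
#       group.apply()   # runs `addgroup --gid 2001 media`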
|
from nestedListContains import nestedListContains
def test_one():
assert not nestedListContains([], 3)
def test_two():
assert nestedListContains([1,2,3,4,5], 5)
def test_three():
assert nestedListContains([1,[2,3],4], 2)
def test_four():
assert not nestedListContains([1,[2,3],4], 5)
def test_five():
assert nestedListContains([1,[2,[12],3],4], 12)
|
# -*- coding: utf-8 -*-
import bpy
import math
from mmd_tools.bpyutils import Props, SceneOp
class MMDCamera:
def __init__(self, obj):
if obj.type == 'CAMERA':
obj = obj.parent
if obj and obj.type == 'EMPTY' and obj.mmd_type == 'CAMERA':
self.__emptyObj = getattr(obj, 'original', obj)
else:
raise ValueError('%s is not MMDCamera'%str(obj))
@staticmethod
def isMMDCamera(obj):
if obj.type == 'CAMERA':
obj = obj.parent
return obj and obj.type == 'EMPTY' and obj.mmd_type == 'CAMERA'
@staticmethod
def addDrivers(cameraObj):
def __add_ortho_driver(id_data, data_path, expression, index=-1):
d = id_data.driver_add(data_path, index)
d.driver.type = 'SCRIPTED'
if '$dis' in expression:
var = d.driver.variables.new()
var.name = 'camera_dis'
var.type = 'TRANSFORMS'
target = var.targets[0]
target.id = cameraObj
target.transform_type = 'LOC_Y'
target.transform_space = 'LOCAL_SPACE'
expression = expression.replace('$dis', var.name)
if '$type' in expression:
var = d.driver.variables.new()
var.name = 'camera_type'
var.type = 'SINGLE_PROP'
target = var.targets[0]
target.id_type = 'OBJECT'
target.id = cameraObj
target.data_path = 'data.type'
expression = expression.replace('$type', var.name)
d.driver.expression = expression
__add_ortho_driver(cameraObj.data, 'ortho_scale', '25*(-$dis if $dis<0 else $dis)/45')
__add_ortho_driver(cameraObj, 'rotation_euler', 'pi if $type == 1 and $dis > 1e-5 else 0', index=1)
@staticmethod
def removeDrivers(cameraObj):
if cameraObj.type != 'CAMERA':
return
cameraObj.data.driver_remove('ortho_scale')
cameraObj.driver_remove('rotation_euler')
@staticmethod
def __focus_object_get(cameraObj):
camera = cameraObj.data
data = getattr(camera, 'dof', camera)
return data.focus_object if hasattr(data, 'focus_object') else data.dof_object
@staticmethod
def __focus_object_set(cameraObj, focus):
camera = cameraObj.data
data = getattr(camera, 'dof', camera)
if hasattr(data, 'focus_object'):
data.focus_object = focus
else:
data.dof_object = focus
@staticmethod
def convertToMMDCamera(cameraObj, scale=1.0):
if MMDCamera.isMMDCamera(cameraObj):
return MMDCamera(cameraObj)
empty = bpy.data.objects.new(name='MMD_Camera', object_data=None)
SceneOp(bpy.context).link_object(empty)
cameraObj.parent = empty
cameraObj.data.sensor_fit = 'VERTICAL'
cameraObj.data.lens_unit = 'MILLIMETERS' # MILLIMETERS, FOV
cameraObj.data.ortho_scale = 25*scale
cameraObj.data.clip_end = 500*scale
setattr(cameraObj.data, Props.display_size, 5*scale)
cameraObj.location = (0, -45*scale, 0)
cameraObj.rotation_mode = 'XYZ'
cameraObj.rotation_euler = (math.radians(90), 0, 0)
cameraObj.lock_location = (True, False, True)
cameraObj.lock_rotation = (True, True, True)
cameraObj.lock_scale = (True, True, True)
MMDCamera.__focus_object_set(cameraObj, empty)
MMDCamera.addDrivers(cameraObj)
empty.location = (0, 0, 10*scale)
empty.rotation_mode = 'YXZ'
setattr(empty, Props.empty_display_size, 5*scale)
empty.lock_scale = (True, True, True)
empty.mmd_type = 'CAMERA'
empty.mmd_camera.angle = math.radians(30)
empty.mmd_camera.persp = True
return MMDCamera(empty)
@staticmethod
def newMMDCameraAnimation(cameraObj, cameraTarget=None, scale=1.0, min_distance=0.1):
scene = bpy.context.scene
mmd_cam = bpy.data.objects.new(name='Camera', object_data=bpy.data.cameras.new('Camera'))
SceneOp(bpy.context).link_object(mmd_cam)
MMDCamera.convertToMMDCamera(mmd_cam, scale=scale)
mmd_cam_root = mmd_cam.parent
_camera_override_func = None
if cameraObj is None:
if scene.camera is None:
scene.camera = mmd_cam
return MMDCamera(mmd_cam_root)
_camera_override_func = lambda: scene.camera
_target_override_func = None
if cameraTarget is None:
_target_override_func = lambda camObj: MMDCamera.__focus_object_get(camObj) or camObj
action_name = mmd_cam_root.name
parent_action = bpy.data.actions.new(name=action_name)
distance_action = bpy.data.actions.new(name=action_name+'_dis')
MMDCamera.removeDrivers(mmd_cam)
from math import atan
from mathutils import Matrix, Vector
from mmd_tools.bpyutils import matmul
render = scene.render
factor = (render.resolution_y*render.pixel_aspect_y)/(render.resolution_x*render.pixel_aspect_x)
matrix_rotation = Matrix(([1,0,0,0], [0,0,1,0], [0,-1,0,0], [0,0,0,1]))
neg_z_vector = Vector((0,0,-1))
frame_start, frame_end, frame_current = scene.frame_start, scene.frame_end+1, scene.frame_current
frame_count = frame_end - frame_start
frames = range(frame_start, frame_end)
fcurves = []
for i in range(3):
fcurves.append(parent_action.fcurves.new(data_path='location', index=i)) # x, y, z
for i in range(3):
fcurves.append(parent_action.fcurves.new(data_path='rotation_euler', index=i)) # rx, ry, rz
fcurves.append(parent_action.fcurves.new(data_path='mmd_camera.angle')) # fov
fcurves.append(parent_action.fcurves.new(data_path='mmd_camera.is_perspective')) # persp
fcurves.append(distance_action.fcurves.new(data_path='location', index=1)) # dis
for c in fcurves:
c.keyframe_points.add(frame_count)
for f, x, y, z, rx, ry, rz, fov, persp, dis in zip(frames, *(c.keyframe_points for c in fcurves)):
scene.frame_set(f)
if _camera_override_func:
cameraObj = _camera_override_func()
if _target_override_func:
cameraTarget = _target_override_func(cameraObj)
cam_matrix_world = cameraObj.matrix_world
cam_target_loc = cameraTarget.matrix_world.translation
cam_rotation = matmul(cam_matrix_world, matrix_rotation).to_euler(mmd_cam_root.rotation_mode)
cam_vec = matmul(cam_matrix_world.to_3x3(), neg_z_vector)
if cameraObj.data.type == 'ORTHO':
cam_dis = -(9/5) * cameraObj.data.ortho_scale
if cameraObj.data.sensor_fit != 'VERTICAL':
if cameraObj.data.sensor_fit == 'HORIZONTAL':
cam_dis *= factor
else:
cam_dis *= min(1, factor)
else:
target_vec = cam_target_loc - cam_matrix_world.translation
cam_dis = -max(target_vec.length * cam_vec.dot(target_vec.normalized()), min_distance)
cam_target_loc = cam_matrix_world.translation - cam_vec*cam_dis
tan_val = cameraObj.data.sensor_height/cameraObj.data.lens/2
if cameraObj.data.sensor_fit != 'VERTICAL':
ratio = cameraObj.data.sensor_width/cameraObj.data.sensor_height
if cameraObj.data.sensor_fit == 'HORIZONTAL':
tan_val *= factor*ratio
else: # cameraObj.data.sensor_fit == 'AUTO'
tan_val *= min(ratio, factor*ratio)
x.co, y.co, z.co = ((f, i) for i in cam_target_loc)
rx.co, ry.co, rz.co = ((f, i) for i in cam_rotation)
dis.co = (f, cam_dis)
fov.co = (f, 2*atan(tan_val))
persp.co = (f, cameraObj.data.type != 'ORTHO')
persp.interpolation = 'CONSTANT'
for kp in (x, y, z, rx, ry, rz, fov, dis):
kp.interpolation = 'LINEAR'
MMDCamera.addDrivers(mmd_cam)
mmd_cam_root.animation_data_create().action = parent_action
mmd_cam.animation_data_create().action = distance_action
scene.frame_set(frame_current)
return MMDCamera(mmd_cam_root)
def object(self):
return self.__emptyObj
def camera(self):
for i in self.__emptyObj.children:
if i.type == 'CAMERA':
return i
        raise Exception('MMDCamera: no camera object found under the MMD camera empty')
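# Usage sketch (kept as comments; must be run inside Blender with mmd_tools
# loaded and an active camera object in the scene):
#
#   mmd_cam = MMDCamera.convertToMMDCamera(bpy.context.scene.camera, scale=0.08)
#   empty = mmd_cam.object()    # parent empty carrying the MMD camera properties
#   camera = mmd_cam.camera()   # the actual camera object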
|
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os, os.path as path, shutil, tempfile
from bes.testing.unit_test import unit_test
from bes.fs.testing.temp_content import temp_content as I
from bes.testing.unit_test_skip import skip_if_not_unix
class test_temp_content(unit_test):
def test_parse_string(self):
self.assertEqual( I( I.FILE, 'foo.txt', 'this is foo\nhaha', None ), I.parse('file foo.txt "this is foo\nhaha"') )
self.assertEqual( ( I.FILE, 'foo.txt', None, None ), I.parse('file foo.txt') )
self.assertEqual( ( I.DIR, 'foo', None, None ), I.parse('dir foo') )
self.assertEqual( ( I.FILE, 'foo.txt', None, 0o755 ), I.parse('file foo.txt "" 755') )
def test_parse_tuple(self):
self.assertEqual( ( I.FILE, 'foo.txt', None, None ), I.parse( ('file', 'foo.txt' ) ) )
self.assertEqual( ( I.FILE, 'foo.txt', 'this is foo\nhaha', None ), I.parse( ('file', 'foo.txt', 'this is foo\nhaha') ) )
def test_write(self):
i = I(I.FILE, 'foo.txt', 'this is foo\nhaha', 0o644)
tmp_dir = tempfile.mkdtemp()
i.write(tmp_dir)
p = path.join(tmp_dir, 'foo.txt')
self.assertTrue( path.exists(p) )
with open(p, 'r') as fin:
self.assertEqual( 'this is foo\nhaha', fin.read() )
shutil.rmtree(tmp_dir)
def test_write_with_filename(self):
tmp_file = self.make_temp_file(content = 'this is foo\nhaha')
i = I(I.FILE, 'foo.txt', 'file:{}'.format(tmp_file), 0o644)
tmp_dir = tempfile.mkdtemp()
i.write(tmp_dir)
p = path.join(tmp_dir, 'foo.txt')
self.assertTrue( path.exists(p) )
with open(p, 'r') as fin:
self.assertEqual( 'this is foo\nhaha', fin.read() )
shutil.rmtree(tmp_dir)
@skip_if_not_unix
def test_write_mode(self):
i = I(I.FILE, 'foo.txt', 'this is foo\nhaha', 0o644)
tmp_dir = tempfile.mkdtemp()
i.write(tmp_dir)
p = path.join(tmp_dir, 'foo.txt')
self.assertTrue( path.exists(p) )
self.assertEqual( 0o644, os.stat(p).st_mode & 0o777 )
def test_write_dir(self):
tmp_dir = tempfile.mkdtemp()
I.parse('d mydir').write(tmp_dir)
self.assertTrue( path.isdir(path.join(tmp_dir, 'mydir')) )
shutil.rmtree(tmp_dir)
def test_parse_sequence(self):
expected = (
( 'file', 'foo.txt', 'foo content', 0o755 ),
( 'file', 'bar.txt', 'bar content', 0o644 ),
( 'dir', 'baz', None, 0o700 ),
)
self.assertEqual( expected, I.parse_sequence([
'file foo.txt "foo content" 755',
'file bar.txt "bar content" 644',
'dir baz "" 700',
]) )
def test_write_items(self):
tmp_dir = tempfile.mkdtemp()
I.write_items([
'file a/b/c/foo.txt "foo content" 755',
'file d/e/bar.txt "bar content" 644',
'dir baz "" 700',
], tmp_dir)
self.assertTrue( path.isfile(path.join(tmp_dir, self.p('a/b/c/foo.txt'))) )
self.assertTrue( path.isfile(path.join(tmp_dir, self.p('d/e/bar.txt'))) )
self.assertTrue( path.isdir(path.join(tmp_dir, self.p('baz'))) )
shutil.rmtree(tmp_dir)
def test_write_items_with_parse(self):
items = I.parse_sequence([
'file a/b/c/foo.txt "foo content" 755',
'file d/e/bar.txt "bar content" 644',
'dir baz "" 700',
])
tmp_dir = tempfile.mkdtemp()
I.write_items(items, tmp_dir)
self.assertTrue( path.isfile(path.join(tmp_dir, self.p('a/b/c/foo.txt'))) )
self.assertTrue( path.isfile(path.join(tmp_dir, self.p('d/e/bar.txt'))) )
self.assertTrue( path.isdir(path.join(tmp_dir, self.p('baz'))) )
shutil.rmtree(tmp_dir)
if __name__ == "__main__":
unit_test.main()
|
import sys
sys.path.insert(0, './python/')
import caffe
import numpy as np
import pdb  # used as a sanity trap in apply_prune below
from lcg_random import lcg_rand
import ncs
from easydict import EasyDict as edict
# model files
proto='./models/lenet5/lenet_train_test.prototxt'
weights='./models/lenet5/caffe_lenet5_original.caffemodel'
solver_path='./models/lenet5/lenet_solver.prototxt'
es_method='ncs'
# cpu/gpu
caffe.set_mode_gpu()
caffe.set_device(0)
# init solver
solver = caffe.SGDSolver(solver_path)
# basic parameters
# accuracy constraint for pruning
acc_constrain=0.05
# stop iteration count
#niter = 20501
niter = 30001
# stop pruning iteration count
prune_stop_iter = 15000
# the list of layer names
layer_name = ['conv1','conv2','ip1', 'ip2']
# the dict of layer names to its arrary indices
layer_inds = {'conv1':0, 'conv2':1, 'ip1':2, 'ip2':3}
# the dict of crates for each layer
#crates = {'conv1':1.95, 'conv2':3.35, 'ip1':3.7, 'ip2':2.8}
#crates = {'conv1':-1.54, 'conv2':-1.40, 'ip1':-1.63, 'ip2':-1.37}
crates = {'conv1':0.001, 'conv2':0.001, 'ip1':0.001, 'ip2':0.001}
# the list of the crates
#crates_list = [1.95, 3.35, 3.7, 2.8]
#crates_list = [-1.54, -1.40, -1.63, -1.37]
crates_list = [0.001, 0.001, 0.001, 0.001]
# the gamma for each layer
gamma = {'conv1':0.00002, 'conv2':0.00002, 'ip1':0.0002, 'ip2':0.0002}
gamma_star = 0.0002
ncs_stepsize = 50
# random seed for numpy.random
seed=np.random.randint(1000000)
seed=981118#seed 93306,124x,0.04;78011, 127x, 0.05,430000,150x, 515769,185x
np.random.seed([seed])
# the dict to store intermedia results
es_cache = {}
#retrieval_tag=[]
r_count=0
# load the pretrained caffe model
if weights:
solver.net.copy_from(weights)
# definitions of several auxiliary methods
# run the network on its dataset
def test_net(thenet, _start='mnist', _count=1):
'''
thenet: the object of network
_start: the layer to start from
_count: the number of batches to run
'''
scores = 0
for i in range(_count):
thenet.forward(start=_start)
scores += thenet.blobs['accuracy'].data
return scores/_count
# Set the crates of each layer, the pruning will happen in the next forward action
def apply_prune(thenet, _crates):
'''
thenet: the model to be pruned
_crates: the list of crates for layers
'''
for _id in range(len(layer_name)):
if _crates[_id] < 0:
continue
layer_id = layer_name[_id]
mask0 = thenet.params[layer_id][2].data.ravel()[0]
if mask0 == 0:
thenet.params[layer_id][2].data.ravel()[0] = -_crates[_id]
elif mask0 == 1:
thenet.params[layer_id][2].data.ravel()[0] = 1+_crates[_id]
else:
pdb.set_trace()
# calculate the sparsity of a network model
def get_sparsity(thenet):
'''
thenet: the network for checking
'''
remain = 0
total = 0
for layer_id in layer_name:
remain += len(np.where(thenet.params[layer_id][2].data != 0)[0])
remain += len(np.where(thenet.params[layer_id][3].data != 0)[0])
total += thenet.params[layer_id][0].data.size
total += thenet.params[layer_id][1].data.size
#return total*1./(100.*remain)
return remain*1./total
# evaluate the accuracy of a network with a set of crates with respect to an original accuracy
def evaluate(thenet, x_set, batchcount=1, accuracy_ontrain=0.9988):
fitness=[]
X=[]
for x in x_set:
x_fit = 1.1
apply_prune(thenet,x)
acc = test_net(thenet, _start='conv1', _count=batchcount)
if acc >= accuracy_ontrain - acc_constrain:
x_fit = get_sparsity(thenet)
fitness.append(x_fit)
X.append(x)
return (X, fitness)
#------main--------------
solver.step(1)
# Adaptive dynamic surgery
for itr in range(niter):
#r = np.random.rand()
#if itr%500==0 and solver.test_nets[0].blobs['accuracy'].data >= 0.9909:
# retrieval_tag.append(itr)
tmp_crates=[]
tmp_ind = []
for ii in layer_name:
#tmp_crates.append(crates[ii]*(np.power(1+gamma[ii]*itr, -1)>np.random.rand()))
tmp_tag = np.power(1+gamma[ii]*itr, -1)>np.random.rand()
if tmp_tag:
tmp_ind.append(ii)
tmp_crates.append(tmp_tag*crates[ii])
if itr < 20000 and itr%10000 == 0:
ncs_stepsize = ncs_stepsize/10.
if itr%500 == 0:
print "Compression:{}, Accuracy:{}".format(1./get_sparsity(solver.net), test_net(solver.net, _count=1, _start="conv1"))
if len(tmp_ind)>0 and itr < prune_stop_iter:# run at window @6
_tmp_c = np.array(len(crates_list)*[-1.])
for t_name in tmp_ind:
_tmp_c[layer_inds[t_name]] = crates[t_name]
apply_prune(solver.net, _tmp_c)
#if len(tmp_ind)>1 and itr < prune_stop_iter:
if itr%1000==0 and len(tmp_ind)>1 and itr < prune_stop_iter:# run at window @3
accuracy_ = test_net(solver.net, _count=1, _start="conv1")
es = {}
if es_method == 'ncs':
__C = edict()
__C.parameters = {'reset_xl_to_pop':False,'init_value':tmp_crates, 'stepsize':ncs_stepsize, 'bounds':[0.0, 20.], 'ftarget':0, 'tmax':1600, 'popsize':10, 'best_k':1}
es = ncs.NCS(__C.parameters)
print '***************NCS initialization***************'
tmp_x_ = np.array(crates_list)
tmp_input_x = tmp_crates
for _ii in range(len(tmp_ind)):
tmp_x_[layer_inds[tmp_ind[_ii]]] = tmp_input_x[_ii]
_,tmp_fit = evaluate(solver.net, [tmp_x_], 1, accuracy_)
es.set_initFitness(es.popsize*tmp_fit)
print 'fit:{}'.format(tmp_fit)
print '***************NCS initialization***************'
while not es.stop():
x = es.ask()
X = []
for x_ in x:
tmp_x_ = np.array(crates_list)
for _ii in range(len(tmp_ind)):
tmp_x_[layer_inds[tmp_ind[_ii]]] = x_[_ii]
X.append(tmp_x_)
X_arrange,fit = evaluate(solver.net, X, 1, accuracy_)
X = []
for x_ in X_arrange:
tmp_x_ = np.array(len(tmp_ind)*[0.])
for _ii in range(len(tmp_ind)):
tmp_x_[_ii]= x_[layer_inds[tmp_ind[_ii]]]
X.append(tmp_x_)
#print X,fit
es.tell(X, fit)
#es.disp(100)
for _ii in range(len(tmp_ind)):
crates_list[layer_inds[tmp_ind[_ii]]] = es.result()[0][_ii]
for c_i in range(len(crates_list)):
crates[layer_name[c_i]] = crates_list[c_i]
es_cache[itr]={'compression':-es.result()[1], 'crates':crates_list[:]}
_tmp_c = np.array(len(crates_list)*[-1.])
for t_name in tmp_ind:
_tmp_c[layer_inds[t_name]] = crates[t_name]
apply_prune(solver.net, crates_list)
solver.step(1)
# record
import datetime
now = datetime.datetime.now()
time_styled = now.strftime("%Y-%m-%d %H:%M:%S")
out_ = open('record_{}.txt'.format(time_styled), 'w')
for key,value in es_cache.items():
out_.write("Iteration[{}]:\t{}x\t{}\n".format(key,value['compression'],value['crates']))
out_.close()
print 'random seed:{}'.format(seed)
#print "Retrieval accuracy @ iteration {}".format(retrieval_tag)
# save final model
#solver.net.save('./models/letnet5/9_letnet5_iter_{}.caffemodel'.format(itr+1))
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class ResPartner(models.Model):
_inherit = 'res.partner'
team_id = fields.Many2one(
'crm.team', 'Sales Team',
help='If set, sale team used notably for sales and assignations related to this partner')
|