# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from tempeh.configurations import datasets, models
# Helpers to restrict execution to a single dataset/model combination via environment
# variables, so runs can be parallelized through the DevOps pipeline.
def get_selected_datasets():
selected_dataset_name = os.getenv("TEST_DATASET")
if selected_dataset_name is None:
print("No specific dataset selected - using all available.")
return datasets.values()
print("dataset '{}' selected.".format(selected_dataset_name))
return [datasets[selected_dataset_name]]
def get_selected_models():
selected_model_name = os.getenv("TEST_MODEL")
print(selected_model_name)
if selected_model_name is None:
print("No specific model selected - using all available.")
return models.values()
print("model '{}' selected.".format(selected_model_name))
return [models[selected_model_name]]
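# A minimal sketch (not part of the original module, assumes pytest) of how these helpers
# could feed parametrized tests; TEST_DATASET / TEST_MODEL then select one combination per CI job.
import pytest

@pytest.mark.parametrize("Dataset", get_selected_datasets())
@pytest.mark.parametrize("Model", get_selected_models())
def test_combination_selected(Dataset, Model):
    # Smoke check only: the registry returned concrete dataset/model classes.
    assert Dataset is not None and Model is not None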
|
from ost import io, seq
from promod3 import modelling, loop
from pathlib import Path
from sys import argv
# Fetch the PDB file; the working directory is passed on the command line and the PDB id
# (with optional chain) is read from <dir>/var/pdbID.txt.
def get_id(dir):
path = dir+'/var/pdbID.txt'
pdb = ''
with open(path,'r') as p:
pdb=p.read()
pdb = pdb.split('.')
return pdb
def get_fasta(pdb,dir):
# print(''.join([res.one_letter_code for r in pdb.residues]))
# The one-liner above does not work because it iterates over `r` but references `res`;
# build the sequence explicitly instead.
res_list=[]
for res in pdb.residues:
o_res = res.one_letter_code
if o_res != '?':
res_list.append(o_res)
res_string = ''.join(res_list)
path = Path(dir+'/var/template_from_PDB.fasta')
f = open(path,'w')
f.write(res_string)
f.close()
def get_pdb(pdb_tup,dir):
if len(pdb_tup)!=2:
id = pdb_tup[0]
chain = ''
else:
id = pdb_tup[0]
chain = pdb_tup[1]
p=io.LoadPDB(id,seqres=True,remote=True,remote_repo='pdb')
pdb=p[0]
pdb_path = dir+'/var/template.pdb'
if len(chain) != 0:
l = pdb.GetChainList()
print('Saving {} chain {} to file'.format(id,chain))
query = 'chain='+chain
pdb_crn = pdb.Select(query) # returns entityview object
io.SavePDB(pdb_crn,pdb_path)
pdb=io.LoadPDB(pdb_path)
#get_fasta(p,dir)
else:
io.SavePDB(pdb,pdb_path)
#get_fasta(pdb,dir)
#for res in pdb.residues:
# print(res.one_letter_code)
#print(''.join([res.one_letter_code for r in pdb.residues]))
# Extract the one-letter sequence and write it to a FASTA file (same logic as get_fasta).
res_list=[]
for res in pdb.residues:
o_res = res.one_letter_code
if o_res != '?':
res_list.append(o_res)
res_string = ''.join(res_list)
path = Path(dir+'/var/template_from_PDB.fasta')
f = open(path,'w')
f.write(res_string)
f.close()
print('pdb saved to file')
return pdb
if __name__ == '__main__':
dir = argv[1]
pdb_id = get_id(dir)
print('Getting pdb from database...')
pdb = get_pdb(pdb_id,dir)
print('extracting sequence from template...')
get_fasta(pdb, dir)
|
from drain.step import *
from drain import step
import numpy as np
import pandas as pd
import tempfile
class Scalar(Step):
def __init__(self, value):
Step.__init__(self, value=value)
def run(self):
return self.value
class Add(Step):
def run(self, *values):
return sum(values)
class Divide(Step):
def run(self, numerator, denominator):
return numerator / denominator
def test_run(drain_setup):
s = Add(inputs = [Scalar(value=value) for value in range(1,10)])
s.execute()
assert s.result == 45
def test_run_map_results():
s = Divide(inputs=[MapResults(
inputs=[Scalar(value=1), Scalar(value=2)],
mapping=['denominator', 'numerator'])])
s.execute()
assert s.result == 2
def test_map_results():
a = Scalar(1)
b = Scalar(2)
a.execute()
b.execute()
c = MapResults(inputs=[a,b], mapping=['a','b'])
c.execute()
assert c.result == {'a':1, 'b':2}
def test_map_results_dict():
a = Scalar(1)
b = Scalar(2)
a.execute()
b.execute()
c = MapResults(inputs=[a,b], mapping=['a','b'])
c.execute()
assert c.result == {'a':1, 'b':2}
def test_map_results_list():
a = Scalar([1,2])
a.execute()
c = MapResults(inputs=[a], mapping=[['a','b']])
c.execute()
assert c.result == {'a':1, 'b':2}
def test_map_results_default():
a = Scalar([1,2])
a.execute()
c = MapResults(inputs=[a], mapping=[MapResults.DEFAULT])
c.execute()
assert c.result == [1,2]
class DumpStep(Step):
def __init__(self, n, n_df, return_list):
# number of objects to return and number of them to be dataframes
# and whether to use a list or dict
if n_df is None:
n_df = n
Step.__init__(self, n=n, n_df=n_df, return_list=return_list)
self.target = True
def run(self):
l = ['a']*self.n + [pd.DataFrame(np.arange(5))]*self.n_df
if len(l) == 1:
return l[0]
if self.return_list:
return l
else:
d = {'k'+str(k):v for k,v in zip(range(len(l)), l)}
return d
def test_dump_joblib():
t = DumpStep(n=10, n_df=0, return_list=False)
t.execute()
r = t.result
t.dump()
t.load()
assert r == t.result
def test_dump_hdf_single():
t = DumpStep(n=0, n_df=1, return_list=False)
t.execute()
r = t.result
t.dump()
t.load()
assert r.equals(t.result)
def test_dump_hdf_list():
t = DumpStep(n=0, n_df=5, return_list=True)
t.execute()
r = t.result
t.dump()
t.load()
for a,b in zip(r,t.result):
assert a.equals(b)
def test_dump_hdf_dict():
t = DumpStep(n=0, n_df=5, return_list=False)
t.execute()
r = t.result
t.dump()
t.load()
for k in r:
assert r[k].equals(t.result[k])
def test_expand_inputs():
s = Step(a=1, b={'c':Step(c=2)})
assert step._expand_inputs(s) == {s, Step(c=2)}
def test_collect_kwargs():
s = Step(a=1, b={'c':Step(c=2)})
s.name = 'Step2'
assert step._collect_kwargs(s) == {
'Step2': {'a':1},
'Step': {'c':2}
}
def test_call():
s = Call(pd.DataFrame, data=[0,10])
t = Call(s, 'mean')
t.execute()
assert t.result.values[0] == 5
def test_get_result():
c = Call(dict, a=1)
d = GetItem(c, 'a')
d.execute()
assert d.result == 1
|
from flask import render_template
from app import app, db
from werkzeug.exceptions import HTTPException
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500
@app.errorhandler(Exception)
def all_exception_handler(e):
""" 处理所有的 HTTP 错误 """
# 对于 HTTP 异常,返回自带的错误描述和状态码
# 这些异常类在 Werkzeug 中定义,均继承 HTTPException 类
# 500 未知异常
# result = render_template('error.html', description='Sorry, internal error.'), 500
result = None
if isinstance(e, HTTPException):
result = render_template('error.html',
description=e.description), e.code
return result  # return the response and status code
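# A minimal sketch (hypothetical route, not part of the original app): any aborted request,
# e.g. a 403, reaches all_exception_handler because a handler registered for Exception also
# catches Werkzeug HTTPExceptions, so error.html is rendered with the built-in description.
from flask import abort

@app.route('/forbidden-demo')
def forbidden_demo():
    abort(403)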
|
import wikipedia
from birdy.models import Bird
a = Bird.objects.all()
for i in a:
b = wikipedia.page(i.name)
i.url = b.url
i.save()  # persist the fetched URL on the model; without this the change is lost
|
from model import photoshop
import os
from PIL import Image
if __name__ == "__main__":
ps1 = photoshop.Photoshop()
#path = '/Users/dennisping/Documents/image-processor-mvc/res/lowfi.jpg'
#path = '/Users/dennisping/Documents/image-processor-mvc/res/city_nezuko_by_eternal_s.jpg'
path = '/Users/dennisping/Documents/image-processor-mvc/res/kda-nightmarket-720.png'
#path = '/Users/dennisping/Documents/image-processor-mvc/res/kda-nightmarket-1080.png'
#path = '/Users/dennisping/Documents/image-processor-mvc/res/Jiufen.png'
ps1.load(path)
ps1.dmcColor(100)
myImage = ps1.getImage()
myImage.show()
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from livesettings import config_value
from satchmo_ext.productratings.queries import highest_rated
def display_bestratings(request, count=0, template='product/best_ratings.html'):
"""Display a list of the products with the best ratings in comments"""
if count is None:
count = config_value('PRODUCT','NUM_DISPLAY')
ctx = RequestContext(request, {
'products' : highest_rated(),
})
return render_to_response(template, ctx)
|
"""
methods to probe a WAV file for various kinds of production metadata.
Go to the documentation for wavinfo.WavInfoReader for more information.
"""
from .wave_reader import WavInfoReader
from .riff_parser import WavInfoEOFError
__version__ = '1.6.3'
__author__ = 'Jamie Hardt <jamiehardt@gmail.com>'
__license__ = "MIT"
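# Minimal usage sketch (hypothetical filename, not part of the package):
#     info = WavInfoReader('session_take1.wav')   # metadata is parsed on construction
#     # WavInfoEOFError is raised for a truncated or malformed file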
|
thinkers = ['Plato','PlayDo','Gumby']
while True:
try:
thinker = thinkers.pop()
print(thinker)
except IndexError as e:
print("We tried to pop too many thinkers")
print(e)
break
|
import datetime
import asyncio
import gzip
import json
import urllib.parse
import bisect
from urllib.parse import parse_qs
import yarl
import pendulum
from ccxt import huobipro
from uxapi import register_exchange
from uxapi import UXSymbol
from uxapi import WSHandler
from uxapi import UXPatch
from uxapi import Queue
from uxapi import Awaitables
from uxapi.exchanges.ccxt.huobidm import huobidm
from uxapi.helpers import (
keysort,
hmac,
extend,
is_sorted
)
@register_exchange('huobi')
class Huobi:
def __init__(self, market_type, config):
if market_type in ('spot', 'margin'):
cls = Huobipro
else:
cls = Huobidm
cls.id = type(self).id
self._exchange = cls(market_type, config)
def __getattr__(self, attr):
return getattr(self._exchange, attr)
class Huobipro(UXPatch, huobipro):
def describe(self):
return self.deep_extend(super().describe(), {
'has': {
'fetchCurrencies': True,
'fetchOrders': True,
'fetchOpenOrders': True,
'cancelOrders': True,
'cancelAllOrders': True,
},
'urls': {
'wsapi': {
'market': 'wss://api.huobi.pro/ws',
'private': 'wss://api.huobi.pro/ws/v2',
'private_aws': 'wss://api-aws.huobi.pro/ws/v2',
},
},
'wsapi': {
'market': {
'ticker': 'market.{symbol}.detail',
'ohlcv': 'market.{symbol}.kline.{period}',
'orderbook': 'market.{symbol}.depth.{level}',
'mbp': 'market.{symbol}.mbp.{level}',
'trade': 'market.{symbol}.trade.detail',
'bbo': 'market.{symbol}.bbo',
},
'private': {
'myorder': 'orders#{symbol}',
'accounts': 'accounts.update#{mode}',
'clearing': 'trade.clearing#{symbol}'
}
},
})
def fetch_accounts(self, params=None):
accounts = super().fetch_accounts(params or {})
if self.market_type != 'margin':
return accounts
results = []
for account in accounts:
if account['type'] == 'super-margin':
results.insert(0, account)
else:
results.append(account)
return results
def order_book_merger(self):
return HuobiproOrderBookMerger(self)
def _fetch_markets(self, params=None):
markets = super()._fetch_markets(params)
for market in markets:
market['type'] = 'spot'
return markets
def _create_order(self, uxsymbol, type, side, amount, price=None, params=None):
self.load_accounts()
account_type = self.accounts[0]['type']
source = f'{account_type}-api'
params = self.extend({'source': source}, params or {})
return super()._create_order(uxsymbol, type, side, amount, price, params)
def _cancel_orders(self, ids, uxsymbol, params):
params = params or {}
request = {
'order-ids': ids,
}
return self.privatePostOrderOrdersBatchcancel(self.extend(request, params))
def _cancel_all_orders(self, uxsymbol, params):
params = params or {}
request = {}
if not self.safe_string(params, 'account-id'):
self.loadAccounts()
request['account-id'] = self.accounts[0]['id']
if not self.safe_string(params, 'symbol'):
if uxsymbol:
request['symbol'] = self.convert_symbol(uxsymbol)
return self.privatePostOrderOrdersBatchCancelOpenOrders(
self.extend(request, params))
def convert_topic(self, uxtopic):
maintype = uxtopic.maintype
subtypes = uxtopic.subtypes
wsapi_type = self.wsapi_type(uxtopic)
template = self.wsapi[wsapi_type][maintype]
if maintype == 'accounts':
mode = subtypes[0] if subtypes else '1'
return template.format(mode=mode)
params = {}
uxsymbol = UXSymbol(uxtopic.exchange_id, uxtopic.market_type,
uxtopic.extrainfo)
if uxsymbol.name == '*':
params['symbol'] = '*'
else:
params['symbol'] = self.market_id(uxsymbol)
if maintype in ['orderbook', 'mbp']:
if not subtypes:
assert maintype == 'orderbook'
params['level'] = 'step0'
elif subtypes[0] == 'full':
assert maintype == 'orderbook'
template = self.wsapi[wsapi_type]['mbp']
params['level'] = '150'
else:
params['level'] = subtypes[0]
if maintype == 'ohlcv':
params['period'] = self.timeframes[subtypes[0]]
return template.format(**params)
def wshandler(self, topic_set):
wsapi_types = {self.wsapi_type(topic) for topic in topic_set}
if len(wsapi_types) > 1:
raise ValueError('invalid topics')
wsapi_type = wsapi_types.pop()
wsurl = self.urls['wsapi'][wsapi_type]
return HuobiWSHandler(self, wsurl, topic_set, wsapi_type)
def wsapi_type(self, uxtopic):
for type in self.wsapi:
if uxtopic.maintype in self.wsapi[type]:
return type
raise ValueError('invalid topic')
class _HuobiOrderBookMerger:
def merge_asks_bids(self, snapshot_lst, patch_lst, price_lst, negative_price):
for item in patch_lst:
price, amount = item
if negative_price:
price = -price
i = bisect.bisect_left(price_lst, price)
if i != len(price_lst) and price_lst[i] == price:
if amount == 0:
price_lst.pop(i)
snapshot_lst.pop(i)
else:
snapshot_lst[i] = item
else:
if amount != 0:
price_lst.insert(i, price)
snapshot_lst.insert(i, item)
class HuobiproOrderBookMerger(_HuobiOrderBookMerger):
def __init__(self, exchange):
self.exchange = exchange
self.snapshot = None
self.topic = None
self.wsreq = None
self.wsreq_task = None
self.future = None
self.cache = []
self.prices = None
def __call__(self, patch):
if self.snapshot:
self.merge(patch)
return self.snapshot
if self.wsreq is None:
self.topic = patch['ch']
self.start_wsreq()
self.cache.append(patch)
if not self.future:
self.future = self.wsreq.request({
'req': self.topic
})
if not self.future.done():
raise StopIteration
snapshot = self.future.result()
self.future = None
seqnums = [item['tick']['prevSeqNum'] for item in self.cache]
snapshot_seq = snapshot['data']['seqNum']
assert is_sorted(seqnums)
i = bisect.bisect_left(seqnums, snapshot_seq)
if i != len(seqnums) and seqnums[i] == snapshot_seq:
self.stop_wsreq()
self.cache = self.cache[i:]
self.on_snapshot(snapshot)
return self.snapshot
raise StopIteration
def on_snapshot(self, snapshot):
self.snapshot = {
'ch': snapshot['rep'],
'tick': snapshot['data'],
}
self.prices = {
'asks': [item[0] for item in snapshot['data']['asks']],
'bids': [-item[0] for item in snapshot['data']['bids']]
}
for patch in self.cache:
self.merge(patch)
self.cache = None
def merge(self, patch):
snapshot_tick = self.snapshot['tick']
patch_tick = patch['tick']
if snapshot_tick['seqNum'] != patch_tick['prevSeqNum']:
raise RuntimeError('seqNum error')
snapshot_tick['seqNum'] = patch_tick['seqNum']
snapshot_tick['ts'] = patch['ts']
self.merge_asks_bids(snapshot_tick['asks'], patch_tick['asks'],
self.prices['asks'], False)
self.merge_asks_bids(snapshot_tick['bids'], patch_tick['bids'],
self.prices['bids'], True)
def start_wsreq(self):
self.wsreq = HuobiWSReq(self.exchange, 'market')
async def run():
try:
await self.wsreq.run()
except asyncio.CancelledError:
pass
self.wsreq_task = Awaitables.default().create_task(run(), 'wsreq')
def stop_wsreq(self):
self.wsreq_task.cancel()
self.wsreq_task = None
self.wsreq = None
class Huobidm(UXPatch, huobidm):
def __init__(self, market_type, config=None):
return super().__init__(market_type, extend({
'options': {'defaultType': market_type}
}, config or {}))
def describe(self):
return self.deep_extend(super().describe(), {
'deliveryHourUTC': 8,
'has': {
'fetchOrders': True,
'fetchOpenOrders': True,
'cancelAllOrders': True,
},
'urls': {
'wsapi': {
'market': {
'futures': 'wss://api.hbdm.com/ws',
'swap': 'wss://api.hbdm.com/swap-ws',
'swap.usdt': 'wss://api.hbdm.com/linear-swap-ws',
},
'private': {
'futures': 'wss://api.hbdm.com/notification',
'swap': 'wss://api.hbdm.com/swap-notification',
'swap.usdt': 'wss://api.hbdm.com/linear-swap-notification',
},
'index': 'wss://api.hbdm.com/ws_index',
},
},
'wsapi': {
'market': {
'ohlcv': 'market.{symbol}.kline.{period}',
'orderbook': 'market.{symbol}.depth.{level}',
'high_freq': 'market.{symbol}.depth.size_{level}.high_freq?data_type={data_type}', # noqa: E501
'ticker': 'market.{symbol}.detail',
'bbo': 'market.{symbol}.bbo',
'trade': 'market.{symbol}.trade.detail',
},
'private': {
'myorder': 'orders.{symbol}',
'accounts': 'accounts.{symbol}',
'position': 'positions.{symbol}',
'matchOrders': 'matchOrders.{symbol}',
'trigger_order': 'trigger_order.{symbol}',
},
'index': {
'index': 'market.{symbol}.index.{period}', # futures
'premium_index': 'market.{symbol}.premium_index.{period}', # swap
'estimated_rate': 'market.{symbol}.estimated_rate.{period}', # swap
'basis': 'market.{symbol}.basis.{period}.{basis_price_type}',
},
'public': {
'liquidation_orders': 'public.{symbol}.liquidation_orders',
'funding_rate': 'public.{symbol}.funding_rate', # swap
'contract_info': 'public.{symbol}.contract_info',
}
},
})
def order_book_merger(self):
return HuobidmOrderBookMerger()
def _fetch_markets(self, params=None):
markets = super()._fetch_markets(params)
for market in markets:
contract_value = self.safe_float(market['info'], 'contract_size')
market['contractValue'] = contract_value
if market['type'] == 'futures':
delivery_date = self.safe_string(market['info'], 'delivery_date')
if delivery_date:
delivery_time = pendulum.from_format(delivery_date, 'YYYYMMDD')
delivery_time = delivery_time.add(hours=self.deliveryHourUTC)
market['deliveryTime'] = delivery_time.to_iso8601_string()
else:
market['deliveryTime'] = None
return markets
def convert_symbol(self, uxsymbol):
market_type = uxsymbol.market_type
base, quote = uxsymbol.base_quote
if market_type == 'futures':
return f'{base}_{uxsymbol.contract_expiration}'
elif market_type == 'swap':
return f'{base}-{quote}'
elif market_type == 'swap.usdt':
return f'{quote}-{base}'
else:
raise ValueError(f'invalid symbol: {uxsymbol}')
def convert_topic(self, uxtopic):
wsapi_type = self.wsapi_type(uxtopic)
maintype = uxtopic.maintype
subtypes = uxtopic.subtypes
params = {}
if wsapi_type in ('private', 'public'):
params['symbol'] = uxtopic.extrainfo
elif wsapi_type == 'index':
params['symbol'] = uxtopic.extrainfo
params['period'] = self.timeframes[subtypes[0]]
if maintype == 'basis':
params['basis_price_type'] = subtypes[1]
elif wsapi_type == 'market':
uxsymbol = UXSymbol(uxtopic.exchange_id, uxtopic.market_type,
uxtopic.extrainfo)
params['symbol'] = self.market_id(uxsymbol)
if maintype == 'orderbook':
if not subtypes:
params['level'] = 'step0'
elif subtypes[0] == 'full':
maintype = 'high_freq'
params['level'] = '150'
params['data_type'] = 'incremental'
else:
params['level'] = subtypes[0]
elif maintype == 'high_freq':
assert subtypes and len(subtypes) == 2
params['level'] = subtypes[0]
params['data_type'] = subtypes[1]
elif maintype == 'ohlcv':
params['period'] = self.timeframes[subtypes[0]]
template = self.wsapi[wsapi_type][maintype]
return template.format(**params)
def wshandler(self, topic_set):
wsapi_types = {self.wsapi_type(topic) for topic in topic_set}
if len(wsapi_types) > 1:
raise ValueError('invalid topics')
wsapi_type = wsapi_types.pop()
if wsapi_type == 'index':
wsurl = self.urls['wsapi'][wsapi_type]
elif wsapi_type == 'public':
wsurl = self.urls['wsapi']['private'][self.market_type]
else:
wsurl = self.urls['wsapi'][wsapi_type][self.market_type]
return HuobiWSHandler(self, wsurl, topic_set, wsapi_type)
def wsapi_type(self, uxtopic):
for key in self.wsapi:
if uxtopic.maintype in self.wsapi[key]:
return key
raise ValueError('invalid topic')
class HuobidmOrderBookMerger(_HuobiOrderBookMerger):
def __init__(self):
self.snapshot = None
self.prices = None
def __call__(self, patch):
if patch['tick']['event'] == 'snapshot':
self.on_snapshot(patch)
elif patch['tick']['event'] == 'update':
self.merge(patch)
else:
raise ValueError('unexpected event')
return self.snapshot
def on_snapshot(self, snapshot):
self.snapshot = snapshot
self.prices = {
'asks': [item[0] for item in snapshot['tick']['asks']],
'bids': [-item[0] for item in snapshot['tick']['bids']]
}
def merge(self, patch):
self.snapshot['ts'] = patch['ts']
snapshot_tick = self.snapshot['tick']
patch_tick = patch['tick']
if snapshot_tick['version'] + 1 != patch_tick['version']:
raise RuntimeError('version error')
snapshot_tick.update({
'mrid': patch_tick['mrid'],
'id': patch_tick['id'],
'ts': patch_tick['ts'],
'version': patch_tick['version'],
})
self.merge_asks_bids(snapshot_tick['asks'], patch_tick['asks'],
self.prices['asks'], False)
self.merge_asks_bids(snapshot_tick['bids'], patch_tick['bids'],
self.prices['bids'], True)
class HuobiWSHandler(WSHandler):
def __init__(self, exchange, wsurl, topic_set, wsapi_type):
super().__init__(exchange, wsurl, topic_set)
self.wsapi_type = wsapi_type
self.market_type = exchange.market_type
def on_connected(self):
if self.market_type != 'spot' and self.wsapi_type == 'private':
self.pre_processors.append(self.on_error_message)
def on_error_message(self, msg):
if msg['op'] == 'close':
raise RuntimeError('server closed')
elif msg['op'] == 'error':
raise RuntimeError('invalid op or inner error')
else:
return msg
def create_keepalive_task(self):
self.keepalive_msq = Queue()
return super().create_keepalive_task()
async def keepalive(self):
while True:
msg = await self.keepalive_msq.get()
try:
while True:
msg = self.keepalive_msq.get_nowait()
except asyncio.QueueEmpty:
pass
if 'ping' in msg:
# {"ping": 18212558000}
pong = {'pong': msg['ping']}
elif msg.get('op') == 'ping':
# {"op": "ping", "ts": 1492420473058}
pong = {'op': 'pong', 'ts': msg['ts']}
else:
# {"action": "ping", "data": {"ts": 1575537778295}}
pong = {
'action': 'pong',
'data': {'ts': msg['data']['ts']}
}
await self.send(pong)
def on_keepalive_message(self, msg):
if ('ping' in msg or msg.get('op') == 'ping'
or msg.get('action') == 'ping'):
self.keepalive_msq.put_nowait(msg)
raise StopIteration
else:
return msg
@property
def login_required(self):
return 'private' in self.wsapi_type
def on_login_message(self, msg):
login_msg = False
login_ok = False
if msg.get('op') == 'auth':
login_msg = True
login_ok = (msg['err-code'] == 0)
elif msg.get('action') == 'req' and msg.get('ch') == 'auth':
login_msg = True
login_ok = (msg['code'] == 200)
if login_msg:
if login_ok:
self.logger.info('logged in')
self.on_logged_in()
raise StopIteration
else:
raise RuntimeError(f'login failed: {msg}')
return msg
def login_command(self, credentials):
signature_method = 'HmacSHA256'
apikey = credentials['apiKey']
now = datetime.datetime.utcnow()
timestamp = now.isoformat(timespec='seconds')
if self.market_type == 'spot':
params = keysort({
'signatureMethod': signature_method,
'signatureVersion': '2.1',
'accessKey': apikey,
'timestamp': timestamp
})
else:
params = keysort({
'SignatureMethod': signature_method,
'SignatureVersion': '2',
'AccessKeyId': apikey,
'Timestamp': timestamp
})
auth = urllib.parse.urlencode(params)
url = yarl.URL(self.wsurl)
payload = '\n'.join(['GET', url.host, url.path, auth])
signature_bytes = hmac(
bytes(credentials['secret'], 'utf8'),
bytes(payload, 'utf8'),
)
signature = signature_bytes.decode()
if self.market_type == 'spot':
params.update({
'authType': 'api',
'signature': signature
})
request = {
'action': 'req',
'ch': 'auth',
'params': params
}
else:
request = extend({
'op': 'auth',
'Signature': signature,
'type': 'api'
}, params)
return request
def create_subscribe_task(self):
topics = {}
for topic in self.topic_set:
converted = self.convert_topic(topic)
ch, params = self._split_params(converted)
topics[ch] = params
self.pre_processors.append(self.on_subscribe_message)
self.pending_topics = set(topics)
return self.awaitables.create_task(
self.subscribe(topics), 'subscribe')
def on_subscribe_message(self, msg):
sub_msg = False
sub_ok = False
topic = None
if 'subbed' in msg: # huobipro & huobidm market
sub_msg = True
sub_ok = (msg['status'] == 'ok')
topic = msg['subbed']
elif msg.get('op') == 'sub': # huobidm private
sub_msg = True
sub_ok = (msg['err-code'] == 0)
topic = msg['topic']
elif msg.get('action') == 'sub': # huobipro private
sub_msg = True
sub_ok = (msg['code'] == 200)
topic = msg['ch']
if sub_msg:
if sub_ok:
self.logger.info(f'{topic} subscribed')
self.on_subscribed(topic)
raise StopIteration
else:
raise RuntimeError(f'subscribe failed: {msg}')
return msg
def subscribe_commands(self, topics):
commands = []
for ch, params in topics.items():
if self.wsapi_type in ('private', 'public'):
if self.market_type == 'spot':
request = {'action': 'sub', 'ch': ch}
else:
request = {'op': 'sub', 'topic': ch}
else:
request = {'sub': ch}
request.update(params)
commands.append(request)
return commands
@staticmethod
def _split_params(topic):
ch, *params_string = topic.split('?', maxsplit=1)
params = parse_qs(params_string[0]) if params_string else {}
params = {k: lst[0] for k, lst in params.items()}
return ch, params
def decode(self, data):
# huobipro private returns str, not bytes
if isinstance(data, bytes):
msg = gzip.decompress(data).decode()
else:
msg = data
return json.loads(msg)
class HuobiWSReq(HuobiWSHandler):
def __init__(self, exchange, wsapi_type):
wsurl = exchange.urls['wsapi'][wsapi_type]
super().__init__(exchange, wsurl, None, wsapi_type)
self.queue = Queue()
self.future = None
self.timeout = 10.0 # in seconds
def on_prepared(self):
self.awaitables.create_task(self.sendreq(), 'sendreq')
async def do_run(self, collector):
await super().do_run(lambda r: self.future.set_result(r))
async def sendreq(self):
while True:
self.future, req = await self.queue.get()
await self.send(req)
await asyncio.wait_for(self.future, self.timeout)
def request(self, req):
loop = asyncio.get_running_loop()
future = loop.create_future()
self.queue.put_nowait((future, req))
return future
|
import typing as _t
from pathlib import Path as _Path
from .dependency import dependency as _dep
class NotPossibleToMinify(Exception): pass
class Minifier:
HTML = 'https://html-minifier.com/raw'
CSS = 'https://cssminifier.com/raw'
JS = 'https://javascript-minifier.com/raw'
EXTENSIONS = ('.html', '.css', '.js')
def __init__(self):
self._current_file: _t.Optional[_Path] = None
self._current_overwrite: _t.Optional[bool] = None
def __call__(self, fname: _Path, *, overwrite: bool = False) -> _Path:
return self.minify(fname, overwrite)
@property
def url_for(self) -> str:
suffix = self._current_file.suffix.lower()
if suffix == '.html':
return self.HTML
elif suffix == '.css':
return self.CSS
elif suffix == '.js':
return self.JS
raise NotPossibleToMinify(f'It is not possible to minify the file {self._current_file!s}.')
@property
def text(self) -> str:
with _dep.open_file(self._current_file) as f:
return f.read()
@property
def destination(self) -> _Path:
if self._current_overwrite:
return self._current_file
else:
return self._current_file.with_suffix(f'.min{self._current_file.suffix}')
def save(self, fname: _Path, text: str) -> None:
with _dep.open_file(fname, 'w') as f:
f.write(text)
def minify(self, fname: _t.Union[str, _Path], overwrite: bool = False) -> _Path:
self._current_file = _dep.path_class(fname)
self._current_overwrite = overwrite
dest = self.destination
self.save(
fname=dest,
text=_dep.requests_post(self.url_for, data={'input': self.text}).text
)
return dest
def minify_to_text(self, fname: _t.Union[str, _Path]) -> str:
self._current_file = _dep.path_class(fname)
return _dep.requests_post(self.url_for, data={'input': self.text}).text
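# A minimal usage sketch (hypothetical file path): calling the instance posts the file's text
# to the matching online service and writes e.g. styles.min.css next to the original.
if __name__ == '__main__':
    minifier = Minifier()
    print(minifier(_Path('static/styles.css')))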
|
import os
from datetime import datetime, timedelta
import uuid
from .file_manager import FileManager
from .validation import file_exists
class Events(object):
def __init__(self, filename):
self.file_manager = FileManager(filename)
self.events = []
self.specific_events = []
def get_events(self):
if not file_exists(self.file_manager.filename):
self.events = []
self.__save_events()
return
self.events = self.__load_events()
if len(self.events) > 0:
self.events = sorted(
self.events, key=lambda e: datetime.strptime(e['date'], '%d.%m'))
cei = self.__get_closest_event_index(datetime.now())
self.events = self.events[cei:] + self.events[0:cei]
def get_specific_events(self, start=0, duration=14):
if not len(self.events) > 0:
self.specific_events = []
return
if not start <= duration:
print("Error! Incorrect values of arguments!")
self.specific_events = []
return
s_date = datetime.now() + timedelta(days=start)
si = self.__get_closest_event_index(s_date)
e_date = datetime.now() + timedelta(days=duration+1)
ei = self.__get_closest_event_index(e_date)
if si <= ei:
self.specific_events = self.events[si:ei]
else:
self.specific_events = self.events[si:] + self.events[0:ei]
def add_event(self, name, day, month, year):
event_id = str(uuid.uuid4())[0:5]
date_string = "%s.%s" % (day, month)
event = {}
event["id"], event["name"] = event_id, name
event["date"], event["year"] = date_string, year
self.events.append(event)
data = {}
data['events'] = self.events
self.file_manager.save_to_json_file(data)
def edit_event(self, event_id, selected_attribute, value):
events_with_id = [e for e in self.events if e["id"] == event_id]
if len(events_with_id) == 0:
print("There is no event with that id!")
elif len(events_with_id) == 1:
i = self.events.index(events_with_id[0])
self.events[i][selected_attribute] = value
self.__save_events()
print("Event was successfully changed!")
else:
print("There was an error during editing!")
def delete_event(self, event_id):
events_with_id = [e for e in self.events if e["id"] == event_id]
if len(events_with_id) == 0:
print("There is no event with that id!")
elif len(events_with_id) == 1:
self.events.remove(events_with_id[0])
self.__save_events()
print("Event was successfully deleted!")
else:
print("There was an error during event deletion!")
@staticmethod
def __count_dates_difference(date_string, date_string_1):
date = datetime.strptime(date_string, '%d.%m')
date_1 = datetime.strptime(date_string_1, '%d.%m')
difference = (date_1 - date).days
if not difference < 0:
return difference
else:
return difference + 365
def __get_closest_event_index(self, date):
date_string = "%s.%s" % (date.day, date.month)
closest_event = min(
self.events, key=lambda e: self.__count_dates_difference(date_string, e['date']))
return self.events.index(closest_event)
def __save_events(self):
data = {}
data['events'] = self.events
self.file_manager.save_to_json_file(data)
def __load_events(self):
events_attribute_name = 'events'
return self.file_manager.load_from_json_file(events_attribute_name)
def load_events_from_txt_file(self, filename):
if not file_exists(filename):
return []
lines = self.file_manager.load_lines_from_txt_file(filename)
names = lines[::3]
dates = lines[1::3]
if not len(names) == len(dates):
return []
for i in range(len(names)):
name = names[i].rstrip("\n")
sd = dates[i].rstrip("\n").split(".")
day, month, year = sd[0], sd[1], sd[2]
self.add_event(name, day, month, year)
self.__save_events()
return self.events
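# A minimal usage sketch (hypothetical filename): events live in a JSON file and are kept
# ordered starting from the date closest to today.
if __name__ == '__main__':
    events = Events('events.json')
    events.get_events()
    events.add_event('Birthday', '24', '12', '1990')
    events.get_specific_events(start=0, duration=14)
    print(events.specific_events)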
|
import math
import numpy
from piecewise_polynomial_fitting import *
from normal_distribution import *
class semi_analytic_domain_integrator(object):
def __create_cached_moments(self, x, f):
n = x.shape[0]
self.__ys = numpy.zeros([n, 4])
self.__ys[2] = f.moments(4, x[2]) # cubic
for j in range(2, n-2):
self.__ys[j+1] = f.moments(4, x[j+1]) # cubic
def __rollback_(self, t, T, xt, xT, xtT, yT, regridder, integrator):
if len(xt.shape) != len(xT.shape) or \
   len(xT.shape) != len(yT.shape) or \
   len(xt.shape) != 1 or len(xtT.shape) != 1:
    raise RuntimeError('expected one dimensional arrays')
nt = xt.shape[0]
nT = xT.shape[0]
ntT = xtT.shape[0]
if nt != nT or ntT != nT:
    raise RuntimeError('expected arrays to be of the same size')
if yT.shape[0] != nT:
    raise RuntimeError('array yT has different number of points to xT')
yt = numpy.zeros(nt)
cT = piecewise_cubic_fit(xT, yT)
for i in range(nt):
# regrid
regrid_xT = numpy.zeros(nT)
xti = xt[i]
for j in range(nT):
regrid_xT[j] = xti+xtT[j]
regrid_yT = regridder(xT, cT, regrid_xT)
# polynomial fit
cs = piecewise_cubic_fit(xtT, regrid_yT)
# perform expectation
sum = 0
xl = xtT[2]
for j in range(2, nT-2):  # ideally this inner quadrature loop would be encapsulated
xh = xtT[j+1]
sum = sum + integrator(cs[:, j-2], xl, xh, self.__ys[j], self.__ys[j+1])
xl = xh
yt[i] = sum
if t == 0.0:
for j in range(1, nt):
yt[j] = yt[0]
break
return yt
def rollback(self, t, T, xt, xT, xtT, ftT, yT):
'''
>>> integrator = semi_analytic_domain_integrator()
>>> nt = 31
>>> nT = 31
>>> ntT = 31
>>> t = 0.5
>>> T = 1.0
>>> mut = 0.0
>>> muT = 0.0
>>> vol = 0.2
>>> volt = vol*math.sqrt(t)
>>> volT = vol*math.sqrt(T)
>>> ft = normal_distribution(mut, volt)
>>> fT = normal_distribution(muT, volT)
>>> xt = ft.state(5.5, nt)
>>> xT = fT.state(5.5, nT)
>>> meantT = muT-mut
>>> voltT = math.sqrt(volT*volT-volt*volt)
>>> ftT = normal_distribution(meantT, voltT)
>>> xtT = ftT.state(5.5, ntT)
>>> yT = numpy.zeros([nT])
>>> for i in range(nT): yT[i] = math.exp(xT[i]-0.5*volT*volT) # lognormal martingale
>>> yt = integrator.rollback(t, T, xt, xT, xtT, ftT, yT)
>>> print "%f, %f" % (yt[15], math.exp(xt[15]-0.5*volt*volt))
0.990049, 0.990050
>>> ns = 31
>>> nsT = 31
>>> s = 0
>>> mus = 0.0
>>> vols = 0.0
>>> fs = normal_distribution(mus, vols)
>>> xs = fs.state(5.5, ns)
>>> meansT = muT-mus
>>> volsT = math.sqrt(volT*volT-vols*vols)
>>> fsT = normal_distribution(meansT, volsT)
>>> xsT = fsT.state(5.5, nsT)
>>> ys = integrator.rollback(s, T, xs, xT, xsT, fsT, yT)
>>> meanst = mut-mus
>>> volst = math.sqrt(volt*volt-vols*vols)
>>> fst = normal_distribution(meanst, volst)
>>> nst = 31
>>> xst = fst.state(5.5, nst)
>>> ys1 = integrator.rollback(s, t, xs, xt, xst, fst, yt)
>>> for i in range(ns): print "%f, %f" % (ys[i], ys1[i])
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
0.999999, 0.999999
'''
# create cache of moments
self.__create_cached_moments(xtT, ftT)
return self.__rollback_(t, T, xt, xT, xtT, yT, ftT.regrid, ftT.integral)
def rollback_max(self, t, T, xt, xT, xtT, ftT, yT):
'''
>>> integrator = semi_analytic_domain_integrator()
>>> nT = 31
>>> t = 0.5
>>> T = 1.0
>>> mut = 0.0
>>> muT = 0.0
>>> vol = 0.2
>>> volt = vol*math.sqrt(t)
>>> volT = vol*math.sqrt(T)
>>> fT = normal_distribution(muT, volT)
>>> xT = fT.state(5.5, nT)
>>> yT = numpy.zeros([nT])
>>> for i in range(nT): yT[i] = math.exp(xT[i]-0.5*volT*volT) # lognormal martingale
>>> ns = 31
>>> nsT = 31
>>> s = 0
>>> mus = 0.0
>>> vols = 0.0
>>> fs = normal_distribution(mus, vols)
>>> xs = fs.state(5.5, ns)
>>> meansT = muT-mus
>>> volsT = math.sqrt(volT*volT-vols*vols)
>>> fsT = normal_distribution(meansT, volsT)
>>> xsT = fsT.state(5.5, nsT)
>>> for i in range(nT): yT[i] -= 1.0 # strike 1.0
>>> ys = integrator.rollback_max(s, T, xs, xT, xsT, fsT, yT)
>>> d1 = 0.5*volT
>>> for i in range(ns): print "%f, %f" % (ys[i], 2.0*fsT.unit_cdf(d1)-1.0)
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
0.079655, 0.079656
'''
# create cache of moments
self.__create_cached_moments(xtT, ftT)
return self.__rollback_(t, T, xt, xT, xtT, yT, ftT.regrid, ftT.integral_max)
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
|
import os
import glob
import nibabel as nib
import numpy as np
import shutil
from nipype.interfaces.ants import N4BiasFieldCorrection
from sklearn.feature_extraction.image import extract_patches as sk_extract_patches
from sklearn.utils import shuffle
import scipy.misc
num_mod = 2
def get_filename(set_name, case_idx, input_name, loc):
pattern = '{0}/{1}/{3}/subject-{2}-{3}.nii'
return pattern.format(loc, set_name, case_idx, input_name)
def get_set_name(case_idx):
return 'Training' if case_idx < 11 else 'Testing'
def read_data(case_idx, input_name, loc):
set_name = get_set_name(case_idx)
image_path = get_filename(set_name, case_idx, input_name, loc)
print(image_path)
return nib.load(image_path)
def read_vol(case_idx, input_name, dir):
image_data = read_data(case_idx, input_name, dir)
return image_data.get_data()
def correct_bias(in_file, out_file):
correct = N4BiasFieldCorrection()
correct.inputs.input_image = in_file
correct.inputs.output_image = out_file
done = correct.run()
return done.outputs.output_image
def normalise(case_idx, input_name, in_dir, out_dir,copy=False):
set_name = get_set_name(case_idx)
image_in_path = get_filename(set_name, case_idx, input_name, in_dir)
image_out_path = get_filename(set_name, case_idx, input_name, out_dir)
if copy:
shutil.copy(image_in_path, image_out_path)
else:
correct_bias(image_in_path, image_out_path)
print(image_in_path + " done.")
"""
To extract patches from a 3D image
"""
def extract_patches(volume, patch_shape, extraction_step,datype='float32'):
patch_h, patch_w, patch_d = patch_shape[0], patch_shape[1], patch_shape[2]
stride_h, stride_w, stride_d = extraction_step[0], extraction_step[1], extraction_step[2]
img_h, img_w, img_d = volume.shape[0],volume.shape[1],volume.shape[2]
N_patches_h = (img_h-patch_h)//stride_h+1
N_patches_w = (img_w-patch_w)//stride_w+1
N_patches_d = (img_d-patch_d)//stride_d+1
N_patches_img = N_patches_h * N_patches_w * N_patches_d
raw_patch_martrix = np.zeros((N_patches_img,patch_h,patch_w,patch_d),dtype=datype)
k=0
# iterate over all the patches
for h in range((img_h-patch_h)//stride_h+1):
for w in range((img_w-patch_w)//stride_w+1):
for d in range((img_d-patch_d)//stride_d+1):
raw_patch_martrix[k]=volume[h*stride_h:(h*stride_h)+patch_h,\
w*stride_w:(w*stride_w)+patch_w,\
d*stride_d:(d*stride_d)+patch_d]
k+=1
assert(k==N_patches_img)
return raw_patch_martrix
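# Worked example: the 144 x 192 x 256 volumes used below, with illustrative values
# patch_shape=(32, 32, 32) and extraction_step=(16, 16, 16), give
#   N_patches_h = (144-32)//16 + 1 = 8,  N_patches_w = (192-32)//16 + 1 = 11,
#   N_patches_d = (256-32)//16 + 1 = 15, i.e. 8 * 11 * 15 = 1320 patches per volume.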
"""
To extract labeled patches from array of 3D labeled images
"""
def get_patches_lab(T1_vols, T2_vols, label_vols, extraction_step,
patch_shape,validating,testing,num_images_training):
patch_shape_1d=patch_shape[0]
# Extract patches from input volumes and ground truth
x = np.zeros((0, patch_shape_1d, patch_shape_1d, patch_shape_1d, num_mod),dtype="float32")
y = np.zeros((0, patch_shape_1d, patch_shape_1d, patch_shape_1d),dtype="uint8")
for idx in range(len(T1_vols)) :
y_length = len(y)
if testing:
print(("Extracting Patches from Image %2d ....")%(num_images_training+idx+2))
elif validating:
print(("Extracting Patches from Image %2d ....")%(num_images_training+idx+1))
else:
print(("Extracting Patches from Image %2d ....")%(1+idx))
label_patches = extract_patches(label_vols[idx], patch_shape, extraction_step,
datype="uint8")
# Select only those who are important for processing
if testing or validating:
valid_idxs = np.where(np.sum(label_patches, axis=(1, 2, 3)) != -1)
else:
valid_idxs = np.where(np.count_nonzero(label_patches, axis=(1, 2, 3)) > 6000)
# Filtering extracted patches
label_patches = label_patches[valid_idxs]
x = np.vstack((x, np.zeros((len(label_patches), patch_shape_1d,
patch_shape_1d, patch_shape_1d, num_mod),dtype="float32")))
y = np.vstack((y, np.zeros((len(label_patches), patch_shape_1d,
patch_shape_1d, patch_shape_1d),dtype="uint8")))
y[y_length:, :, :, :] = label_patches
# Sampling strategy (applied above via valid_idxs): reject patches whose labels are mostly zero,
# i.e. with fewer than 6000 nonzero label voxels.
T1_train = extract_patches(T1_vols[idx], patch_shape, extraction_step,datype="float32")
x[y_length:, :, :, :, 0] = T1_train[valid_idxs]
T2_train = extract_patches(T2_vols[idx], patch_shape, extraction_step,datype="float32")
x[y_length:, :, :, :, 1] = T2_train[valid_idxs]
return x, y
"""
To preprocess the labeled training data
"""
def preprocess_dynamic_lab(dir, seed, num_classes, extraction_step,patch_shape,num_images_training=2,
validating=False,testing=False,num_images_testing=7):
x = list(range(1,11))
if testing:
print("Testing")
index_start = num_images_training + 2
index_end = index_start + num_images_testing
T1_vols = np.empty((num_images_testing, 144, 192, 256),dtype="float32")
T2_vols = np.empty((num_images_testing, 144, 192, 256),dtype="float32")
label_vols = np.empty((num_images_testing, 144, 192, 256),dtype="uint8")
elif validating:
print("Validating")
index_start = num_images_training + 1
index_end = index_start + 1
T1_vols = np.empty((1, 144, 192, 256),dtype="float32")
T2_vols = np.empty((1, 144, 192, 256),dtype="float32")
label_vols = np.empty((1, 144, 192, 256),dtype="uint8")
else:
print("Training")
index_start = 1
index_end = index_start + num_images_training
T1_vols = np.empty((num_images_training, 144, 192, 256),dtype="float32")
T2_vols = np.empty((num_images_training, 144, 192, 256),dtype="float32")
label_vols = np.empty((num_images_training, 144, 192, 256),dtype="uint8")
i = 0
for index in range(index_start, index_end) :
print(x[index-1])
T1_vols[i, :, :, :] = read_vol(x[index-1], 'T1', dir)
T2_vols[i, :, :, :] = read_vol(x[index-1], 'T2', dir)
label_vols[i, :, :, :] = read_vol(x[index-1], 'label', dir)
i = i + 1
T1_mean = T1_vols.mean()
T1_std = T1_vols.std()
T1_vols = (T1_vols - T1_mean) / T1_std
T2_mean = T2_vols.mean()
T2_std = T2_vols.std()
T2_vols = (T2_vols - T2_mean) / T2_std
for i in range(T1_vols.shape[0]):
T1_vols[i] = ((T1_vols[i] - np.min(T1_vols[i])) /
(np.max(T1_vols[i])-np.min(T1_vols[i])))*255
for i in range(T2_vols.shape[0]):
T2_vols[i] = ((T2_vols[i] - np.min(T2_vols[i])) /
(np.max(T2_vols[i])-np.min(T2_vols[i])))*255
T1_vols = T1_vols/127.5 -1.
T2_vols = T2_vols/127.5 -1.
x,y=get_patches_lab(T1_vols,T2_vols,label_vols,extraction_step,patch_shape,validating=validating,
testing=testing,num_images_training=num_images_training)
print("Total Extracted Labelled Patches Shape:",x.shape,y.shape)
if testing:
return np.rollaxis(x, 4, 1), label_vols
elif validating:
return np.rollaxis(x, 4, 1), y, label_vols
else:
return np.rollaxis(x, 4, 1), y
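# Example call (hypothetical data directory; class count and patch sizes are illustrative):
# x, y = preprocess_dynamic_lab('./data', seed=0, num_classes=4,
#                               extraction_step=(16, 16, 16), patch_shape=(32, 32, 32))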
"""
To extract patches from an array of 3D unlabeled images
"""
def get_patches_unlab(T1_vols, T2_vols, extraction_step,patch_shape,dir):
patch_shape_1d=patch_shape[0]
# Extract patches from input volumes and ground truth
label_ref= np.empty((1, 144, 192, 256),dtype="uint8")
x = np.zeros((0, patch_shape_1d, patch_shape_1d, patch_shape_1d, num_mod))
label_ref = read_vol(1, 'label', dir)
for idx in range(len(T1_vols)) :
x_length = len(x)
print(("Processing the Image %2d ....")%(idx+11))
label_patches = extract_patches(label_ref, patch_shape, extraction_step)
# Select only those who are important for processing
# Sampling strategy: reject samples which labels are mostly 0 and have less than 6000 nonzero elements
valid_idxs = np.where(np.count_nonzero(label_patches, axis=(1, 2, 3)) > 6000)
label_patches = label_patches[valid_idxs]
x = np.vstack((x, np.zeros((len(label_patches), patch_shape_1d,
patch_shape_1d, patch_shape_1d, num_mod))))
T1_train = extract_patches(T1_vols[idx], patch_shape, extraction_step,datype="float32")
x[x_length:, :, :, :, 0] = T1_train[valid_idxs]
T2_train = extract_patches(T2_vols[idx], patch_shape, extraction_step,datype="float32")
x[x_length:, :, :, :, 1] = T2_train[valid_idxs]
return x
"""
To preprocess the unlabeled training data
"""
def preprocess_dynamic_unlab(dir,extraction_step,patch_shape,num_images_training_unlab):
T1_vols = np.empty((num_images_training_unlab, 144, 192, 256),dtype="float32")
T2_vols = np.empty((num_images_training_unlab, 144, 192, 256),dtype="float32")
for case_idx in range(11, 11+num_images_training_unlab) :
T1_vols[(case_idx - 11), :, :, :] = read_vol(case_idx, 'T1', dir)
T2_vols[(case_idx - 11), :, :, :] = read_vol(case_idx, 'T2', dir)
#print(read_vol(case_idx, 'T2', dir).shape)
T1_mean = T1_vols.mean()
T1_std = T1_vols.std()
T1_vols = (T1_vols - T1_mean) / T1_std
T2_mean = T2_vols.mean()
T2_std = T2_vols.std()
T2_vols = (T2_vols - T2_mean) / T2_std
for i in range(T1_vols.shape[0]):
T1_vols[i] = ((T1_vols[i] - np.min(T1_vols[i])) /
(np.max(T1_vols[i])-np.min(T1_vols[i])))*255
for i in range(T2_vols.shape[0]):
T2_vols[i] = ((T2_vols[i] - np.min(T2_vols[i])) /
(np.max(T2_vols[i])-np.min(T2_vols[i])))*255
T1_vols = T1_vols/127.5 -1.
T2_vols = T2_vols/127.5 -1.
x=get_patches_unlab(T1_vols, T2_vols, extraction_step, patch_shape,dir)
print("Total Extracted Unlabeled Patches Shape:",x.shape)
return np.rollaxis(x, 4, 1)
|
import os
from datetime import datetime
from http import HTTPStatus
from json import JSONDecodeError
from math import floor
from typing import Callable, Type, Union
import inject
from flask import Flask, session, Response, request
from flask_sockets import Sockets
from geventwebsocket.websocket import WebSocket
from sqlalchemy import exists
from db.Connection import Connection
from db.tables.Post import Post
from db.tables.User import User as DB_User
from server.api.StatusCode import StatusCode
from server.datatypes.Message import MessageToSend, Message
from server.datatypes.Result import Result
from server.datatypes.User import User
from server.datatypes.UserIdPrincipal import UserIdPrincipal
from state.MessageSubscribersQueue import MessageSubscribersQueue
from utils.PasswordHasher import PasswordHasher
from utils.Serializable import Serializable
class Api:
@staticmethod
@inject.autoparams()
def init(flask: Flask) -> None:
flask.config["SESSION_COOKIE_NAME"] = Api.__COOKIE_SESSION_NAME
def route(path: str, **kwargs) -> Callable:
return flask.route(f"{Api.__API_PREFIX}{path}", endpoint=path, **kwargs)
@route("ping")
@Api.__need_authorization
@Api.__response_json
def ping() -> Serializable:
return Result(StatusCode.Success.name)
@route("registration", methods=["POST"])
@Api.__receive_or_bad_request(User)
@Api.__response_json
@inject.autoparams("connection")
def registration(user: User, connection: Connection) -> Union[Response, Serializable]:
if len(user.name) > 150:
return Response(status=HTTPStatus.BAD_REQUEST)
db_session = connection.serializable_session()
if db_session.query(exists().where(connection.User.user_name == user.name)).scalar():
return Result(StatusCode.UserAlreadyExist.name)
salt = os.urandom(32)
db_session.add(DB_User(user_name=user.name, password=PasswordHasher.get_hash(salt, user.password), salt=salt))
db_session.commit()
session[Api.__COOKIE_SESSION_NAME] = UserIdPrincipal(user.name)
return Result(StatusCode.Success.name)
@route("authorization", methods=["POST"])
@Api.__receive_or_bad_request(User)
@Api.__response_json
@inject.autoparams("connection")
def authorization(user: User, connection: Connection) -> Union[Response, Serializable]:
if len(user.name) > 150:
return Response(status=HTTPStatus.BAD_REQUEST)
db_session = connection.session()
db_user = db_session.query(DB_User).filter(DB_User.user_name == user.name).first()
if db_user is None or db_user.password != PasswordHasher.get_hash(db_user.salt, user.password):
return Result(StatusCode.InvalidUsernameOrPassword.name)
session[Api.__COOKIE_SESSION_NAME] = UserIdPrincipal(user.name)
return Result(StatusCode.Success.name)
@route("send_message", methods=["POST"])
@Api.__need_authorization
@Api.__receive_or_bad_request(MessageToSend)
@Api.__response_json
@inject.autoparams("connection")
def send_message(message: MessageToSend, connection: Connection) -> Union[Response, Serializable]:
if len(message.text) > 280:
return Response(status=HTTPStatus.BAD_REQUEST)
post = Post(datetime=datetime.utcnow(), author=session[Api.__COOKIE_SESSION_NAME]["name"], message=message.text)
db_session = connection.session()
db_session.add(post)
MessageSubscribersQueue.send_to_subscribers(Message(floor(post.datetime.timestamp()), post.author, post.message))
db_session.commit()
return Result(StatusCode.Success.name)
sockets = Sockets(flask)
@sockets.route(f"{Api.__API_PREFIX}message_socket")
@inject.autoparams("connection")
def message_socket(ws: WebSocket, connection: Connection) -> None:
if Api.__COOKIE_SESSION_NAME not in session:
ws.close()
return
db_session = connection.session()
for post in db_session.query(Post).all():
ws.send(Message(datetime=floor(post.datetime.timestamp()), author=post.author, text=post.message).serialize())
with MessageSubscribersQueue.subscribe(request, ws):
ws.receive()
__API_PREFIX = "/api/"
__COOKIE_SESSION_NAME = "SESSION_ID"
@staticmethod
def __receive_or_bad_request(t: Type[Serializable]) -> Callable:
def receive_or_bad_request(func: Callable) -> Callable:
def receive_or_bad_request_wrapper(*args, **kwargs) -> Response:
try:
return func(Serializable.deserialize(t, request.data.decode("utf-8")), *args, **kwargs)
except JSONDecodeError:
return Response(status=HTTPStatus.BAD_REQUEST)
return receive_or_bad_request_wrapper
return receive_or_bad_request
@staticmethod
def __need_authorization(func: Callable) -> Callable:
def need_authorization_wrapper(*args, **kwargs) -> Response:
return func(*args, **kwargs) if Api.__COOKIE_SESSION_NAME in session else Response(status=HTTPStatus.UNAUTHORIZED)
return need_authorization_wrapper
@staticmethod
def __response_json(func: Callable) -> Callable:
def content_json_wrapper(*args, **kwargs) -> Response:
result = func(*args, **kwargs)
return Response(response=result.serialize(), mimetype="application/json") if not isinstance(result, Response) else result
return content_json_wrapper
|
# -*- coding: utf-8 -*-
# from evaluate import strict, loose_macro, loose_micro
import logging
def f1(p, r):
if r == 0.:
return 0.
return 2 * p * r / float(p + r)
def strict(true_and_prediction):
num_entities = len(true_and_prediction)
correct_num = 0.
for true_labels, predicted_labels in true_and_prediction:
correct_num += set(true_labels) == set(predicted_labels)
precision = recall = correct_num / num_entities
return precision, recall, f1(precision, recall)
def loose_macro(true_and_prediction):
num_entities = len(true_and_prediction)
p = 0.
r = 0.
for true_labels, predicted_labels in true_and_prediction:
if len(predicted_labels) > 0:
p += len(set(predicted_labels).intersection(set(true_labels))) / float(len(predicted_labels))
if len(true_labels):
r += len(set(predicted_labels).intersection(set(true_labels))) / float(len(true_labels))
precision = p / num_entities
recall = r / num_entities
return precision, recall, f1(precision, recall)
def loose_micro(true_and_prediction):
num_predicted_labels = 0.
num_true_labels = 0.
num_correct_labels = 0.
for true_labels, predicted_labels in true_and_prediction:
num_predicted_labels += len(predicted_labels)
num_true_labels += len(true_labels)
num_correct_labels += len(set(predicted_labels).intersection(set(true_labels)))
precision = num_correct_labels / num_predicted_labels
recall = num_correct_labels / num_true_labels
return precision, recall, f1(precision, recall)
def get_true_and_prediction(scores, y_data):
true_and_prediction = []
for score, true_label in zip(scores, y_data):
predicted_tag = []
true_tag = []
for label_id, label_score in enumerate(list(true_label)):
if label_score > 0:
true_tag.append(label_id)
lid, ls = max(enumerate(list(score)), key=lambda x: x[1])
predicted_tag.append(lid)
for label_id, label_score in enumerate(list(score)):
if label_score > 0.0:
if label_id != lid:
predicted_tag.append(label_id)
true_and_prediction.append((true_tag, predicted_tag))
return true_and_prediction
def acc_hook(scores, y_data):
true_and_prediction = get_true_and_prediction(scores, y_data)
logging.info(" strict (p,r,f1):{}".format(strict(true_and_prediction)))
logging.info("loose macro (p,r,f1): {}".format(loose_macro(true_and_prediction)))
logging.info("loose micro (p,r,f1): {}".format(loose_micro(true_and_prediction)))
a_ = strict(true_and_prediction)
print(" strict (p,r,f1):", strict(true_and_prediction))
print("loose macro (p,r,f1):", loose_macro(true_and_prediction))
print("loose micro (p,r,f1):", loose_micro(true_and_prediction))
return a_[-1]
def save_predictions(scores, y_data, id2label, fname):
true_and_prediction = get_true_and_prediction(scores, y_data)
with open(fname, "w") as f:
for t, p in true_and_prediction:
f.write("True values ===> " + "\t" + " ".join([id2label[id] for id in t]) + "\t" + "Predicted values ==> " + '\t'+ " ".join([id2label[id] for id in p]) + "\n")
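# A small self-check with made-up label sets (not from the original code): strict counts exact
# set matches, loose macro averages per-entity precision/recall, loose micro pools label counts.
if __name__ == '__main__':
    pairs = [([1, 2], [1]), ([3], [3, 4])]
    print(strict(pairs))       # (0.0, 0.0, 0.0): neither prediction matches its true set exactly
    print(loose_macro(pairs))  # (0.75, 0.75, 0.75)
    print(loose_micro(pairs))  # (0.666..., 0.666..., 0.666...)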
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .deployment_group_reference import DeploymentGroupReference
class DeploymentGroup(DeploymentGroupReference):
"""DeploymentGroup.
:param id: Deployment group identifier.
:type id: int
:param name: Name of the deployment group.
:type name: str
:param pool: Deployment pool in which deployment agents are registered.
:type pool: :class:`TaskAgentPoolReference <task-agent.v4_1.models.TaskAgentPoolReference>`
:param project: Project to which the deployment group belongs.
:type project: :class:`ProjectReference <task-agent.v4_1.models.ProjectReference>`
:param description: Description of the deployment group.
:type description: str
:param machine_count: Number of deployment targets in the deployment group.
:type machine_count: int
:param machines: List of deployment targets in the deployment group.
:type machines: list of :class:`DeploymentMachine <task-agent.v4_1.models.DeploymentMachine>`
:param machine_tags: List of unique tags across all deployment targets in the deployment group.
:type machine_tags: list of str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'pool': {'key': 'pool', 'type': 'TaskAgentPoolReference'},
'project': {'key': 'project', 'type': 'ProjectReference'},
'description': {'key': 'description', 'type': 'str'},
'machine_count': {'key': 'machineCount', 'type': 'int'},
'machines': {'key': 'machines', 'type': '[DeploymentMachine]'},
'machine_tags': {'key': 'machineTags', 'type': '[str]'}
}
def __init__(self, id=None, name=None, pool=None, project=None, description=None, machine_count=None, machines=None, machine_tags=None):
super(DeploymentGroup, self).__init__(id=id, name=name, pool=pool, project=project)
self.description = description
self.machine_count = machine_count
self.machines = machines
self.machine_tags = machine_tags
|
from .mnist import MNIST_Dataset
from .fmnist import FashionMNIST_Dataset
from .cifar10 import CIFAR10_Dataset
from .odds import ODDSADDataset
def load_dataset(dataset_name, data_path, normal_class, known_outlier_class, n_known_outlier_classes: int = 0,
ratio_known_normal: float = 0.0, ratio_known_outlier: float = 0.0, ratio_pollution: float = 0.0,
random_state=None):
"""Loads the dataset."""
implemented_datasets = ('mnist', 'fmnist', 'cifar10',
'arrhythmia', 'cardio', 'satellite', 'satimage-2', 'shuttle', 'thyroid')
assert dataset_name in implemented_datasets
dataset = None
if dataset_name == 'mnist':
dataset = MNIST_Dataset(root=data_path,
normal_class=normal_class,
known_outlier_class=known_outlier_class,
n_known_outlier_classes=n_known_outlier_classes,
ratio_known_normal=ratio_known_normal,
ratio_known_outlier=ratio_known_outlier,
ratio_pollution=ratio_pollution)
if dataset_name == 'fmnist':
dataset = FashionMNIST_Dataset(root=data_path,
normal_class=normal_class,
known_outlier_class=known_outlier_class,
n_known_outlier_classes=n_known_outlier_classes,
ratio_known_normal=ratio_known_normal,
ratio_known_outlier=ratio_known_outlier,
ratio_pollution=ratio_pollution)
if dataset_name == 'cifar10':
dataset = CIFAR10_Dataset(root=data_path,
normal_class=normal_class,
known_outlier_class=known_outlier_class,
n_known_outlier_classes=n_known_outlier_classes,
ratio_known_normal=ratio_known_normal,
ratio_known_outlier=ratio_known_outlier,
ratio_pollution=ratio_pollution)
if dataset_name in ('arrhythmia', 'cardio', 'satellite', 'satimage-2', 'shuttle', 'thyroid'):
dataset = ODDSADDataset(root=data_path,
dataset_name=dataset_name,
n_known_outlier_classes=n_known_outlier_classes,
ratio_known_normal=ratio_known_normal,
ratio_known_outlier=ratio_known_outlier,
ratio_pollution=ratio_pollution,
random_state=random_state)
return dataset
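# A minimal usage sketch (hypothetical data path and parameters):
# dataset = load_dataset('mnist', data_path='./data', normal_class=0, known_outlier_class=1,
#                        n_known_outlier_classes=1, ratio_known_outlier=0.01, ratio_pollution=0.1)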
|
import tensorflow as tf
import tensorflow_text as text
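# The tensorflow_text import above is needed only for its side effect: it registers the
# custom ops used by the preprocess SavedModel, even though the name `text` is never referenced.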
import tensorflow_hub as hub
raw_bert = hub.load('./kcbert-base/model/0')
raw_preprocess = hub.load('./kcbert-base/preprocess/0')
bert = hub.KerasLayer(raw_bert, trainable=True)
preprocess = hub.KerasLayer(raw_preprocess, arguments={"seq_length": 48})
input_node = tf.keras.Input([], dtype=tf.string)
preprocessed = preprocess(input_node)
output_node = bert(preprocessed)
model = tf.keras.Model(input_node, output_node)
model(tf.constant(['ㅋㅋㅋㅋㅋ 재밌다']))
tf.saved_model.save(model, './tmp/tmp-1')
print("=====================")
print("=====================")
print("=====================")
raw_preprocess = hub.load('./kcbert-base/preprocess/0')
# call_2 appears to be the preprocessor's two-segment entry point (sentence-pair inputs)
preprocess = hub.KerasLayer(raw_preprocess.call_2, arguments={"seq_length": 48})
input_node = [tf.keras.Input([], dtype=tf.string), tf.keras.Input([], dtype=tf.string)]
preprocessed = preprocess(input_node)
output_node = bert(preprocessed)
model = tf.keras.Model(input_node, output_node)
model(
[
tf.constant(['ㅋㅋㅋㅋㅋ 재밌다']),
tf.constant(['뭐가??', '어떻게 되긴, 개발자 되는 거지 뭐'])
]
)
tf.saved_model.save(model, './tmp/tmp-2')
|
from kivy.lang.builder import Builder
from kivymd.uix.card import MDCard
Builder.load_string(
"""
<RoundButton>:
width: '55dp'
size_hint_x: None
elevation: 0
md_bg_color: app.theme_cls.opposite_bg_normal
radius: '10dp'
text: ''
ripple_behavior: True
theme_text: 'Custom'
MDLabel:
theme_text_color: root.theme_text
text_color: app.theme_cls.bg_normal
halign: 'center'
text: root.text
font_size: '12sp'
"""
)
class RoundButton(MDCard):
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
return True
else:
return super().on_touch_down(touch)
|
#%%
# First read in the datasets. One Graduate school admission dataset, one Titanic survival dataset
# need to load api_dsLand
# from turtle import color
import dm6103 as dm
dfadmit = dm.api_dsLand('gradAdmit')
# dm.dfChk(dfadmit, True)
#%%
# quick plots
import matplotlib.pyplot as plt
# add color
import numpy as np
admitcolors = np.where(dfadmit['admit']==1,'g','r')
# admitcolors[dfadmit['admit']==0] = 'r'
# admitcolors[dfadmit['admit']==1] = 'g'
print("\nReady to continue.")
#%%
# Plot, Pandas style (or seaborn sns)
dfadmit.plot(x="gre", y="gpa", kind="scatter", color=admitcolors)
plt.xlabel("GRE score")
plt.ylabel("GPA")
plt.title("GPA vs GRE")
# plt.savefig(filepath, dpi=96)
plt.show()
#%%
# OR Matplotlib focused
plt.scatter(dfadmit.gre,dfadmit.gpa, color=admitcolors)
plt.xlabel("GRE score")
plt.ylabel("GPA")
plt.title("GPA vs GRE")
plt.show()
#%%
# Note that the plot function here by default is a line plot
plt.plot(dfadmit.gre,dfadmit.gpa, 'r-o') # red, solid line, circle dots
plt.show()
#%%
# OR
# more object-oriented style
fig, axis = plt.subplots()
axis.plot(dfadmit.gre, dfadmit.gpa, color='g', linestyle="", marker="o", markersize=3)
plt.xlabel("GRE score")
plt.ylabel("GPA")
plt.title("GPA vs GRE")
# plt.savefig(filepath, dpi=96)
plt.show()
#%%
# easier to add jittering
fig, axis = plt.subplots(2,2)
axis[0,0].plot(dfadmit.gre, dfadmit.gpa, color='r', linestyle="", marker="^", markersize=3, alpha=0.3)
# axis[0,0].xlabel("GRE score")
# axis[0,0].ylabel("GPA")
axis[0,0].set_title("plain")
axis[0,0].xaxis.set_ticklabels([]) # get rid of x tick marks for clarity here
axis[0,1].plot(dfadmit.gre + np.random.uniform(0,10, size=dfadmit.shape[0] ), dfadmit.gpa, color='g', linestyle="", marker="o", markersize=3, alpha=0.3)
axis[0,1].set_title("jitter gre")
axis[0,1].xaxis.set_ticklabels([]) # get rid of x tick marks for clarity here
axis[1,0].plot(dfadmit.gre, dfadmit.gpa + np.random.uniform(0,.1, size=dfadmit.shape[0]), color='b', linestyle="", marker="+", markersize=3, alpha=0.4)
axis[1,0].set_title("jitter gpa")
axis[1,1].plot(dfadmit.gre + np.random.uniform(0,10, size=dfadmit.shape[0] ), dfadmit.gpa + np.random.uniform(0,.1, size=dfadmit.shape[0]), color='#555555', linestyle="", marker="x", markersize=3, alpha=0.5)
axis[1,1].set_title("jitter both")
# plt.xlabel("GRE score")
# plt.ylabel("GPA")
# plt.title("GPA vs GRE")
plt.savefig("quad_figs.png", dpi=96)
plt.show()
#%%
# seaborn sns
import seaborn as sns
sns.scatterplot(data=dfadmit, x='gre', y='gpa')
sns.despine()
#%%
# seaborn sns
# import seaborn as sns
sns.regplot(x='gre', y='gpa', data=dfadmit, fit_reg=False, x_jitter=10, scatter_kws={'alpha': 0.3, 's': 3})
sns.despine()
# easy
# lack some minor control such as what distribution to use
# can also use subplots, with different set of syntax
#%%
# seaborn sns
# import seaborn as sns
sns.regplot(x='gre', y='gpa', data=dfadmit, fit_reg=True, x_jitter=10, scatter_kws={'alpha': 0.3, 's': 3}, line_kws={'color': 'red', 'label': 'LM fit'})
sns.despine()
#%%
# seaborn sns
# import seaborn as sns
#
# https://datascience.stackexchange.com/questions/44192/what-is-the-difference-between-regplot-and-lmplot-in-seaborn
# regplot() performs a simple linear regression model fit and plot. lmplot() combines regplot() and FacetGrid.
# The FacetGrid class helps in visualizing the distribution of one variable as well as the relationship between multiple variables separately within subsets of your dataset using multiple panels.
# lmplot in particular has the hue option
sns.lmplot(x='gre', y='gpa', data=dfadmit, hue='admit', fit_reg=False, x_jitter=10, scatter_kws={'alpha': 0.3, 's': 3})
sns.despine()
#%%
# seaborn sns
# import seaborn as sns
# lmplot also allows multiple series with different markers.
sns.lmplot(x='gre', y='gpa', data=dfadmit, hue='admit', markers=["o", "x"], fit_reg=True, x_jitter=10, scatter_kws={'alpha': 0.4, 's': 8})
sns.despine()
#%%
# seaborn sns
# color by rank
# import seaborn as sns
sns.lmplot(x='gre', y='gpa', data=dfadmit, hue='rank', markers=['o', 'x', '^', 's'], fit_reg=False, x_jitter=10, scatter_kws={'alpha': 0.4, 's': 8})
# markers=['o', 'x', 's','^','p','+','d']
sns.despine()
#%%
# Question,
# How many dimensions we can visualize?
#%%
# color by rank
# Let also try pandas,
# need to create the color label for each data point ourselves,
# but we can have color and shape separate
rankcolors = np.where(dfadmit['rank']==1,'r','-') # initialize the vector as well
# rankcolors[dfadmit['rank']==1] = 'r'
rankcolors[dfadmit['rank']==2] = 'g'
rankcolors[dfadmit['rank']==3] = 'b'
rankcolors[dfadmit['rank']==4] = 'y'  # 'y' = yellow; the np.where above created single-character strings, so 'yellow' would be silently truncated
# and use different shape for admit 0 and 1
ax1 = dfadmit[dfadmit.admit==0].plot(x="gre", y="gpa", kind="scatter", color=rankcolors[dfadmit.admit==0], marker='o', label='rejected')
dfadmit[dfadmit.admit==1].plot(x="gre", y="gpa", kind="scatter", color=rankcolors[dfadmit.admit==1], marker='+', label='admitted', ax = ax1)
# dfadmit.plot(x="gre", y="gpa", kind="scatter", color=rankcolors, marker='+')
plt.legend(loc='upper left')
plt.xlabel("GRE score")
plt.ylabel("GPA")
plt.title("GPA vs GRE")
# plt.savefig(filepath, dpi=96)
plt.show()
#%%
# color by rank
# Try Matplotlib, so we can add jittering
fig, axis = plt.subplots()
for admitval, markerval in { 0: "o" , 1: "+" }.items() : # rejected (admit==0), use 'o', admitted (admit==1), use '+'
for cindex, cvalue in {1: 'r', 2: 'g', 3: 'b', 4: 'yellow' }.items() : # the ranks and colors
thisdf = dfadmit[dfadmit.admit==admitval] # first filter out admitted or rejected
thisdf = thisdf[thisdf['rank'] == cindex] # then filter out one rank at a time.
print(thisdf.shape)
axis.plot(thisdf.gre + np.random.uniform(0,10, size=thisdf.shape[0] ),
thisdf.gpa,
color=cvalue,
linestyle="",
marker=markerval,
markersize=3,
alpha=0.3
)
plt.xlabel("GRE score")
plt.ylabel("GPA")
plt.title("GPA vs GRE")
# plt.savefig(filepath, dpi=96)
plt.show()
#%%
# Now, your turn. Try some sensible plots with the Titanic dataset.
# How would you visualize the relations between survived, age, sex, fare, embarked?
# You do not need to use all of them in a single plot. What variables make the most sense to you,
# in terms of finding out who survived, and who didn't.
#
dftitan = dm.api_dsLand('Titanic')
# perform a quick clean up on age NAs
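# One possible quick clean-up (a sketch; assuming the age column is named 'age'):
# either fill missing ages with the median, or simply drop those rows instead.
# dftitan['age'] = dftitan['age'].fillna(dftitan['age'].median())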
#%%
# Now LINEAR REGRESSION
# 1. Describe the model → ols()
# 2. Fit the model → .fit()
# 3. Summarize the model → .summary()
# 4. Make model predictions → .predict()
#%%
# FORMULA based
from statsmodels.formula.api import ols
modelGreGpa = ols(formula='gre ~ gpa', data=dfadmit)
print( type(modelGreGpa) )
#%%
modelGreGpaFit = modelGreGpa.fit()
print( type(modelGreGpaFit) )
print( modelGreGpaFit.summary() )
# From the summary, try to get as much info as we can
# Df Residuals (# total observations minus Df Model minus 1)
# Df Model (# of x variables)
# R-squared, what does that mean?
# Adj R-squared
# F-statistics
# Prob (F-statistics), ie. p-value for F-statistics
# Log-Likelihood
# AIC (model eval)
# BIC (model eval)
# coef
# std err
# t
# P>|t|, aka p-value for the coefficient significance
# 95% confidence intervals
# Omnibus - close to zero means residuals are normally distributed
# Prob(Omnibus) - close to 1 means residuals are normally distributed
# skew (positive is right tailed, negative is left)
# Kurtosis (tailedness; normal dist = 3, less than 3 means thinner tails and a flatter top)
print("\nReady to continue.")
#%%
import pandas as pd
modelpredicitons = pd.DataFrame( columns=['gre_GpaLM'], data= modelGreGpaFit.predict(dfadmit.gpa))
# use the original dataset gpa data to find the expected model values
print(modelpredicitons.shape)
print( modelpredicitons.head() )
print("\nReady to continue.")
#%%
# Next let us try more variables, and do it in a combined step
modelGreGpaRankFit = ols(formula='gre ~ gpa + rank', data=dfadmit).fit()
print( type(modelGreGpaRankFit) )
print( modelGreGpaRankFit.summary() )
modelpredicitons['gre_GpaRankLM'] = modelGreGpaRankFit.predict(dfadmit)
print(modelpredicitons.head())
print("\nReady to continue.")
#%%
# And let us check the VIF value (watch out for multicollinearity issues)
# Import functions
# from statsmodels.stats.outliers_influence import variance_inflation_factor
# # Get variables for which to compute VIF and add intercept term
# X = dfadmit[['gpa', 'rank']]
# X['Intercept'] = 1
# # Compute and view VIF
# vif = pd.DataFrame()
# vif["variables"] = X.columns
# vif["VIF"] = [ variance_inflation_factor(X.values, i) for i in range(X.shape[1]) ] # list comprehension
# # View results using print
# print(vif)
# print("\nReady to continue.")
#%% [markdown]
# But rank really should be categorical.
#
# # Patsy coding
#
# * Strings and booleans are automatically coded
# * Numerical → categorical
# * C() function
# * level 0 → (0,0,0,...)
# * level 1 → (1,0,0,...)
# * level 2 → (0,1,0,...)
# * Reference group
# * Default: first group
# * Treatment
# * levels
#%%
modelGreGpaCRankFit = ols(formula='gre ~ gpa + C(rank)', data=dfadmit).fit()
print( modelGreGpaCRankFit.summary() )
modelpredicitons['gre_GpaCRankLM'] = modelGreGpaCRankFit.predict(dfadmit)
print(modelpredicitons.head())
print("\nReady to continue.")
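#%%
# A small sketch (not in the original notes): the reference group for C(rank) defaults to
# the first level (rank 1); patsy's Treatment coding lets us pick a different base level,
# here rank 4, so the other rank coefficients become contrasts against rank-4 schools.
modelGreGpaCRank4Fit = ols(formula='gre ~ gpa + C(rank, Treatment(reference=4))', data=dfadmit).fit()
print( modelGreGpaCRank4Fit.summary() )
print("\nReady to continue.")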
#%%
# Next try some interaction terms
#
# formula = 'y ~ x1 + x2'
# C(x1) : treat x1 as categorical variable
# -1 : remove intercept
# x1:x2 : an interaction term between x1 and x2
# x1*x2 : an interaction term between x1 and x2 and the individual variables
# np.log(x1) : apply vectorized functions to model variables
modelGreGpaXCRankFit = ols(formula='gre ~ gpa * C(rank)', data=dfadmit).fit()
print( modelGreGpaXCRankFit.summary() )
modelpredicitons['gre_GpaXCRankLM'] = modelGreGpaXCRankFit.predict(dfadmit)
print(modelpredicitons.head())
# This is essentially four different models for the four ranks of schools.
# QUESTION: Can you build a model which encompass four models for the four different schools
# with the same slope (for gpa) but allow for different intercepts?
print("\nReady to continue.")
#%% [markdown]
# # Logistic Regressions
#
# link function in glm
# https://www.statsmodels.org/stable/glm.html#families
# Gaussian(link = sm.families.links.identity) → the default family
# Binomial(link = sm.families.links.logit)
# probit, cauchy, log, and cloglog
# Poisson(link = sm.families.links.log)
# identity and sqrt
#%% [markdown]
#
# # Maximum Likelihood Estimation
#
# Likelihood vs Probability
# Conditional Probability: P(outcome A | given B)
# Probability: P(data | model)
# Likelihood: L(model | data)
#
# If the error distribution is normal, and we choose to use a squared (Euclidean)
# distance metric, then OLS and MLE produce the same result.
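#%%
# A minimal sketch (not in the original notes) illustrating the point above: refit the
# gre ~ gpa model by MLE as a Gaussian GLM with the identity link and compare the
# coefficients with the OLS fit from earlier -- they should agree.
import statsmodels.api as sm
from statsmodels.formula.api import glm
modelGreGpaMLE = glm(formula='gre ~ gpa', data=dfadmit, family=sm.families.Gaussian()).fit()
print( modelGreGpaMLE.params )
print( modelGreGpaFit.params )  # OLS coefficients from the earlier fit
print("\nReady to continue.")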
#%%
import statsmodels.api as sm # Importing statsmodels
# import statsmodels.formula.api as smf # Support for formulas
# from statsmodels.formula.api import glm # Use glm() directly
# 1. Describe the model → glm()
# 2. Fit the model → .fit()
# 3. Summarize the model → .summary()
# 4. Make model predictions → .predict()
# 1. Describe the model → glm()
# Two of the available styles:
# ARRAY based
# import statsmodels.api as sm
# X = sm.add_constant(X)
# model = sm.glm(y, X, family)
# FORMULA based (we had been using this for ols)
from statsmodels.formula.api import glm
# model = glm(formula, data, family)
modelAdmitGreLogit = glm(formula='admit ~ gre', data=dfadmit, family=sm.families.Binomial())
#%%
modelAdmitGreLogitFit = modelAdmitGreLogit.fit()
print( modelAdmitGreLogitFit.summary() )
modelpredicitons['admit_GreLogit'] = modelAdmitGreLogitFit.predict(dfadmit)
# print(modelpredicitons.head())
# dm.dfChk(modelpredicitons)
print("\nReady to continue.")
#%% [markdown]
# # Deviance
# Formula
# D = −2LL(β)
# * Measure of error
# * Lower deviance → better model fit
# * Benchmark for comparison is the null deviance → intercept-only model / constant model
# * Evaluate
# * Adding a random noise variable would, on average, decrease deviance by 1
# * When adding k predictors to the model, the deviance should decrease by more than k
#%%
# The deviance of the model was 486.06 (or negative two times Log-Likelihood-function)
# df = 398
print(-2*modelAdmitGreLogitFit.llf)
# Compare to the null deviance
print(modelAdmitGreLogitFit.null_deviance)
# 499.98 # df = 399
# A decrease of 14 with just one variable. That's not bad.
#
# Another way to use the deviance value is to check the chi-sq p-value like this:
# Null model: chi-sq of 499.98, df = 399, the p-value is 0.000428 (can use the scipy.stats.chi2 survival function)
# Our model: chi-sq of 486.06, df = 398, the p-value is 0.001641
# These small p-values (less than 0.05, or 5%) mean we reject the null hypothesis that the model fits the data, i.e. the model is not a good fit; we would want a higher p-value here. Nonetheless, the one-variable model is a lot better than the null model.
print("\nReady to continue.")
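#%%
# A small sketch (not in the original notes) reproducing the chi-sq p-values quoted above
# from the deviances and their degrees of freedom, using scipy's chi2 survival function.
from scipy.stats import chi2
df_null, df_model = 399, 398
print('null model p-value :', chi2.sf(modelAdmitGreLogitFit.null_deviance, df_null))
print('our model p-value  :', chi2.sf(-2 * modelAdmitGreLogitFit.llf, df_model))
print("\nReady to continue.")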
#%%
# Now with more predictors
modelAdmitAllLogit = glm(formula='admit ~ gre+gpa+C(rank)', data=dfadmit, family=sm.families.Binomial())
modelAdmitAllLogitFit = modelAdmitAllLogit.fit()
print( modelAdmitAllLogitFit.summary() )
modelpredicitons['admit_GreAllLogit'] = modelAdmitAllLogitFit.predict(dfadmit)
# print(modelpredicitons.head())
# dm.dfChk(modelpredicitons)
# QUESTION: Is this model separable into four models for each rank with the
# same "intercept" or "slopes"?
# How can you generalize it to a more general case?
print("\nReady to continue.")
#%%
# Testing
modelAdmitTestLogit = glm(formula='admit ~ gre+gpa+C(rank)+gre*C(rank)', data=dfadmit, family=sm.families.Binomial())
modelAdmitTestLogitFit = modelAdmitTestLogit.fit()
print( modelAdmitTestLogitFit.summary() )
#%%
# To interpret the model properly, it is handy to have the exponentials of the coefficients
print( np.exp(modelAdmitAllLogitFit.params) )
print( np.exp(modelAdmitAllLogitFit.conf_int()) )
print("\nReady to continue.")
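#%%
# A short sketch (not in the original notes): put the odds ratios and their 95% confidence
# intervals from the cell above into one readable table.
oddsratios = np.exp(modelAdmitAllLogitFit.conf_int())
oddsratios.columns = ['2.5%', '97.5%']
oddsratios['Odds Ratio'] = np.exp(modelAdmitAllLogitFit.params)
print(oddsratios)
print("\nReady to continue.")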
#%%
# Confusion matrix
# Define cut-off value
cut_off = 0.3
# Compute class predictions
modelpredicitons['classLogitAll'] = np.where(modelpredicitons['admit_GreAllLogit'] > cut_off, 1, 0)
print(modelpredicitons.classLogitAll.head())
#
# Make a cross table
print(pd.crosstab(dfadmit.admit, modelpredicitons.classLogitAll,
rownames=['Actual'], colnames=['Predicted'],
margins = True))
#
#
# predicted
# 0 1
# Actual 0 True Negative TN False Positive FP
# Actual 1 False Negative FN True Positive TP
#
# Accuracy = (TP + TN) / Total
# Precision = TP / (TP + FP)
# Recall rate = TP / (TP + FN) = Sensitivity
# Specificity = TN / (TN + FP)
# F1_score is the "harmonic mean" of precision and recall
# F1 = 2 (precision)(recall)/(precision + recall)
print("\nReady to continue.")
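#%%
# A quick sketch (not part of the original notes) showing how the metrics above can be
# computed from the cross table, using the cut_off and class predictions defined earlier.
# It assumes both classes appear among the predictions at the chosen cut-off.
confusion = pd.crosstab(dfadmit.admit, modelpredicitons.classLogitAll,
                        rownames=['Actual'], colnames=['Predicted'])
TN, FP = confusion.loc[0, 0], confusion.loc[0, 1]
FN, TP = confusion.loc[1, 0], confusion.loc[1, 1]
accuracy = (TP + TN) / (TP + TN + FP + FN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)  # aka sensitivity
specificity = TN / (TN + FP)
f1 = 2 * precision * recall / (precision + recall)
print(f"Accuracy: {accuracy:.3f}  Precision: {precision:.3f}  Recall: {recall:.3f}  Specificity: {specificity:.3f}  F1: {f1:.3f}")
print("\nReady to continue.")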
#%%
# Now try the Titanic Dataset, and find out the survival chances from different predictors.
|
from ft_map import ft_map
from ft_filter import ft_filter
from ft_reduce import ft_reduce
iterable = [1, 2, 3, 4, 5]
functions = {
"map": ft_map,
"filter": ft_filter,
"reduce": ft_reduce,
}
def unit_test(message, function_to_test, function_to_apply, iterable):
try:
print("\n=> ", message)
out = functions[function_to_test](function_to_apply, iterable)
        if hasattr(out, '__iter__') and not isinstance(out, str):
            print(list(out))
        else:
            print(out)
except TypeError as e:
print(e)
def test_errors(function_to_test, function_to_apply):
print(f"============= ft_{function_to_test} =============")
unit_test("Passing None as function_to_apply", function_to_test, None, iterable)
unit_test("Passing None as iterator", function_to_test, function_to_apply, None)
unit_test("Passing not iterable object", function_to_test, function_to_apply, 2)
unit_test("Passing a non valid function_to_apply", function_to_test, "LOL", iterable)
input("\n============= Press ENTER to continue ==>\n")
if __name__=="__main__":
test_errors("map", lambda dum: dum + 1)
test_errors("filter", lambda dum: not (dum % 2))
test_errors("reduce", lambda u, v: u + v)
unit_test("Valid test map 1", "map", lambda x: x + 2, [])
unit_test("Valid test map 2", "map", lambda x: x + 2, [1])
unit_test("Valid test map 3", "map", lambda x: x + 2, iterable)
unit_test("Valid test map 4", "map", lambda x: x ** 2, iterable)
input("\n============= Press ENTER to continue ==>\n")
unit_test("Valid test filter 1", "filter", lambda x: x <= 1, [])
unit_test("Valid test filter 2", "filter", lambda x: x <= 1, [2])
unit_test("Valid test filter 3", "filter", lambda x: x <= 1, [0])
unit_test("Valid test filter 4", "filter", lambda x: x <= 2, iterable)
input("\n============= Press ENTER to continue ==>\n")
unit_test("Valid test reduce 1", "reduce", lambda x, y: x + y, [])
unit_test("Valid test reduce 2", "reduce", lambda x, y: x + y, [1])
    unit_test("Valid test reduce 3", "reduce", lambda x, y: x + y, iterable)
    unit_test("Valid test reduce 4", "reduce", lambda x, y: x * y, iterable)
    unit_test("Valid test reduce 5", "reduce", lambda x, y: x + y, ['H', 'o', 'l', 'a', ' ', 'l', 'o', 'k', 'i'])
input("\n============= Press ENTER to continue ==>\n")
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import datetime
import typing as t
import json
from dataclasses import dataclass, field
from mypy_boto3_dynamodb.service_resource import Table
from boto3.dynamodb.conditions import Attr, Key, And
from botocore.exceptions import ClientError
"""
Data transfer object classes to be used with dynamodbstore
Classes in this module should implement methods `to_dynamodb_item(self)` and
`to_sqs_message(self)`
"""
class DynamoDBItem:
CONTENT_KEY_PREFIX = "c#"
SIGNAL_KEY_PREFIX = "s#"
TYPE_PREFIX = "type#"
def write_to_table(self, table: Table):
table.put_item(Item=self.to_dynamodb_item())
def to_dynamodb_item(self) -> t.Dict:
raise NotImplementedError
@staticmethod
def get_dynamodb_content_key(c_id: str) -> str:
return f"{DynamoDBItem.CONTENT_KEY_PREFIX}{c_id}"
@staticmethod
def get_dynamodb_signal_key(source: str, s_id: t.Union[str, int]) -> str:
return f"{DynamoDBItem.SIGNAL_KEY_PREFIX}{source}#{s_id}"
@staticmethod
def remove_signal_key_prefix(key: str, source: str) -> str:
return key[len(DynamoDBItem.SIGNAL_KEY_PREFIX) + len(source) + 1 :]
@staticmethod
def get_dynamodb_type_key(type: str) -> str:
return f"{DynamoDBItem.TYPE_PREFIX}{type}"
@staticmethod
def remove_content_key_prefix(key: str) -> str:
return key[len(DynamoDBItem.CONTENT_KEY_PREFIX) :]
class AWSMessage:
def to_aws_message(self) -> str:
raise NotImplementedError
@classmethod
def from_aws_message(cls, message: str) -> "AWSMessage":
raise NotImplementedError
@dataclass
class SignalMetadataBase(DynamoDBItem):
"""
Base for signal metadata.
    'ds' refers to dataset, which for the time being is
    equivalent to collab or privacy group (and in the long term could map to bank)
"""
DATASET_PREFIX = "ds#"
signal_id: t.Union[str, int]
ds_id: str
updated_at: datetime.datetime
signal_source: str
    signal_hash: str  # duplicated field with PDQMatchRecord; keeping both for now to help with debugging/testing
tags: t.List[str] = field(default_factory=list)
@staticmethod
def get_dynamodb_ds_key(ds_id: str) -> str:
return f"{SignalMetadataBase.DATASET_PREFIX}{ds_id}"
@dataclass
class PDQSignalMetadata(SignalMetadataBase):
"""
PDQ Signal metadata.
    This object is designed to be a ~lookaside for some of the values used by
    PDQMatchRecord, for easier and more consistent updating by the syncer and UI.
    Otherwise, updates to a signal's metadata would require updating every
    associated PDQMatchRecord. TODO: For now there will be some overlap between
    this object and PDQMatchRecord.
"""
SIGNAL_TYPE = "pdq"
def to_dynamodb_item(self) -> dict:
return {
"PK": self.get_dynamodb_signal_key(self.signal_source, self.signal_id),
"SK": self.get_dynamodb_ds_key(self.ds_id),
"SignalHash": self.signal_hash,
"SignalSource": self.signal_source,
"UpdatedAt": self.updated_at.isoformat(),
"HashType": self.SIGNAL_TYPE,
"Tags": self.tags,
}
def update_tags_in_table_if_exists(self, table: Table) -> bool:
"""
        Only write tags for the object in the table if an object with matching PK and SK
        already exists (also updates updated_at).
        Returns True if the object existed and the update succeeded, otherwise False.
"""
try:
table.update_item(
Key={
"PK": self.get_dynamodb_signal_key(
self.signal_source, self.signal_id
),
"SK": self.get_dynamodb_ds_key(self.ds_id),
},
                # service_resource.Table.update_item's ConditionExpression param is not typed
                # to accept these condition objects, hence the type: ignore below
ConditionExpression=And(Attr("PK").exists(), Attr("SK").exists()), # type: ignore
ExpressionAttributeValues={
":t": self.tags,
":u": self.updated_at.isoformat(),
},
ExpressionAttributeNames={
"#T": "Tags",
"#U": "UpdatedAt",
},
UpdateExpression="SET #T = :t, #U = :u",
)
except ClientError as e:
if e.response["Error"]["Code"] != "ConditionalCheckFailedException":
raise e
return False
return True
@classmethod
def get_from_signal(
cls,
table: Table,
signal_id: t.Union[str, int],
signal_source: str,
) -> t.List["PDQSignalMetadata"]:
items = table.query(
KeyConditionExpression=Key("PK").eq(
cls.get_dynamodb_signal_key(signal_source, signal_id)
)
& Key("SK").begins_with(cls.DATASET_PREFIX),
ProjectionExpression="PK, ContentHash, UpdatedAt, SK, SignalSource, SignalHash, Tags",
FilterExpression=Attr("HashType").eq(cls.SIGNAL_TYPE),
).get("Items", [])
return cls._result_items_to_metadata(items)
@classmethod
def _result_items_to_metadata(
cls,
items: t.List[t.Dict],
) -> t.List["PDQSignalMetadata"]:
return [
PDQSignalMetadata(
signal_id=cls.remove_signal_key_prefix(
item["PK"], item["SignalSource"]
),
ds_id=item["SK"][len(cls.DATASET_PREFIX) :],
updated_at=datetime.datetime.fromisoformat(item["UpdatedAt"]),
signal_source=item["SignalSource"],
signal_hash=item["SignalHash"],
tags=item["Tags"],
)
for item in items
]
@dataclass
class PDQRecordBase(DynamoDBItem):
"""
    Abstract base record for PDQ related items.
"""
SIGNAL_TYPE = "pdq"
content_id: str
content_hash: str
updated_at: datetime.datetime
def to_dynamodb_item(self) -> dict:
raise NotImplementedError
def to_sqs_message(self) -> dict:
raise NotImplementedError
@classmethod
def get_from_time_range(
cls, table: Table, start_time: str = None, end_time: str = None
) -> t.List:
raise NotImplementedError
@dataclass
class PipelinePDQHashRecord(PDQRecordBase):
"""
Successful execution at the hasher produces this record.
"""
quality: int
def to_dynamodb_item(self) -> dict:
return {
"PK": self.get_dynamodb_content_key(self.content_id),
"SK": self.get_dynamodb_type_key(self.SIGNAL_TYPE),
"ContentHash": self.content_hash,
"Quality": self.quality,
"UpdatedAt": self.updated_at.isoformat(),
"HashType": self.SIGNAL_TYPE,
}
def to_sqs_message(self) -> dict:
return {
"hash": self.content_hash,
"type": self.SIGNAL_TYPE,
"key": self.content_id,
}
@classmethod
def get_from_content_id(
cls, table: Table, content_key: str
) -> t.Optional["PipelinePDQHashRecord"]:
items = HashRecordQuery.from_content_key(
table,
cls.get_dynamodb_content_key(content_key),
cls.get_dynamodb_type_key(cls.SIGNAL_TYPE),
)
records = cls._result_items_to_records(items)
return None if not records else records[0]
@classmethod
def get_from_time_range(
cls, table: Table, start_time: str = None, end_time: str = None
) -> t.List["PipelinePDQHashRecord"]:
items = HashRecordQuery.from_time_range(
table, cls.get_dynamodb_type_key(cls.SIGNAL_TYPE), start_time, end_time
)
return cls._result_items_to_records(items)
@classmethod
def _result_items_to_records(
cls,
items: t.List[t.Dict],
) -> t.List["PipelinePDQHashRecord"]:
return [
PipelinePDQHashRecord(
content_id=item["PK"][len(cls.CONTENT_KEY_PREFIX) :],
content_hash=item["ContentHash"],
updated_at=datetime.datetime.fromisoformat(item["UpdatedAt"]),
quality=item["Quality"],
)
for item in items
]
@dataclass
class PDQMatchRecord(PDQRecordBase):
"""
Successful execution at the matcher produces this record.
"""
signal_id: t.Union[str, int]
signal_source: str
signal_hash: str
def to_dynamodb_item(self) -> dict:
return {
"PK": self.get_dynamodb_content_key(self.content_id),
"SK": self.get_dynamodb_signal_key(self.signal_source, self.signal_id),
"ContentHash": self.content_hash,
"UpdatedAt": self.updated_at.isoformat(),
"SignalHash": self.signal_hash,
"SignalSource": self.signal_source,
"GSI1-PK": self.get_dynamodb_signal_key(self.signal_source, self.signal_id),
"GSI1-SK": self.get_dynamodb_content_key(self.content_id),
"HashType": self.SIGNAL_TYPE,
"GSI2-PK": self.get_dynamodb_type_key(self.SIGNAL_TYPE),
}
def to_sqs_message(self) -> dict:
# TODO add method for when matches are added to a sqs
raise NotImplementedError
@classmethod
def get_from_content_id(
cls, table: Table, content_id: str
) -> t.List["PDQMatchRecord"]:
items = MatchRecordQuery.from_content_key(
table,
cls.get_dynamodb_content_key(content_id),
cls.SIGNAL_KEY_PREFIX,
cls.SIGNAL_TYPE,
)
return cls._result_items_to_records(items)
@classmethod
def get_from_signal(
cls, table: Table, signal_id: t.Union[str, int], signal_source: str
) -> t.List["PDQMatchRecord"]:
items = MatchRecordQuery.from_signal_key(
table,
cls.get_dynamodb_signal_key(signal_source, signal_id),
cls.SIGNAL_TYPE,
)
return cls._result_items_to_records(items)
@classmethod
def get_from_time_range(
cls, table: Table, start_time: str = None, end_time: str = None
) -> t.List["PDQMatchRecord"]:
items = MatchRecordQuery.from_time_range(
table, cls.get_dynamodb_type_key(cls.SIGNAL_TYPE), start_time, end_time
)
return cls._result_items_to_records(items)
@classmethod
def _result_items_to_records(
cls,
items: t.List[t.Dict],
) -> t.List["PDQMatchRecord"]:
return [
PDQMatchRecord(
content_id=cls.remove_content_key_prefix(item["PK"]),
content_hash=item["ContentHash"],
updated_at=datetime.datetime.fromisoformat(item["UpdatedAt"]),
signal_id=cls.remove_signal_key_prefix(
item["SK"], item["SignalSource"]
),
signal_source=item["SignalSource"],
signal_hash=item["SignalHash"],
)
for item in items
]
class HashRecordQuery:
DEFAULT_PROJ_EXP = "PK, ContentHash, UpdatedAt, Quality"
@classmethod
def from_content_key(
cls, table: Table, content_key: str, hash_type_key: str = None
) -> t.List[t.Dict]:
"""
Given a content key (and optional hash type), return its content hash (for that type).
Written to be agnostic to hash type so it can be reused by other types of 'HashRecord's.
"""
if hash_type_key is None:
key_con_exp = Key("PK").eq(content_key) & Key("SK").begins_with(
DynamoDBItem.SIGNAL_KEY_PREFIX
)
else:
key_con_exp = Key("PK").eq(content_key) & Key("SK").eq(hash_type_key)
return table.query(
KeyConditionExpression=key_con_exp,
ProjectionExpression=cls.DEFAULT_PROJ_EXP,
).get("Items", [])
@classmethod
def from_time_range(
cls, table: Table, hash_type: str, start_time: str = None, end_time: str = None
) -> t.List[t.Dict]:
"""
Given a hash type and time range, give me all the hashes found for that type and time range
"""
if start_time is None:
start_time = datetime.datetime.min.isoformat()
if end_time is None:
end_time = datetime.datetime.max.isoformat()
return table.scan(
FilterExpression=Key("SK").eq(hash_type)
& Key("UpdatedAt").between(start_time, end_time),
ProjectionExpression=cls.DEFAULT_PROJ_EXP,
).get("Items", [])
class MatchRecordQuery:
"""
Written to be agnostic to hash type so it can be reused by other types of 'MatchRecord's.
"""
DEFAULT_PROJ_EXP = (
"PK, ContentHash, UpdatedAt, SK, SignalSource, SignalHash, Labels"
)
@classmethod
def from_content_key(
cls,
table: Table,
content_key: str,
source_prefix: str = DynamoDBItem.SIGNAL_KEY_PREFIX,
hash_type: str = None,
) -> t.List[t.Dict]:
"""
Given a content key (and optional hash type), give me its content hash (for that type).
"""
filter_exp = None
if not hash_type is None:
filter_exp = Attr("HashType").eq(hash_type)
return table.query(
KeyConditionExpression=Key("PK").eq(content_key)
& Key("SK").begins_with(source_prefix),
ProjectionExpression=cls.DEFAULT_PROJ_EXP,
FilterExpression=filter_exp,
).get("Items", [])
@classmethod
def from_signal_key(
cls,
table: Table,
signal_key: str,
hash_type: str = None,
) -> t.List[t.Dict]:
"""
Given a Signal ID/Key (and optional hash type), give me any content matches found
"""
filter_exp = None
if not hash_type is None:
filter_exp = Attr("HashType").eq(hash_type)
return table.query(
IndexName="GSI-1",
KeyConditionExpression=Key("GSI1-PK").eq(signal_key),
ProjectionExpression=cls.DEFAULT_PROJ_EXP,
FilterExpression=filter_exp,
).get("Items", [])
@classmethod
def from_time_range(
cls, table: Table, hash_type: str, start_time: str = None, end_time: str = None
) -> t.List[t.Dict]:
"""
Given a hash type and time range, give me all the matches found for that type and time range
"""
if start_time is None:
start_time = datetime.datetime.min.isoformat()
if end_time is None:
end_time = datetime.datetime.max.isoformat()
return table.query(
IndexName="GSI-2",
KeyConditionExpression=Key("GSI2-PK").eq(hash_type)
& Key("UpdatedAt").between(start_time, end_time),
ProjectionExpression=cls.DEFAULT_PROJ_EXP,
).get("Items", [])
@dataclass
class MatchMessage(AWSMessage):
"""
Captures a set of matches that will need to be processed. We create one
match message for a single content key. It is possible that a single content
    hash matches multiple datasets. When it does, the entire set of matches is
forwarded together so that any appropriate action can be taken.
- `content_key`: A way for partners to refer uniquely to content on their
site
- `content_hash`: The hash generated for the content_key
"""
content_key: str
content_hash: str
matching_banked_signals: t.List["BankedSignal"] = field(default_factory=list)
def to_aws_message(self) -> str:
return json.dumps(
{
"ContentKey": self.content_key,
"ContentHash": self.content_hash,
"MatchingBankedSignals": [
x.to_dict() for x in self.matching_banked_signals
],
}
)
@classmethod
def from_aws_message(cls, message: str) -> "MatchMessage":
parsed = json.loads(message)
return cls(
parsed["ContentKey"],
parsed["ContentHash"],
[BankedSignal.from_dict(d) for d in parsed["MatchingBankedSignals"]],
)
@dataclass
class BankedSignal:
"""
BankedSignal fields:
- `banked_content_id`: Inside the bank, the unique way to refer to what
was matched against
- `bank_id`: The unique way to refer to the bank banked_content_id came from
- `bank_source`: This is forward looking: this might be 'te' or 'local';
indicates source of or relationship between one or more banks
- `classifications`: a list of strings that provide additional context
about the banked signal
"""
banked_content_id: str
bank_id: str
bank_source: str
classifications: t.List[str] = field(default_factory=list)
def to_dict(self) -> dict:
return {
"BankedContentId": self.banked_content_id,
"BankId": self.bank_id,
"BankSource": self.bank_source,
"Classifications": self.classifications,
}
@classmethod
def from_dict(cls, d: dict) -> "BankedSignal":
return cls(
d["BankedContentId"], d["BankId"], d["BankSource"], d["Classifications"]
)
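if __name__ == "__main__":
    # Minimal round-trip sketch (not part of the original module): serialize a MatchMessage
    # to its AWS string form and parse it back, using made-up example values.
    example_message = MatchMessage(
        content_key="example-content-key",
        content_hash="f" * 64,
        matching_banked_signals=[
            BankedSignal(
                banked_content_id="12345",
                bank_id="67890",
                bank_source="te",
                classifications=["true_positive"],
            )
        ],
    )
    serialized = example_message.to_aws_message()
    assert MatchMessage.from_aws_message(serialized) == example_message
    print(serialized)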
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Fang Zhang <thuzhf@gmail.com>
import sys,os,json,gzip,math,time,datetime,random,copy
import functools,itertools,requests,pickle,configparser
import argparse,logging,uuid,shutil,collections
from urllib.parse import urlparse, parse_qs
from collections import defaultdict as dd
import multiprocessing as mp
import numpy as np
import regex as re
import string
re.DEFAULT_VERSION = re.VERSION1
from FastKATE.utils.logger import simple_logger
logger = simple_logger(__name__, 'FastKATE/log')
from nltk.stem import WordNetLemmatizer
lemmatize = WordNetLemmatizer().lemmatize
lemmatize('')  # warm-up call, presumably to trigger WordNet's lazy loading up front
def rand_str(n):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(n))
class Worker(mp.Process):
def __init__(self, handler_func, queue):
super().__init__()
self.handler_func = handler_func
self.queue = queue
def run(self):
for params in iter(self.queue.get, None):
self.handler_func(*params)
def parse_k_lines_for_pages(lines, outfile, line_no):
start_str = 'INSERT INTO `page` VALUES '
# arbitrary_str = r"'.*?(?<!(?<!\\)(?:\\\\)*\\)'"
arbitrary_str = r"'(?:\\\\|\\'|.)*?'"
page_regex_str = r"\(([+-]?\d+),([+-]?\d+),({0}),{0},[+-]?\d+,[+-]?\d+,[+-]?\d+,[0-9\.]+,{0},(?:{0}|NULL),[+-]?\d+,[+-]?\d+,(?:{0}|NULL),(?:{0}|NULL)\)".format(arbitrary_str)
page_regex = re.compile(page_regex_str)
results = []
for line in lines:
if line.startswith(start_str):
for i in page_regex.findall(line):
try:
page_id = eval(i[0])
page_namespace = eval(i[1])
page_title = eval(i[2])
if page_namespace == 0 or page_namespace == 14:
results.append((page_id, page_namespace, page_title))
except:
logger.info(i)
sys.exit()
with open(outfile, 'w', errors='surrogateescape') as f:
for r in results:
f.write('{}\t{}\t{}\n'.format(r[0], r[1], r[2]))
logger.info('DONE: {}'.format(line_no))
def extract_all_page_basic_info(pages_infile, pages_outfile, category_infile, category_outfile, all_pages_outfile):
extract_page_basic_info(pages_infile, pages_outfile)
extract_category_basic_info(category_infile, category_outfile)
pages_ids = {}
logger.info('loading pages_outfile...')
with open(pages_outfile, errors='surrogateescape') as f:
for line in f:
page_id, page_namespace, page_title = line.strip('\n').split('\t')
if page_namespace == '14':
page_title = 'Category:{}'.format(page_title)
pages_ids[page_id] = {'page_namespace': page_namespace, 'page_title': page_title}
logger.info('loading category_outfile...')
with open(category_outfile, errors='surrogateescape') as f:
for line in f:
page_id, page_namespace, page_title = line.strip('\n').split('\t')
page_title = 'Category:{}'.format(page_title)
pages_ids[page_id] = {'page_namespace': page_namespace, 'page_title': page_title}
logger.info('merging into all_pages_outfile...')
with open(all_pages_outfile, 'w', errors='surrogateescape') as f:
for i in pages_ids:
f.write('{}\t{}\t{}\n'.format(i, pages_ids[i]['page_namespace'], pages_ids[i]['page_title']))
return pages_ids
def extract_page_basic_info(pages_infile, pages_outfile):
line_no = 0
lines = []
workers = []
queue = mp.SimpleQueue()
for i in range(mp.cpu_count()):
workers.append(Worker(parse_k_lines_for_pages, queue))
for w in workers:
w.start()
tmp_dir = '/tmp/wiki_pages/'
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
os.makedirs(tmp_dir, exist_ok=True)
with open(pages_infile, errors='surrogateescape') as f:
for line in f:
# if line.startswith(start_str):
lines.append(line)
line_no += 1
if line_no % 5 == 0:
outfile = '{}{}.txt'.format(tmp_dir, line_no)
queue.put((lines, outfile, line_no))
lines = []
if lines:
outfile = '{}{}.txt'.format(tmp_dir, line_no)
queue.put((lines, outfile, line_no))
lines = []
for _ in workers:
queue.put(None)
for w in workers:
w.join()
with open(pages_outfile, 'w', errors='surrogateescape') as fout:
for filename in os.listdir(tmp_dir):
full_path = os.path.join(tmp_dir, filename)
with open(full_path, errors='surrogateescape') as fin:
fout.write(fin.read())
def parse_k_lines_for_categorylinks(lines, outfile, line_no):
start_str = 'INSERT INTO `categorylinks` VALUES '
# arbitrary_str = r"'.*?(?<!(?<!\\)(?:\\\\)*\\)'"
arbitrary_str = r"'(?:\\\\|\\'|.)*?'"
cl_regex_str = r"\(([+-]?\d+),({0}),{0},'.*?',{0},{0},('.*?')\)".format(arbitrary_str)
cl_regex = re.compile(cl_regex_str)
results = []
for line in lines:
if line.startswith(start_str):
for i in cl_regex.findall(line):
try:
cl_from = eval(i[0])
cl_to = eval(i[1])
cl_type = eval(i[2])
if cl_type == 'page' or cl_type == 'subcat':
results.append((cl_from, cl_to, cl_type))
except:
logger.info(i)
sys.exit()
with open(outfile, 'w', errors='surrogateescape') as f:
for r in results:
f.write('{}\t{}\t{}\n'.format(r[0], r[1], r[2]))
if line_no % 100 == 0:
logger.info('DONE: {}'.format(line_no))
def extract_categorylinks_basic_info(categorylinks_infile, categorylinks_outfile):
line_no = 0
lines = []
workers = []
queue = mp.SimpleQueue()
for i in range(mp.cpu_count()):
workers.append(Worker(parse_k_lines_for_categorylinks, queue))
for w in workers:
w.start()
tmp_dir = '/tmp/wiki_categorylinks/'
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
os.makedirs(tmp_dir, exist_ok=True)
with open(categorylinks_infile, errors='surrogateescape') as f:
for line in f:
# if line.startswith(start_str):
lines.append(line)
line_no += 1
if line_no % 1 == 0:
outfile = '{}{}.txt'.format(tmp_dir, line_no)
queue.put((lines, outfile, line_no))
lines = []
if lines:
outfile = '{}{}.txt'.format(tmp_dir, line_no)
queue.put((lines, outfile, line_no))
lines = []
for _ in workers:
queue.put(None)
for w in workers:
w.join()
with open(categorylinks_outfile, 'w', errors='surrogateescape') as fout:
for filename in os.listdir(tmp_dir):
full_path = os.path.join(tmp_dir, filename)
with open(full_path, errors='surrogateescape') as fin:
fout.write(fin.read())
def parse_k_lines_for_category(lines, outfile, line_no):
start_str = 'INSERT INTO `category` VALUES '
# arbitrary_str = r"'.*?(?<!(?<!\\)(?:\\\\)*\\)'"
arbitrary_str = r"'(?:\\\\|\\'|.)*?'"
c_regex_str = r"\(([+-]?\d+),({0}),[+-]?\d+,[+-]?\d+,[+-]?\d+\)".format(arbitrary_str)
c_regex = re.compile(c_regex_str)
results = []
for line in lines:
if line.startswith(start_str):
for i in c_regex.findall(line):
try:
cat_id = eval(i[0])
cat_title = eval(i[1])
results.append((cat_id, 14, cat_title)) # 14 stands for category
except:
logger.info(i)
sys.exit()
with open(outfile, 'w', errors='surrogateescape') as f:
for r in results:
f.write('{}\t{}\t{}\n'.format(r[0], r[1], r[2]))
logger.info('DONE: {}'.format(line_no))
def extract_category_basic_info(category_infile, category_outfile):
line_no = 0
lines = []
workers = []
queue = mp.SimpleQueue()
for i in range(mp.cpu_count()):
workers.append(Worker(parse_k_lines_for_category, queue))
for w in workers:
w.start()
tmp_dir = '/tmp/wiki_category/'
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
os.makedirs(tmp_dir, exist_ok=True)
with open(category_infile, errors='surrogateescape') as f:
for line in f:
# if line.startswith(start_str):
lines.append(line)
line_no += 1
if line_no % 1 == 0:
outfile = '{}{}.txt'.format(tmp_dir, line_no)
queue.put((lines, outfile, line_no))
lines = []
if lines:
outfile = '{}{}.txt'.format(tmp_dir, line_no)
queue.put((lines, outfile, line_no))
lines = []
for _ in workers:
queue.put(None)
for w in workers:
w.join()
with open(category_outfile, 'w', errors='surrogateescape') as fout:
for filename in os.listdir(tmp_dir):
full_path = os.path.join(tmp_dir, filename)
with open(full_path, errors='surrogateescape') as fin:
fout.write(fin.read())
def construct_pages_taxonomy(all_pages_outfile_as_infile, categorylinks_outfile_as_infile, taxonomy_lemmatized_outfile):
pages_ids = {}
pages_titles = {}
logger.info('loading all_pages_outfile_as_infile...')
with open(all_pages_outfile_as_infile, errors='surrogateescape') as f:
for line in f:
page_id, page_namespace, page_title = line.strip('\n').split('\t')
pages_ids[page_id] = {'page_namespace': page_namespace, 'page_title': page_title}
pages_titles[page_title] = {'page_namespace': page_namespace, 'page_id': page_id, 'subcats': [], 'subpages': []}
logger.info('loading categorylinks_outfile_as_infile...')
with open(categorylinks_outfile_as_infile, errors='surrogateescape') as f:
for line in f:
cl_from, cl_to, cl_type = line.strip('\n').split('\t')
cl_to = 'Category:{}'.format(cl_to)
if cl_to not in pages_titles:
logger.info('Category missing: {}'.format(cl_to))
elif cl_from not in pages_ids:
# logger.info('Page/Category missing: {}'.format(cl_from))
continue
else:
if cl_type == 'page':
pages_titles[cl_to]['subpages'].append(pages_ids[cl_from]['page_title'])
elif cl_type == 'subcat':
pages_titles[cl_to]['subcats'].append(pages_ids[cl_from]['page_title'])
pages_titles_lemmatized = {}
logger.info('lemmatize all wiki titles...')
for t in pages_titles:
tl = t[9:].lower().replace('-', '_') # 9 == len('Category:')
tl = normalize_name_for_querying_vector_model(tl)
tmp = pages_titles[t]
if tl not in pages_titles_lemmatized:
pages_titles_lemmatized[tl] = {'page_id': set(), 'subcats': set(), 'subpages': set()}
pages_titles_lemmatized[tl]['page_id'].add(int(tmp['page_id']))
for sc in tmp['subcats']:
sc = normalize_name_for_querying_vector_model(sc[9:].lower().replace('-', '_'))
pages_titles_lemmatized[tl]['subcats'].add(sc)
for sp in tmp['subpages']:
sp = normalize_name_for_querying_vector_model(sp.lower().replace('-', '_'))
pages_titles_lemmatized[tl]['subpages'].add(sp)
logger.info('pickling into {}...'.format(taxonomy_lemmatized_outfile))
with open(taxonomy_lemmatized_outfile, 'wb') as f:
pickle.dump(pages_titles_lemmatized, f)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('timestamp', help='the timestamp of wikipedia dumps that you have downloaded')
parser.add_argument('wikidata_dir', help='the directory of wikipedia dumps that you have downloaded')
parser.add_argument('--job', default='pages_taxonomy', help='pages_basic_info/categorylinks/pages_taxonomy')
args = parser.parse_args()
timestamp = args.timestamp
pages_infile = os.path.join(args.wikidata_dir, 'enwiki-{}-page.sql'.format(timestamp))
pages_outfile = os.path.join(args.wikidata_dir, 'enwiki-{}-page-outfile'.format(timestamp))
categorylinks_infile = os.path.join(args.wikidata_dir, 'enwiki-{}-categorylinks.sql'.format(timestamp))
categorylinks_outfile = os.path.join(args.wikidata_dir, 'enwiki-{}-categorylinks-outfile'.format(timestamp))
category_infile = os.path.join(args.wikidata_dir, 'enwiki-{}-category.sql'.format(timestamp))
category_outfile = os.path.join(args.wikidata_dir, 'enwiki-{}-category-outfile'.format(timestamp))
all_pages_outfile = os.path.join(args.wikidata_dir, 'enwiki-{}-all-pages-outfile'.format(timestamp))
taxonomy_lemmatized_outfile = os.path.join(args.wikidata_dir, 'taxonomy_lemmatized.pkl')
if args.job == 'pages_basic_info':
extract_all_page_basic_info(pages_infile, pages_outfile, category_infile, category_outfile, all_pages_outfile)
elif args.job == 'categorylinks':
extract_categorylinks_basic_info(categorylinks_infile, categorylinks_outfile)
elif args.job == 'pages_taxonomy':
if not os.path.isfile(all_pages_outfile) or not os.path.getsize(all_pages_outfile):
extract_all_page_basic_info(pages_infile, pages_outfile, category_infile, category_outfile, all_pages_outfile)
if not os.path.isfile(categorylinks_outfile) or not os.path.getsize(categorylinks_outfile):
extract_categorylinks_basic_info(categorylinks_infile, categorylinks_outfile)
construct_pages_taxonomy(all_pages_outfile, categorylinks_outfile, taxonomy_lemmatized_outfile)
if __name__ == '__main__':
start_t = time.time()
main()
end_t = time.time()
t = end_t - start_t
logger.info('Time elapsed: {:.4f} minutes'.format(t / 60.))
|
import numpy as np
import pandas as pd
import datetime
import argparse
def readCSV(dt):
"""
Read the CSV file into a dataframe for a YYYY-MM (dt)
Do preliminary cleaning
arg: dt -- string with format YYYY-MM
return df: dataframe containing data from csv
"""
folder = 'raw_data/'
filename = 'output-' + str(dt) + '-01T00_00_00+00_00.csv'
df = pd.read_csv(folder+filename)
df.when_captured = pd.to_datetime(df.when_captured)
# Need to change the format of the Time Stamp for all the measurements in the raw data
df.service_uploaded = df.service_uploaded.apply(lambda x: \
datetime.datetime.strptime(x, '%b %d, %Y @ %H:%M:%S.%f')\
.replace(tzinfo=datetime.timezone.utc))
#### Add a column for the year
df['year'] = pd.DatetimeIndex(df['when_captured']).year
#### Need to correct for the format of the PM numeric values.
df['pms_pm01_0'] = df['pms_pm01_0'].astype(str).str.replace(',', '').astype(float)
df['pms_pm10_0'] = df['pms_pm10_0'].astype(str).str.replace(',', '').astype(float)
df['pms_pm02_5'] = df['pms_pm02_5'].astype(str).str.replace(',', '').astype(float)
return df
def findBadData(df):
'''
    return the badRecords, i.e. (device, when_captured) keys that have more than one
    record for the same key (which is not physically possible)
'''
temp_df = df.groupby(['device','when_captured']).size().to_frame('size').\
reset_index().sort_values('size', ascending=False)
print("bad device data counts: ")
badRecords = temp_df[(temp_df['size']>1)]
print(badRecords)
print("all bad device list: ")
# Devices that have misbehaved at some point - more than one data values per time stamp
print(np.unique(temp_df[temp_df['size']>1]['device'].values)) # devices that have misbehaved
return badRecords
def rmInvalidTimeStamps(df):
"""
remove invalid time stamped records
## remove records with NULL `when_captured`
    ## remove records where `when_captured` is invalid
## remove records where gap of `service_uploaded` and `when_captured` > 7 days
"""
## remove records with NULL `when_captured`
print("Null date records to remove: ", df['when_captured'].isna().sum())
df = df[df['when_captured'].notna()]
print("df shape after remove records with NULL `when_captured` : ",df.shape)
    ## remove records where `when_captured` is invalid (i.e. before 2000/1/19)
    boolean_condition = df['when_captured'] > pd.to_datetime('2000/1/19', infer_datetime_format=True).tz_localize('UTC')
    print("Valid `when_captured` entries: ", boolean_condition.sum())
    df = df[boolean_condition]
    print("df shape after removing records where `when_captured` is invalid : ", df.shape)
## remove records where gap of `service_uploaded` and `when_captured` > 7 days
boolean_condition = abs(df['when_captured'].subtract(df['service_uploaded'])).astype('timedelta64[D]') < 7
boolean_condition.shape
print("Lag 7 days to remove: ",df.shape[0] - (boolean_condition).sum())
df = df[boolean_condition]
print("df shape after records where gap of `service_uploaded` and `when_captured` > 7 days : ",df.shape)
return df
def imputeInaccurateRH(df):
"""
impute data with NaN(missing) for inaccurate values of RH
"""
boolean_condition = (df['env_humid']<0) | (df['env_humid']>100)
column_name = 'env_humid'
new_value = np.nan
df.loc[boolean_condition, column_name] = new_value
print("Inaccurate RH records imputed: ", boolean_condition.sum())
return df
def dropServiceUploaded(df):
"""
Inplace dropping of the 'service_uploaded' column
"""
df.drop('service_uploaded', axis=1, inplace=True)
def rmDuplicates(df):
"""
Inplace dropping of duplicates
preserve a single copy of duplicative rows
"""
incoming = df.shape[0]
df.drop_duplicates(subset=df.columns[0:df.shape[1]], inplace=True, keep='first') # args: subset=[df.columns[0:df.shape[1]]], keep = 'first'
print("Number of duplicative entries removed : ", -df.shape[0]+incoming)
def dataAggWithKey(df):
"""
Aggregate the df based on key: ('device','when_captured')
arg: df - incoming dataframe
return: dataframe with COUNTS and COUNT-DISTINCTS for each key
"""
# STEP 1: Aggregate the dataframe based on key
temp_df = df.groupby(['device','when_captured']).agg(['count','nunique'])
# temp_df.info()
num_groups = temp_df.shape[0]
print("num_groups is : ", num_groups)
# STEP 2: Merge Counts and Count-Distincts to check for duplicative records and multiplicities
even = list(range(0,26,2))
odd = list(range(1,26,2))
tmp_df1 = temp_df.iloc[:,even].max(axis=1).to_frame('COUNTS').reset_index()
tmp_df2 = temp_df.iloc[:,odd].max(axis=1).to_frame('DISTINCTS').reset_index()
print(tmp_df1.shape, tmp_df2.shape)
merged = pd.merge(tmp_df1, tmp_df2, left_on = ['device', 'when_captured'], \
right_on=['device', 'when_captured'])
return merged, num_groups
def identifyALLNanRecs(merged):
"""
Actionable: Records of useless data with all NaNs
    args: incoming dataframe with COUNTS and COUNT-DISTINCTS for each key
return : keys dataframe ('device', 'when_captured') to remove later
"""
bool1 = (merged.COUNTS >1) & (merged.DISTINCTS==0)
sum1 = bool1.sum()
print(sum1)
toDiscard1 = merged.loc[:,['device', 'when_captured']][bool1]
toDiscard1.shape
return sum1, toDiscard1
def identifyMultivaluedTimeStamps(merged):
"""
Actionable: Records that are a mix of duplicates and non-duplicate rows
for a given (`device`, `when_captured`) [must be all discarded]
    args: incoming dataframe with COUNTS and COUNT-DISTINCTS for each key
return : keys dataframe ('device', 'when_captured') to remove later
"""
bool3 = (merged.COUNTS >1) & (merged.DISTINCTS>1)
sum3 = bool3.sum()
print(sum3)
toDiscard3 = merged.loc[:,['device', 'when_captured']][bool3]
toDiscard3.shape
return sum3, toDiscard3
def identifyRemainingDupl(merged):
"""
Actionable: even though duplicates were dropped, there can still be records for which (merged.COUNTS >1) & (merged.DISTINCTS==1)
: consider the case where one of the records for the key under consideration has meaningful values
: but the other record has all NaNs for the same key. Ex. (Oct 18, 2018 @ 10:36:24.000 , 2299238163): row 22618
Records where all rows are purely duplicates [preserve only 1 later]
    args: incoming dataframe with COUNTS and COUNT-DISTINCTS for each key
"""
bool2 = (merged.COUNTS >1) & (merged.DISTINCTS==1)
sum2 = bool2.sum()
print("remaining duplicates check : " ,merged.COUNTS[bool2].sum() - merged.DISTINCTS[bool2].sum())
toDiscard2 = merged.loc[:,['device', 'when_captured']][bool2]
toDiscard2.shape
return sum2, toDiscard2
def goodTimeStamps(merged):
"""
Records that are good
"""
bool4 = (merged.COUNTS ==1) & (merged.DISTINCTS==1)
sum4 = bool4.sum()
print('good records : ', sum4)
return sum4
def writeDF(dt, dframe, descrpt):
"""
write multivalued timestamps' keys to a csv
args: dframe to write
descrpt: string with description to append to file
"""
# dframe.info()
print("written records shape : ", dframe.shape)
dframe.to_csv('cleaned_data/' + str(dt) + '-01_' + str(descrpt) + '.csv')
def filterRows(toDiscard1, toDiscard2, toDiscard3, df):
"""
Inplace discarding of rows based on allNaN record keys (in df : toDiscard1)
and rows based on MultivaluedTimeStamps keys (in df : toDiscard3)
from original dataframe: df
args:
toDiscard1: allNaN record keys
toDiscard2: identifyRemainingDuplcates: records where (merged.COUNTS >1) & (merged.DISTINCTS==1)
toDiscard3: MultivaluedTimeStamps keys
df: original dataframe
"""
# STEP 1 :
# all tuples of keys to be discarded
discard = pd.concat([toDiscard1, toDiscard2, toDiscard3], ignore_index=True)
discard['KEY_Dev_WhenCapt'] = list(zip(discard.device, discard.when_captured))
print(df.shape, discard.shape)
# STEP 2 :
# tuples of all keys in the dataframe
df['KEY_Dev_WhenCapt'] = list(zip(df.device, df.when_captured))
df.shape
# STEP 3 :
# discard the rows
rows_to_discard = df['KEY_Dev_WhenCapt'].isin(discard['KEY_Dev_WhenCapt'])
print("these many rows to discard: ", rows_to_discard.sum())
incoming = df.shape[0]
df = df[~rows_to_discard]
print(incoming - df.shape[0])
return df
def cleanSolarCastData(dt):
"""
Master Function to clean all the data with the helper functions in `Data_Cleansing_Single_file`
arg: dt: The function returns the cleaned data frame for the YYYY-MM corresponding to "dt"
return : df: cleaned dataframe
"""
df = readCSV(dt)
findBadData(df)
df = rmInvalidTimeStamps(df)
print("new df: ", df.shape)
df = imputeInaccurateRH(df)
print("new df: ", df.shape)
dropServiceUploaded(df)
print("new df after dropping service_uploaded col: ", df.shape)
rmDuplicates(df)
print("new df after removing duplicates: ", df.shape)
merged,num_groups = dataAggWithKey(df)
print("merged: ", merged.shape)
print("num_groups : ", num_groups)
sum1, toDiscard1 = identifyALLNanRecs(merged)
sum3, toDiscard3 = identifyMultivaluedTimeStamps(merged)
sum2, toDiscard2 = identifyRemainingDupl(merged)
sum4 = goodTimeStamps(merged)
print("toDiscard1 shape: ",toDiscard1.shape)
print("toDiscard2 shape: ",toDiscard2.shape)
print("toDiscard3 shape: ",toDiscard3.shape)
# sanityCheck(): ensure you have all records covered by 1 of the 4 conditions
assert(num_groups == sum1+sum2+sum3+sum4)
writeDF(dt, toDiscard3, 'MultivaluedTimeStamps')
df = filterRows(toDiscard1, toDiscard2, toDiscard3, df)
print("final df shape: ", df.shape)
### Now check to make sure no garbage data is left
badRecordsLeft = findBadData(df)
if not badRecordsLeft.empty:
print("Still bad records remaining:", badRecordsLeft)
assert(badRecordsLeft.empty)
return df
def cleanAndWriteDF(dt):
df = cleanSolarCastData(dt)
print(df.shape)
# Check how many devices there are in the dataset
devices = np.unique(df.device.values)
print(len(devices))
print(devices)
# *Sort the time series -- it's unsorted.*
df.sort_values(by=['when_captured'], inplace=True)
# Write the files
descrpt = 'cleaned'
writeDF(dt, df, descrpt)
return df
def readCleanedDF(dt, descrpt):
"""
Read the cleaned & pre-sorted CSV file into a dataframe for a YYYY-MM (dt)
Do preliminary cleaning
arg: dt -- string with format YYYY-MM
return df: dataframe containing data from csv
"""
folder = './'
filename = str(dt) + '-01_' + str(descrpt) + '.csv'
df = pd.read_csv(folder+filename)
return df
def cleanAndWriteMainDF(start_yyyymm, end_yyyymm):
"""
Cleans each month's data and saves it; also concatenate all the data into a single DataFrame,
sort, and then save
arg: start_yyyymm -- string with format YYYY-MM; earliest month for which data is available
end_yyyymm -- string with format YYYY-MM; latest month for which data is available
"""
dfList = []
for dt in pd.date_range(start_yyyymm, end_yyyymm, freq='MS').strftime("%Y-%m").tolist():
print("========================")
print("========================", dt, "========================")
print("========================")
df = cleanAndWriteDF(dt)
dfList.append(df)
mainDF = pd.concat(dfList, ignore_index=True)
mainDF.when_captured = pd.to_datetime(mainDF.when_captured)
mainDF.sort_values(by=['when_captured'], inplace=True)
mainDF['lnd_7318u'] = mainDF['lnd_7318u'].astype(str).str.replace(',', '').astype(float)
writeDF('Solarcast', mainDF, 'Main_Cleaned_Sorted')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('start_yyyymm',
help='string with format YYYY-MM; earliest month for which data is available')
parser.add_argument('end_yyyymm',
help='string with format YYYY-MM; latest month for which data is available')
args = parser.parse_args()
cleanAndWriteMainDF(args.start_yyyymm, args.end_yyyymm)
|
import numpy as np
from glacier.physics import GlacierParameters
from glacier.solvers import Solver
L = 500
n_x = L + 1
xs = np.linspace(0, L, n_x)
H = 25
alpha = np.radians(3)
t_end = 10
h_0 = 50
upwind_scheme = False
steady_state = False
plot_initial = False
if steady_state:
q_0 = 1
glacier = GlacierParameters(
xs=xs, alpha=alpha, q_0=0, x_s=xs[-1] * 0.3, x_f=xs[-1] * 0.6, h_0=h_0
)
else:
glacier = GlacierParameters(
xs=xs, alpha=alpha, q_0=1e0, x_s=xs[-1] * 0, x_f=xs[-1] * 0.9, h_0=h_0
)
q = glacier.q.unscaled * 3600 * 24 * 365
left_height = h_0
h_0 = np.zeros(len(xs))
h_0[0] = left_height
glacier = GlacierParameters(xs=xs, q=q, h_0=h_0, alpha=alpha)
if plot_initial:
glacier.plot()
if upwind_scheme:
solver = Solver(glacier=glacier, name='upwind')
solver.solve(t_end, method="upwind")
else:
solver = Solver(glacier=glacier, name='finite_volume')
solver.solve(t_end, method="finite volume", save_every=100)
solver.calculate_flow_fields(save_every=20)
solver.animate(plot_step=10, show=True)
# animate_glacier(solver, interval=1, plot_interval=10, flow_field=False)
|
import cv2
o = cv2.imread(r"..\lena.jpg")
g = cv2.GaussianBlur(o, (55, 55), 0, 0)  # 55x55 kernel; sigmaX = sigmaY = 0 lets OpenCV derive sigma from the kernel size
b = cv2.bilateralFilter(o, 55, 100, 100)  # d=55 pixel neighborhood, sigmaColor=100, sigmaSpace=100 (edge-preserving smoothing)
cv2.imshow("original", o)
cv2.imshow("Gaussian", g)
cv2.imshow("bilateral", b)
cv2.waitKey()
cv2.destroyAllWindows()
|
import os
import numpy as np
import torch
import torchvision.transforms as transforms
import flow_transforms
from imageio import imread, imsave
from skimage import img_as_ubyte
from skimage.segmentation import mark_boundaries
from loss import *
def vis(img_path, csv_path, save_path):
input_transform = transforms.Compose([
flow_transforms.ArrayToTensor(),
transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
transforms.Normalize(mean=[0.411, 0.432, 0.45], std=[1, 1, 1])
])
img_ = imread(img_path)
ori_img = input_transform(img_)
    mean_values = torch.tensor([0.411, 0.432, 0.45], dtype=ori_img.dtype).view(3, 1, 1)  # same dtype as the transformed image; no GPU round-trip needed just for the dtype
spixel_label_map = np.loadtxt(csv_path, delimiter=",")
n_spixel = len(np.unique(spixel_label_map))
given_img_np = (ori_img + mean_values).clamp(0, 1).detach().cpu().numpy().transpose(1, 2, 0)
spixel_bd_image = mark_boundaries(given_img_np / np.max(given_img_np), spixel_label_map.astype(int), color=(1, 1, 1))
spixel_viz = spixel_bd_image.astype(np.float32).transpose(2, 0, 1)
imgId = os.path.basename(img_path)[:-4]
# ************************ Save all result*******************************************
# save spixel viz
if not os.path.isdir(os.path.join(save_path, 'spixel_viz')):
os.makedirs(os.path.join(save_path, 'spixel_viz'))
spixl_save_name = os.path.join(save_path, 'spixel_viz', imgId + '_sPixel.png')
imsave(spixl_save_name, img_as_ubyte(spixel_viz.transpose(1, 2, 0)))
return
def main():
img_path = './BSD500/ori_sz/test/100099_img.jpg'
# img_path = './nyu_test_set/nyu_preprocess_tst/img/00044.jpg'
csv_path = '/media/yuanqing/ssd/code/visual_tracking/SNIC_mex/output/BSD/600/100099_img.csv'
output_path = './output/vis/bsd/snic'
# if not os.path.isdir(output_path):
# os.makedirs(output_path)
vis(img_path, csv_path, output_path)
if __name__ == '__main__':
main()
|
from collections import OrderedDict
from concurrent import futures
from datetime import datetime, timedelta
import ftplib
import math
import os
from os import PathLike
from pathlib import Path
from typing import Collection
import fiona.crs
import numpy
import rasterio
from rasterio.crs import CRS
from rasterio.enums import Resampling
import rasterio.features
import shapely
import shapely.geometry
import shapely.wkt
import xarray
import PyOFS
from PyOFS import (
CRS_EPSG,
DATA_DIRECTORY,
LEAFLET_NODATA_VALUE,
TIFF_CREATION_OPTIONS,
get_logger,
utilities,
)
LOGGER = get_logger('PyOFS.VIIRS')
VIIRS_START_TIME = datetime.strptime('2012-03-01 00:10:00', '%Y-%m-%d %H:%M:%S')
VIIRS_PERIOD = timedelta(days=16)
PASS_TIMES_FILENAME = DATA_DIRECTORY / 'reference' / 'viirs_pass_times.txt'
STUDY_AREA_POLYGON_FILENAME = DATA_DIRECTORY / 'reference' / 'wcofs.gpkg:study_area'
OUTPUT_CRS = fiona.crs.from_epsg(CRS_EPSG)
NRT_DELAY = timedelta(hours=2)
SOURCE_URLS = OrderedDict(
{
'OpenDAP': OrderedDict(
{
'NESDIS': 'https://www.star.nesdis.noaa.gov/thredds/dodsC',
'JPL': 'https://podaac-opendap.jpl.nasa.gov:443/opendap/allData/ghrsst/data/GDS2/L3U',
'NODC': 'https://data.nodc.noaa.gov/thredds/catalog/ghrsst/L3U',
}
),
'FTP': OrderedDict({'NESDIS': 'ftp.star.nesdis.noaa.gov/pub/socd2/coastwatch/sst'}),
}
)
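# VIIRSDataset.__init__ below tries each OpenDAP endpoint in order and, if none of them can be
# opened, falls back to downloading the granule over FTP before raising a NoDataError.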
class VIIRSDataset:
"""
Visible Infrared Imaging Radiometer Suite (VIIRS) sea-surface temperature.
"""
study_area_transform = None
study_area_extent = None
study_area_bounds = None
study_area_coordinates = None
def __init__(
self,
data_time: datetime = None,
satellite: str = 'NPP',
study_area_polygon_filename: PathLike = STUDY_AREA_POLYGON_FILENAME,
algorithm: str = 'OSPO',
version: str = None,
):
"""
Retrieve VIIRS NetCDF observation from NOAA with given datetime.
:param data_time: observation datetime
:param satellite: VIIRS platform
:param study_area_polygon_filename: filename of vector file containing study area boundary
:param algorithm: either 'STAR' or 'OSPO'
:param version: ACSPO algorithm version
:raises NoDataError: if observation does not exist
"""
if not isinstance(study_area_polygon_filename, Path):
study_area_polygon_filename = Path(study_area_polygon_filename)
if data_time is None:
data_time = datetime.now()
# round minute to nearest 10 minutes (VIIRS data interval)
self.data_time = PyOFS.round_to_ten_minutes(data_time)
self.satellite = satellite
self.study_area_polygon_filename = study_area_polygon_filename
# use NRT flag if granule is less than 13 days old
self.near_real_time = datetime.now() - data_time <= timedelta(days=13)
self.algorithm = algorithm
if version is None:
if data_time >= datetime(2019, 4, 23, 12, 50):
self.version = '2.61'
elif data_time >= datetime(2018, 11, 7, 15, 10):
self.version = '2.60'
elif data_time >= datetime(2017, 9, 14, 12, 50):
self.version = '2.41'
else:
self.version = '2.40'
else:
self.version = version
self.url = None
day_dir = f'{self.data_time.year}/{self.data_time.timetuple().tm_yday:03}'
filename = f'{self.data_time:%Y%m%d%H%M%S}-{self.algorithm}-L3U_GHRSST-SSTsubskin-VIIRS_{self.satellite.upper()}-ACSPO_V{self.version}-v02.0-fv01.0.nc'
# TODO N20 does not yet have a reanalysis archive on NESDIS (as of March 8th, 2019)
if self.satellite.upper() == 'N20' and not self.near_real_time:
raise PyOFS.NoDataError(
f'{self.satellite.upper()} does not yet have a reanalysis archive'
)
for source, source_url in SOURCE_URLS['OpenDAP'].items():
url = source_url
if self.near_real_time:
if source == 'NESDIS':
url = f'{source_url}/grid{self.satellite.upper()}VIIRSNRTL3UWW00/{day_dir}/{filename}'
elif source == 'JPL':
url = f'{source_url}/VIIRS_{self.satellite.upper()}/{algorithm}/v{self.version}/{day_dir}/{filename}'
                elif source == 'NODC':
url = f'{source_url}/VIIRS_{self.satellite.upper()}/{algorithm}/{day_dir}/{filename}'
else:
if source == 'NESDIS':
url = f'{source_url}/grid{"" if self.near_real_time else "S"}{self.satellite.upper()}VIIRSSCIENCEL3UWW00/{day_dir}/{filename}'
else:
LOGGER.warning(f'{source} does not contain a reanalysis archive')
try:
self.dataset = xarray.open_dataset(url)
self.url = url
break
except Exception as error:
LOGGER.warning(f'{error.__class__.__name__}: {error}')
if self.url is None:
LOGGER.warning('Error collecting from OpenDAP; falling back to FTP...')
for source, source_url in SOURCE_URLS['FTP'].items():
host_url, ftp_input_dir = source_url.split('/', 1)
ftp_path = ftp_input_dir
url = host_url
if source == 'NESDIS':
if self.near_real_time:
ftp_path = f'/{ftp_input_dir}/nrt/viirs/{self.satellite.lower()}/l3u/{day_dir}/{filename}'
else:
ftp_path = f'/{ftp_input_dir}/ran/viirs/{"S" if self.satellite.upper() == "NPP" else ""}{self.satellite.lower()}/l3u/{day_dir}/{filename}'
url = f'{host_url}/{ftp_path.lstrip("/")}'
try:
with ftplib.FTP(host_url) as ftp_connection:
ftp_connection.login()
output_dir = DATA_DIRECTORY / 'input' / 'viirs'
if not output_dir.exists():
os.makedirs(output_dir, exist_ok=True)
output_filename = output_dir / f'viirs_{self.data_time:%Y%m%dT%H%M}.nc'
if output_filename.exists():
os.remove(output_filename)
try:
with open(output_filename, 'wb') as output_file:
ftp_connection.retrbinary(
f'RETR {ftp_path}', output_file.write
)
self.dataset = xarray.open_dataset(output_filename)
except:
raise
finally:
os.remove(output_filename)
self.url = url
break
except Exception as error:
LOGGER.warning(f'{error.__class__.__name__}: {error}')
if self.url is not None:
break
if self.url is None:
raise PyOFS.NoDataError(f'No VIIRS observation found at {self.data_time} UTC.')
# construct rectangular polygon of granule extent
if 'geospatial_bounds' in self.dataset.attrs:
self.data_extent = shapely.wkt.loads(self.dataset.geospatial_bounds)
elif 'geospatial_lon_min' in self.dataset.attrs:
lon_min = float(self.dataset.geospatial_lon_min)
lon_max = float(self.dataset.geospatial_lon_max)
lat_min = float(self.dataset.geospatial_lat_min)
lat_max = float(self.dataset.geospatial_lat_max)
if lon_min < lon_max:
self.data_extent = shapely.geometry.Polygon(
[
(lon_min, lat_max),
(lon_max, lat_max),
(lon_max, lat_min),
(lon_min, lat_min),
]
)
else:
# geospatial bounds cross the antimeridian, so we create a multipolygon
self.data_extent = shapely.geometry.MultiPolygon(
[
shapely.geometry.Polygon(
[
(lon_min, lat_max),
(180, lat_max),
(180, lat_min),
(lon_min, lat_min),
]
),
shapely.geometry.Polygon(
[
(-180, lat_max),
(lon_max, lat_max),
(lon_max, lat_min),
(-180, lat_min),
]
),
]
)
else:
LOGGER.warning(f'{self.data_time} UTC: Dataset has no stored bounds...')
lon_pixel_size = self.dataset.geospatial_lon_resolution
lat_pixel_size = self.dataset.geospatial_lat_resolution
if VIIRSDataset.study_area_extent is None:
LOGGER.debug(
f'Calculating indices and transform from granule at {self.data_time} UTC...'
)
# get first record in layer
VIIRSDataset.study_area_extent = shapely.geometry.MultiPolygon(
[
shapely.geometry.Polygon(polygon[0])
for polygon in utilities.get_first_record(
self.study_area_polygon_filename
)['geometry']['coordinates']
]
)
VIIRSDataset.study_area_bounds = VIIRSDataset.study_area_extent.bounds
VIIRSDataset.study_area_transform = rasterio.transform.from_origin(
VIIRSDataset.study_area_bounds[0],
VIIRSDataset.study_area_bounds[3],
lon_pixel_size,
lat_pixel_size,
)
if VIIRSDataset.study_area_bounds is not None:
self.dataset = self.dataset.isel(time=0).sel(
lon=slice(
VIIRSDataset.study_area_bounds[0], VIIRSDataset.study_area_bounds[2]
),
lat=slice(
VIIRSDataset.study_area_bounds[3], VIIRSDataset.study_area_bounds[1]
),
)
if VIIRSDataset.study_area_coordinates is None:
VIIRSDataset.study_area_coordinates = {
'lon': self.dataset['lon'],
'lat': self.dataset['lat'],
}
def bounds(self) -> tuple:
"""
Get coordinate bounds of observation.
:return: tuple of bounds (west, south, east, north)
"""
return self.data_extent.bounds
def cell_size(self) -> tuple:
"""
Get cell sizes of observation.
:return: tuple of cell sizes (x_size, y_size)
"""
return self.dataset.geospatial_lon_resolution, self.dataset.geospatial_lat_resolution
def data(self, variable: str = 'sst', correct_sses=True) -> numpy.array:
"""
        Retrieve data for the given variable.
        :param variable: variable name (either 'sst' or 'sses')
        :param correct_sses: whether to subtract the SSES (sensor-specific error statistics) bias from SST
:return: matrix of data in Celsius
"""
if variable == 'sst':
return self._sst(correct_sses)
elif variable == 'sses':
return self._sses()
def _sst(self, correct_sses: bool = False) -> numpy.array:
"""
Return matrix of sea surface temperature.
:param correct_sses: whether to apply sensor bias
:return: matrix of SST in Celsius
"""
# observation SST data (masked array) using vertically reflected VIIRS grid
output_sst_data = self.dataset['sea_surface_temperature'].values
# check for unmasked data
if not numpy.isnan(output_sst_data).all():
if numpy.nanmax(output_sst_data) > 0:
if numpy.nanmin(output_sst_data) <= 0:
output_sst_data[output_sst_data <= 0] = numpy.nan
if correct_sses:
sses = self._sses()
mismatched_records = len(
numpy.where(numpy.isnan(output_sst_data) != (sses == 0))[0]
)
total_records = output_sst_data.shape[0] * output_sst_data.shape[1]
mismatch_percentage = mismatched_records / total_records * 100
if mismatch_percentage > 0:
LOGGER.warning(
f'{self.data_time} UTC: SSES extent mismatch at {mismatch_percentage:.1f}%'
)
output_sst_data -= sses
# convert from Kelvin to Celsius (subtract 273.15)
output_sst_data -= 273.15
else:
output_sst_data[:] = numpy.nan
return output_sst_data
def _sses(self) -> numpy.array:
"""
Return matrix of sensor-specific error statistics.
:return: array of SSES bias in Celsius
"""
# observation bias values using vertically reflected VIIRS grid
sses_data = self.dataset['sses_bias'].values
# replace masked values with 0
sses_data[numpy.isnan(sses_data)] = 0
# negative offset by 2.048
sses_data -= 2.048
return sses_data
def write_rasters(
self,
output_dir: PathLike,
variables: Collection[str] = ('sst', 'sses'),
filename_prefix: str = 'viirs',
fill_value: float = LEAFLET_NODATA_VALUE,
driver: str = 'GTiff',
correct_sses: bool = False,
):
"""
Write VIIRS rasters to file using data from given variables.
:param output_dir: path to output directory
:param variables: variable names to write
:param filename_prefix: prefix for output filenames
:param fill_value: desired fill value of output
:param driver: strings of valid GDAL driver (currently one of 'GTiff', 'GPKG', or 'AAIGrid')
:param correct_sses: whether to subtract SSES bias from SST
"""
if not isinstance(output_dir, Path):
output_dir = Path(output_dir)
for variable in variables:
input_data = self.data(variable, correct_sses)
if variable == 'sses':
fill_value = 0
if input_data is not None and not numpy.isnan(input_data).all():
if fill_value is not None:
input_data[numpy.isnan(input_data)] = fill_value
gdal_args = {
'height': input_data.shape[0],
'width': input_data.shape[1],
'count': 1,
'dtype': rasterio.float32,
'crs': CRS.from_dict(OUTPUT_CRS),
'transform': VIIRSDataset.study_area_transform,
'nodata': fill_value,
}
if driver == 'AAIGrid':
file_extension = 'asc'
gdal_args.update({'FORCE_CELLSIZE': 'YES'})
elif driver == 'GPKG':
file_extension = 'gpkg'
else:
file_extension = 'tiff'
gdal_args.update(TIFF_CREATION_OPTIONS)
output_filename = output_dir / f'{filename_prefix}_{variable}.{file_extension}'
# use rasterio to write to raster with GDAL args
LOGGER.info(f'Writing to {output_filename}')
with rasterio.open(output_filename, 'w', driver, **gdal_args) as output_raster:
output_raster.write(input_data, 1)
if driver == 'GTiff':
output_raster.build_overviews(
PyOFS.overview_levels(input_data.shape), Resampling['average']
)
output_raster.update_tags(ns='rio_overview', resampling='average')
def __repr__(self):
used_params = [self.data_time.__repr__()]
optional_params = [
self.satellite,
self.study_area_polygon_filename,
self.near_real_time,
self.algorithm,
self.version,
]
for param in optional_params:
if param is not None:
                if isinstance(param, str):
param = f'"{param}"'
else:
param = str(param)
used_params.append(param)
        return f'{self.__class__.__name__}({", ".join(used_params)})'
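# Minimal usage sketch (illustrative only; assumes network access to the endpoints above and a
# valid study-area polygon at STUDY_AREA_POLYGON_FILENAME):
#
#     granule = VIIRSDataset(datetime(2019, 6, 1, 12, 0), satellite='NPP')
#     granule.write_rasters(DATA_DIRECTORY / 'output' / 'viirs', variables=('sst',))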
class VIIRSRange:
"""
Range of VIIRS observation.
"""
study_area_transform = None
study_area_index_bounds = None
def __init__(
self,
start_time: datetime,
end_time: datetime,
satellites: list = ('NPP', 'N20'),
study_area_polygon_filename: PathLike = STUDY_AREA_POLYGON_FILENAME,
pass_times_filename: PathLike = PASS_TIMES_FILENAME,
algorithm: str = 'OSPO',
version: str = None,
):
"""
Collect VIIRS datasets within time interval.
:param start_time: beginning of time interval (in UTC)
:param end_time: end of time interval (in UTC)
:param satellites: VIIRS platforms
:param study_area_polygon_filename: filename of vector file of study area boundary
:param pass_times_filename: path to text file with pass times
:param algorithm: either 'STAR' or 'OSPO'
:param version: ACSPO algorithm version
:raises NoDataError: if data does not exist
"""
if not isinstance(study_area_polygon_filename, Path):
study_area_polygon_filename = Path(study_area_polygon_filename)
if not isinstance(pass_times_filename, Path):
pass_times_filename = Path(pass_times_filename)
self.start_time = start_time
if end_time > datetime.utcnow():
# VIIRS near real time delay is 2 hours behind UTC
self.end_time = datetime.utcnow() - NRT_DELAY
else:
self.end_time = end_time
self.satellites = satellites
self.study_area_polygon_filename = study_area_polygon_filename
self.viirs_pass_times_filename = pass_times_filename
self.algorithm = algorithm
self.version = version
if 'N20' in self.satellites:
self.pass_times = get_pass_times(
self.start_time, self.end_time, self.viirs_pass_times_filename
            ) - timedelta(minutes=50)
else:
self.pass_times = get_pass_times(
self.start_time, self.end_time, self.viirs_pass_times_filename
)
if len(self.pass_times) > 0:
LOGGER.info(
f'Collecting VIIRS data from {len(self.pass_times)} passes between {numpy.min(self.pass_times)} UTC and {numpy.max(self.pass_times)} UTC...'
)
# create dictionary to store scenes
self.datasets = {pass_time: {} for pass_time in self.pass_times}
with futures.ThreadPoolExecutor() as concurrency_pool:
for satellite in self.satellites:
running_futures = {}
for pass_time in self.pass_times:
running_future = concurrency_pool.submit(
VIIRSDataset,
data_time=pass_time,
study_area_polygon_filename=self.study_area_polygon_filename,
algorithm=self.algorithm,
version=self.version,
satellite=satellite,
)
running_futures[running_future] = pass_time
for completed_future in futures.as_completed(running_futures):
if completed_future.exception() is None:
pass_time = running_futures[completed_future]
viirs_dataset = completed_future.result()
self.datasets[pass_time][satellite] = viirs_dataset
else:
LOGGER.warning(
f'Dataset creation error: {completed_future.exception()}'
)
del running_futures
if len(self.datasets) > 0:
VIIRSRange.study_area_transform = VIIRSDataset.study_area_transform
VIIRSRange.study_area_extent = VIIRSDataset.study_area_extent
VIIRSRange.study_area_bounds = VIIRSDataset.study_area_bounds
LOGGER.debug(f'VIIRS data was found in {len(self.datasets)} passes.')
else:
raise PyOFS.NoDataError(
f'No VIIRS datasets found between {self.start_time} UTC and {self.end_time} UTC.'
)
else:
raise PyOFS.NoDataError(
f'There are no VIIRS passes between {self.start_time} UTC and {self.end_time} UTC.'
)
def cell_size(self) -> tuple:
"""
Get cell sizes of observation.
:return: tuple of cell sizes (x_size, y_size)
"""
        sample_dataset = next(
            dataset
            for satellite_datasets in self.datasets.values()
            for dataset in satellite_datasets.values()
        )
        return (
            sample_dataset.dataset.geospatial_lon_resolution,
            sample_dataset.dataset.geospatial_lat_resolution,
        )
def data(
self,
start_time: datetime = None,
end_time: datetime = None,
average: bool = False,
correct_sses: bool = False,
        variables: Collection[str] = ('sst',),
satellite: str = None,
) -> dict:
"""
Get VIIRS data (either overlapped or averaged) from the given time interval.
:param start_time: beginning of time interval (in UTC)
:param end_time: end of time interval (in UTC)
:param average: whether to average rasters, otherwise overlap them
:param correct_sses: whether to subtract SSES bias from L3 sea surface temperature data
:param variables: variables to write (either 'sst' or 'sses')
:param satellite: VIIRS platform to retrieve. Default: per-granule averages of platform datasets
:return dictionary of data per variable
"""
start_time = start_time if start_time is not None else self.start_time
end_time = end_time if end_time is not None else self.end_time
dataset_times = numpy.sort(list(self.datasets.keys()))
# find first and last times within specified time interval
start_index = numpy.searchsorted(dataset_times, start_time)
end_index = numpy.searchsorted(dataset_times, end_time)
pass_times = dataset_times[start_index:end_index]
if variables is None:
variables = ['sst', 'sses']
variables_data = {}
for variable in variables:
scenes_data = []
for pass_time in pass_times:
if len(self.datasets[pass_time]) > 0:
if satellite is not None and satellite in self.datasets[pass_time]:
dataset = self.datasets[pass_time][satellite]
scene_data = dataset.data(variable, correct_sses)
else:
scene_data = numpy.nanmean(
numpy.stack(
[
dataset.data(variable, correct_sses)
for dataset in self.datasets[pass_time].values()
],
axis=0,
),
axis=0,
)
if numpy.any(~numpy.isnan(scene_data)):
scenes_data.append(scene_data)
variable_data = numpy.empty(
(
VIIRSDataset.study_area_coordinates['lat'].shape[0],
VIIRSDataset.study_area_coordinates['lon'].shape[0],
)
)
variable_data[:] = numpy.nan
if len(scenes_data) > 0:
# check if user wants to average data
if average:
variable_data = numpy.nanmean(numpy.stack(scenes_data, axis=0), axis=0)
else: # otherwise overlap based on datetime
for scene_data in scenes_data:
if variable == 'sses':
scene_data[scene_data == 0] = numpy.nan
variable_data[~numpy.isnan(scene_data)] = scene_data[
~numpy.isnan(scene_data)
]
variables_data[variable] = variable_data
return variables_data
def write_rasters(
self,
output_dir: PathLike,
variables: Collection[str] = ('sst', 'sses'),
filename_prefix: str = 'viirs',
fill_value: float = None,
driver: str = 'GTiff',
correct_sses: bool = False,
satellite: str = None,
):
"""
Write individual VIIRS rasters to directory.
:param output_dir: path to output directory
:param variables: variable names to write
:param filename_prefix: prefix for output filenames
:param fill_value: desired fill value of output
:param driver: string of valid GDAL driver (currently one of 'GTiff', 'GPKG', or 'AAIGrid')
:param correct_sses: whether to subtract SSES bias from L3 sea surface temperature data
:param satellite: VIIRS platform to retrieve; if not specified, will average from both satellites
"""
if not isinstance(output_dir, Path):
output_dir = Path(output_dir)
        # write a separate raster for each scene retrieved per pass
        with futures.ThreadPoolExecutor() as concurrency_pool:
            for dataset_time, satellite_datasets in self.datasets.items():
                for current_satellite, dataset in satellite_datasets.items():
                    if satellite is None or current_satellite == satellite:
                        concurrency_pool.submit(
                            dataset.write_rasters,
                            output_dir,
                            variables=variables,
                            filename_prefix=f'{filename_prefix}_{dataset_time:%Y%m%d%H%M%S}',
                            fill_value=fill_value,
                            driver=driver,
                            correct_sses=correct_sses,
                        )
def write_raster(
self,
output_dir: PathLike,
filename_prefix: str = None,
filename_suffix: str = None,
start_time: datetime = None,
end_time: datetime = None,
average: bool = False,
fill_value: float = LEAFLET_NODATA_VALUE,
driver: str = 'GTiff',
correct_sses: bool = False,
        variables: Collection[str] = ('sst',),
satellite: str = None,
):
"""
Write VIIRS raster of SST data (either overlapped or averaged) from the given time interval.
:param output_dir: path to output directory
:param filename_prefix: prefix for output filenames
:param filename_suffix: suffix for output filenames
:param start_time: beginning of time interval (in UTC)
:param end_time: end of time interval (in UTC)
:param average: whether to average rasters, otherwise overlap them
:param fill_value: desired fill value of output
:param driver: string of valid GDAL driver (currently one of 'GTiff', 'GPKG', or 'AAIGrid')
:param correct_sses: whether to subtract SSES bias from L3 sea surface temperature data
:param variables: variables to write (either 'sst' or 'sses')
:param satellite: VIIRS platform to retrieve; if not specified, will average from both satellites
"""
if not isinstance(output_dir, Path):
output_dir = Path(output_dir)
if start_time is None:
start_time = self.start_time
if end_time is None:
end_time = self.end_time
variable_data = self.data(
start_time, end_time, average, correct_sses, variables, satellite
)
for variable, output_data in variable_data.items():
if output_data is not None and numpy.any(~numpy.isnan(output_data)):
                raster_data = output_data.astype(rasterio.float32)
                if fill_value is not None:
                    raster_data[numpy.isnan(raster_data)] = fill_value
# define arguments to GDAL driver
gdal_args = {
'height': raster_data.shape[0],
'width': raster_data.shape[1],
'count': 1,
'crs': OUTPUT_CRS,
'dtype': raster_data.dtype,
'nodata': numpy.array([fill_value]).astype(raster_data.dtype).item(),
'transform': VIIRSRange.study_area_transform,
}
if driver == 'AAIGrid':
file_extension = 'asc'
gdal_args.update({'FORCE_CELLSIZE': 'YES'})
elif driver == 'GPKG':
file_extension = 'gpkg'
else:
file_extension = 'tiff'
gdal_args.update(TIFF_CREATION_OPTIONS)
if filename_prefix is None:
current_filename_prefix = f'{satellite}_viirs_{variable}'
else:
current_filename_prefix = filename_prefix
if filename_suffix is None:
start_time_string = f'{start_time:%Y%m%d%H%M}'
end_time_string = f'{end_time:%Y%m%d%H%M}'
if '0000' in start_time_string and '0000' in end_time_string:
start_time_string = start_time_string.replace('0000', '')
end_time_string = end_time_string.replace('0000', '')
current_filename_suffix = f'{start_time_string}_{end_time_string}'
else:
current_filename_suffix = filename_suffix
output_filename = (
output_dir
/ f'{current_filename_prefix}_{current_filename_suffix}.{file_extension}'
)
LOGGER.info(f'Writing {output_filename}')
with rasterio.open(output_filename, 'w', driver, **gdal_args) as output_raster:
output_raster.write(raster_data, 1)
if driver == 'GTiff':
output_raster.build_overviews(
PyOFS.overview_levels(raster_data.shape), Resampling['average']
)
output_raster.update_tags(ns='rio_overview', resampling='average')
else:
LOGGER.warning(
f'No {"VIIRS" if satellite is None else "VIIRS " + satellite} {variable} found between {start_time} and {end_time}.'
)
def to_xarray(
self,
variables: Collection[str] = ('sst', 'sses'),
mean: bool = True,
correct_sses: bool = False,
satellites: list = None,
) -> xarray.Dataset:
"""
Converts to xarray Dataset.
:param variables: variables to use
:param mean: whether to average all time indices
:param correct_sses: whether to subtract SSES bias from L3 sea surface temperature data
:param satellites: VIIRS platforms to retrieve; if not specified, will average from both satellites
:return: xarray observation of given variables
"""
output_dataset = xarray.Dataset()
coordinates = OrderedDict(
{
'lat': VIIRSDataset.study_area_coordinates['lat'],
'lon': VIIRSDataset.study_area_coordinates['lon'],
}
)
if satellites is not None:
coordinates['satellite'] = satellites
satellites_data = [
self.data(
average=mean,
correct_sses=correct_sses,
variables=variables,
satellite=satellite,
)
for satellite in satellites
]
variables_data = {}
for variable in variables:
satellites_variable_data = [
satellite_data[variable]
for satellite_data in satellites_data
if satellite_data[variable] is not None
]
variables_data[variable] = numpy.stack(satellites_variable_data, axis=2)
else:
variables_data = self.data(
average=mean, correct_sses=correct_sses, variables=variables
)
for variable, variable_data in variables_data.items():
output_dataset.update(
{
variable: xarray.DataArray(
variable_data, coords=coordinates, dims=tuple(coordinates.keys())
)
}
)
return output_dataset
def to_netcdf(
self,
output_file: str,
variables: Collection[str] = None,
mean: bool = True,
correct_sses: bool = False,
satellites: list = None,
):
"""
Writes to NetCDF file.
:param output_file: output file to write
:param variables: variables to use
:param mean: whether to average all time indices
:param correct_sses: whether to subtract SSES bias from L3 sea surface temperature data
:param satellites: VIIRS platforms to retrieve; if not specified, will average from both satellites
"""
self.to_xarray(variables, mean, correct_sses, satellites).to_netcdf(output_file)
def __repr__(self):
used_params = [self.start_time.__repr__(), self.end_time.__repr__()]
optional_params = [
self.satellites,
self.study_area_polygon_filename,
self.viirs_pass_times_filename,
self.algorithm,
self.version,
]
for param in optional_params:
if param is not None:
                if isinstance(param, str):
param = f'"{param}"'
else:
param = str(param)
used_params.append(param)
return f'{self.__class__.__name__}({", ".join(used_params)})'
def store_viirs_pass_times(
satellite: str,
study_area_polygon_filename: PathLike = STUDY_AREA_POLYGON_FILENAME,
start_time: datetime = VIIRS_START_TIME,
output_filename: str = PASS_TIMES_FILENAME,
num_periods: int = 1,
algorithm: str = 'STAR',
version: str = '2.40',
):
"""
Compute VIIRS pass times from the given start date along the number of periods specified.
:param satellite: satellite for which to store pass times, either NPP or N20
:param study_area_polygon_filename: path to vector file containing polygon of study area
:param start_time: beginning of given VIIRS period (in UTC)
:param output_filename: path to output file
:param num_periods: number of periods to store
:param algorithm: either 'STAR' or 'OSPO'
:param version: ACSPO Version number (2.40 - 2.41)
"""
if not isinstance(study_area_polygon_filename, Path):
study_area_polygon_filename = Path(study_area_polygon_filename)
start_time = PyOFS.round_to_ten_minutes(start_time)
end_time = PyOFS.round_to_ten_minutes(start_time + (VIIRS_PERIOD * num_periods))
LOGGER.info(
f'Getting pass times between {start_time:%Y-%m-%d %H:%M:%S} and {end_time:%Y-%m-%d %H:%M:%S}'
)
datetime_range = PyOFS.ten_minute_range(start_time, end_time)
# construct polygon from the first record in layer
study_area_polygon = shapely.geometry.Polygon(
utilities.get_first_record(study_area_polygon_filename)['geometry']['coordinates'][0]
)
lines = []
for datetime_index in range(len(datetime_range)):
current_time = datetime_range[datetime_index]
# find number of cycles from the first orbit to the present day
num_cycles = int((datetime.now() - start_time).days / 16)
# iterate over each cycle
for cycle_index in range(0, num_cycles):
# get current datetime of interest
cycle_offset = VIIRS_PERIOD * cycle_index
cycle_time = current_time + cycle_offset
try:
# get observation of new datetime
dataset = VIIRSDataset(
cycle_time, satellite, study_area_polygon_filename, algorithm, version
)
# check if observation falls within polygon extent
if dataset.data_extent.is_valid:
if study_area_polygon.intersects(dataset.data_extent):
# get duration from current cycle start
cycle_duration = cycle_time - (start_time + cycle_offset)
LOGGER.info(
f'{cycle_time:%Y%m%dT%H%M%S} {cycle_duration / timedelta(seconds=1)}: valid scene (checked {cycle_index + 1} cycle(s))'
)
lines.append(
f'{cycle_time:%Y%m%dT%H%M%S},{cycle_duration / timedelta(seconds=1)}'
)
# if we get to here, break and continue to the next datetime
break
except PyOFS.NoDataError as error:
LOGGER.warning(f'{error.__class__.__name__}: {error}')
else:
LOGGER.warning(
f'{current_time:%Y%m%dT%H%M%S}: missing observation across all cycles'
)
# write lines to file
with open(output_filename, 'w') as output_file:
output_file.write('\n'.join(lines))
LOGGER.info('Wrote data to file')
def get_pass_times(
start_time: datetime,
end_time: datetime,
pass_times_filename: PathLike = PASS_TIMES_FILENAME,
):
"""
    Retrieve an array of datetimes of VIIRS passes within the given time interval, given the initial period durations.
:param start_time: beginning of time interval (in UTC)
:param end_time: end of time interval (in UTC)
:param pass_times_filename: filename of text file with durations of first VIIRS period
    :return: array of pass datetimes within the given time interval
"""
if not isinstance(pass_times_filename, Path):
pass_times_filename = Path(pass_times_filename)
# get datetime of first pass in given file
first_pass_row = numpy.genfromtxt(pass_times_filename, dtype=str, delimiter=',')[0, :]
viirs_start_time = datetime.strptime(first_pass_row[0], '%Y%m%dT%H%M%S') - timedelta(
seconds=float(first_pass_row[1])
)
# get starting datetime of the current VIIRS period
period_start_time = viirs_start_time + timedelta(
days=numpy.floor((start_time - viirs_start_time).days / 16) * 16
)
# get array of seconds since the start of the first 16-day VIIRS period
pass_durations = numpy.genfromtxt(pass_times_filename, dtype=str, delimiter=',')[
:, 1
].T.astype(numpy.float32)
pass_durations = numpy.asarray(
[timedelta(seconds=float(duration)) for duration in pass_durations]
)
# add extra VIIRS periods to end of pass durations
if end_time > (period_start_time + VIIRS_PERIOD):
extra_periods = math.ceil((end_time - period_start_time) / VIIRS_PERIOD) - 1
for period in range(extra_periods):
pass_durations = numpy.append(
pass_durations, pass_durations[-360:] + pass_durations[-1]
)
# get datetimes of VIIRS passes within the given time interval
pass_times = period_start_time + pass_durations
# find starting and ending times within the given time interval
start_index = numpy.searchsorted(pass_times, start_time)
end_index = numpy.searchsorted(pass_times, end_time)
# ensure at least one datetime in range
if start_index == end_index:
end_index += 1
# trim datetimes to within the given time interval
pass_times = pass_times[start_index:end_index]
return pass_times
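# Note: pass times beyond the end of the reference file are extrapolated by repeating the most
# recent offsets shifted forward, so viirs_pass_times.txt only needs to cover the first period.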
if __name__ == '__main__':
output_dir = DATA_DIRECTORY / 'output' / 'test'
start_time = datetime.utcnow() - timedelta(days=1)
end_time = start_time + timedelta(days=1)
viirs_range = VIIRSRange(start_time, end_time)
viirs_range.write_raster(output_dir)
print('done')
|
#!/usr/bin/env python
"""
ViperMonkey: VBA Grammar - Library Functions
ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft
VBA macros (Visual Basic for Applications), mainly for malware analysis.
Author: Philippe Lagadec - http://www.decalage.info
License: BSD, see source code or documentation
Project Repository:
https://github.com/decalage2/ViperMonkey
"""
# === LICENSE ==================================================================
# ViperMonkey is copyright (c) 2015-2016 Philippe Lagadec (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__version__ = '0.02'
# --- IMPORTS ------------------------------------------------------------------
from curses_ascii import isprint
import logging
import re
from pyparsing import *
from vba_object import *
from literals import *
import vb_str
from logger import log
# --- VBA Expressions ---------------------------------------------------------
# 5.6 Expressions
# See below
# any VBA expression: need to pre-declare using Forward() because it is recursive
expression = Forward()
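# Each library function below follows the same pattern: a VBA_Object subclass whose eval()
# emulates the VBA builtin, plus a pyparsing grammar element whose parse action constructs
# that object from the matched tokens.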
# --- CHR --------------------------------------------------------------------
class Chr(VBA_Object):
"""
6.1.2.11.1.4 VBA Chr function
"""
def __init__(self, original_str, location, tokens):
super(Chr, self).__init__(original_str, location, tokens)
# extract argument from the tokens:
# Here the arg is expected to be either an int or a VBA_Object
self.arg = tokens[0]
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug('parsed %r as %s' % (self, self.__class__.__name__))
def to_python(self, context, params=None, indent=0):
arg_str = to_python(self.arg, context)
r = "core.vba_library.run_function(\"_Chr\", vm_context, [" + arg_str + "])"
return r
def return_type(self):
return "STRING"
def eval(self, context, params=None):
# This is implemented in the common vba_library._Chr handler class.
import vba_library
chr_handler = vba_library._Chr()
param = eval_arg(self.arg, context)
return chr_handler.eval(context, [param])
def __repr__(self):
return 'Chr(%s)' % repr(self.arg)
# Chr, Chr$, ChrB, ChrW()
chr_ = (
    Suppress(Regex(re.compile(r'Chr[BW]?\$?', re.IGNORECASE)))
+ Suppress('(')
+ expression
+ Suppress(')')
)
chr_.setParseAction(Chr)
# --- ASC --------------------------------------------------------------------
class Asc(VBA_Object):
"""
VBA Asc function
"""
def __init__(self, original_str, location, tokens):
super(Asc, self).__init__(original_str, location, tokens)
        # This could be an asc(...) call or a reference to a variable called asc.
# If there are parsed arguments it is a call.
self.arg = None
if (len(tokens) > 0):
# Here the arg is expected to be either a character or a VBA_Object
self.arg = tokens[0]
def to_python(self, context, params=None, indent=0):
return "ord(" + to_python(self.arg, context) + ")"
def return_type(self):
return "INTEGER"
def eval(self, context, params=None):
# Are we just looking up a variable called 'asc'?
if (self.arg is None):
try:
return context.get("asc")
except KeyError:
return "NULL"
# Eval the argument.
c = eval_arg(self.arg, context)
# Don't modify the "**MATCH ANY**" special value.
c_str = None
try:
c_str = str(c).strip()
except UnicodeEncodeError:
            c_str = ''.join(filter(isprint, c)).strip()
if (c_str == "**MATCH ANY**"):
return c
# Looks like Asc(NULL) is NULL?
if (c == "NULL"):
return 0
# Calling Asc() on int?
if (isinstance(c, int)):
r = c
else:
# Got a string.
# Should this match anything?
if (c_str == "**MATCH ANY**"):
r = "**MATCH ANY**"
# This is an unmodified Asc() call.
else:
r = vb_str.get_ms_ascii_value(c_str)
# Return the result.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("Asc: return %r" % r)
return r
def __repr__(self):
return 'Asc(%s)' % repr(self.arg)
# Asc()
# TODO: see MS-VBAL 6.1.2.11.1.1 page 240 => AscB, AscW
asc = Suppress((CaselessKeyword('Asc') | CaselessKeyword('AscW'))) + Optional(Suppress('(') + expression + Suppress(')'))
asc.setParseAction(Asc)
# --- StrReverse() --------------------------------------------------------------------
class StrReverse(VBA_Object):
"""
VBA StrReverse function
"""
def __init__(self, original_str, location, tokens):
super(StrReverse, self).__init__(original_str, location, tokens)
# extract argument from the tokens:
# Here the arg is expected to be either a string or a VBA_Object
self.arg = tokens[0]
def return_type(self):
return "STRING"
def eval(self, context, params=None):
# return the string with all characters in reverse order:
return eval_arg(self.arg, context)[::-1]
def __repr__(self):
return 'StrReverse(%s)' % repr(self.arg)
# StrReverse()
strReverse = Suppress(CaselessLiteral('StrReverse') + Literal('(')) + expression + Suppress(Literal(')'))
strReverse.setParseAction(StrReverse)
# --- ENVIRON() --------------------------------------------------------------------
class Environ(VBA_Object):
"""
VBA Environ function
"""
def __init__(self, original_str, location, tokens):
super(Environ, self).__init__(original_str, location, tokens)
# extract argument from the tokens:
# Here the arg is expected to be either a string or a VBA_Object
self.arg = tokens.arg
def return_type(self):
return "STRING"
def eval(self, context, params=None):
# return the environment variable name surrounded by % signs:
# e.g. Environ("TEMP") => "%TEMP%"
arg = eval_arg(self.arg, context=context)
value = '%%%s%%' % arg
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug('evaluating Environ(%s) => %r' % (arg, value))
return value
def __repr__(self):
return 'Environ(%s)' % repr(self.arg)
# Environ("name") => just translated to "%name%", that is enough for malware analysis
environ = Suppress(CaselessKeyword('Environ') + '(') + expression('arg') + Suppress(')')
environ.setParseAction(Environ)
|
# sigtools - Collection of Python modules for manipulating function signatures
# Copyright (C) 2013-2021 Yann Kaiser
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import itertools
import collections
from functools import partial
from sigtools import _util
try:
zip_longest = itertools.izip_longest
except AttributeError: # pragma: no cover
zip_longest = itertools.zip_longest
class Signature(_util.funcsigs.Signature):
__slots__ = _util.funcsigs.Signature.__slots__ + ('sources',)
def __init__(self, *args, **kwargs):
self.sources = kwargs.pop('sources', {})
super(Signature, self).__init__(*args, **kwargs)
@classmethod
def upgrade(cls, inst, sources):
if isinstance(inst, cls):
return inst
return cls(
inst.parameters.values(),
return_annotation=inst.return_annotation,
sources=sources)
def replace(self, *args, **kwargs):
try:
sources = kwargs.pop('sources')
except KeyError:
sources = self.sources
ret = super(Signature, self).replace(*args, **kwargs)
ret.sources = sources
return ret
def default_sources(sig, obj):
srcs = dict((pname, [obj]) for pname in sig.parameters)
srcs['+depths'] = {obj: 0}
return srcs
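# A sources mapping associates each parameter name with the list of callables it originated
# from; the special '+depths' key records how many levels of call forwarding separate each
# callable from the outermost one.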
def set_default_sources(sig, obj):
"""Assigns the source of every parameter of sig to obj"""
return Signature.upgrade(sig, default_sources(sig, obj))
def signature(obj):
"""Retrieves to unmodified signature from ``obj``, without taking
`sigtools.specifiers` decorators into account or attempting automatic
signature discovery.
"""
if isinstance(obj, partial):
sig = _util.funcsigs.signature(obj.func)
sig = set_default_sources(sig, obj.func)
return _mask(sig, len(obj.args), False, False, False, False,
obj.keywords or {}, obj)
    sig = _util.funcsigs.signature(obj)
return set_default_sources(sig, obj)
def copy_sources(src, func_swap={}, increase=False):
ret = dict(
(k, [func_swap.get(f, f) for f in v])
for k, v in src.items()
if k != '+depths')
ret['+depths'] = dict(
(func_swap.get(f, f), v + increase)
for f, v in src.get('+depths', {}).items())
return ret
SortedParameters = collections.namedtuple(
'SortedParameters',
'posargs pokargs varargs kwoargs varkwargs sources')
def sort_params(sig, sources=False):
"""Classifies the parameters from sig.
:param inspect.Signature sig: The signature to operate on
    :returns: A tuple ``(posargs, pokargs, varargs, kwoargs, varkwargs)``
:rtype: ``(list, list, Parameter or None, dict, Parameter or None)``
::
>>> from sigtools import signatures, support
>>> from pprint import pprint
>>> pprint(signatures.sort_params(support.s('a, /, b, *args, c, d')))
([<Parameter at 0x7fdda4e89418 'a'>],
[<Parameter at 0x7fdda4e89470 'b'>],
<Parameter at 0x7fdda4e89c58 'args'>,
{'c': <Parameter at 0x7fdda4e89c00 'c'>,
'd': <Parameter at 0x7fdda4e89db8 'd'>},
None)
"""
posargs = []
pokargs = []
varargs = None
kwoargs = _util.OrderedDict()
varkwas = None
for param in sig.parameters.values():
if param.kind == param.POSITIONAL_ONLY:
posargs.append(param)
elif param.kind == param.POSITIONAL_OR_KEYWORD:
pokargs.append(param)
elif param.kind == param.VAR_POSITIONAL:
varargs = param
elif param.kind == param.KEYWORD_ONLY:
kwoargs[param.name] = param
elif param.kind == param.VAR_KEYWORD:
varkwas = param
else:
raise AssertionError('Unknown param kind {0}'.format(param.kind))
if sources:
src = getattr(sig, 'sources', {})
return SortedParameters(posargs, pokargs, varargs, kwoargs, varkwas,
copy_sources(src))
else:
return posargs, pokargs, varargs, kwoargs, varkwas
def apply_params(sig, posargs, pokargs, varargs, kwoargs, varkwargs,
sources=None):
"""Reverses `sort_params`'s operation.
:returns: A new `inspect.Signature` object based off sig,
with the given parameters.
"""
parameters = []
parameters.extend(posargs)
parameters.extend(pokargs)
if varargs:
parameters.append(varargs)
parameters.extend(kwoargs.values())
if varkwargs:
parameters.append(varkwargs)
sig = sig.replace(parameters=parameters)
if sources is not None:
sig = Signature.upgrade(sig, sources)
sig.sources = sources
return sig
class IncompatibleSignatures(ValueError):
"""Raised when two or more signatures are incompatible for the requested
operation.
:ivar inspect.Signature sig: The signature at which point the
incompatibility was discovered
:ivar others: The signatures up until ``sig``
"""
def __init__(self, sig, others):
self.sig = sig
self.others = others
def __str__(self):
return '{0} {1}'.format(
' '.join(str(sig) for sig in self.others),
self.sig,
)
def _add_sources(ret_src, name, *from_sources):
target = ret_src.setdefault(name, [])
target.extend(itertools.chain.from_iterable(
src.get(name, ()) for src in from_sources))
def _add_all_sources(ret_src, params, from_source):
"""Adds the sources from from_source of all given parameters into the
lhs sources multidict"""
for param in params:
ret_src.setdefault(param.name, []).extend(
from_source.get(param.name, ()))
def _exclude_from_seq(seq, el):
for i, x in enumerate(seq):
if el is x:
seq[i] = None
break
def merge_depths(l, r):
ret = dict(l)
for func, depth in r.items():
if func in ret and depth > ret[func]:
continue
ret[func] = depth
return ret
class _Merger(object):
def __init__(self, left, right):
self.l = left
self.r = right
self.performed = False
def perform_once(self):
self.performed = True
self._merge()
def __iter__(self):
self.perform_once()
ret = (
self.posargs, self.pokargs, self.varargs,
self.kwoargs, self.varkwargs,
self.src)
return iter(ret)
def _merge(self):
self.posargs = []
self.pokargs = []
self.varargs_src = [self.l.varargs, self.r.varargs]
self.kwoargs = _util.OrderedDict()
self.varkwargs_src = [
self.l.varkwargs,
self.r.varkwargs
]
self.src = {'+depths': self._merge_depths()}
self.l_unmatched_kwoargs = _util.OrderedDict()
for param in self.l.kwoargs.values():
name = param.name
if name in self.r.kwoargs:
self.kwoargs[name] = self._concile_meta(
param, self.r.kwoargs[name])
self.src[name] = list(itertools.chain(
self.l.sources.get(name, ()), self.r.sources.get(name, ())))
else:
self.l_unmatched_kwoargs[param.name] = param
self.r_unmatched_kwoargs = _util.OrderedDict()
for param in self.r.kwoargs.values():
if param.name not in self.l.kwoargs:
self.r_unmatched_kwoargs[param.name] = param
il_pokargs = iter(self.l.pokargs)
ir_pokargs = iter(self.r.pokargs)
for l_param, r_param in zip_longest(self.l.posargs, self.r.posargs):
if l_param and r_param:
p = self._concile_meta(l_param, r_param)
self.posargs.append(p)
if l_param.name == r_param.name:
_add_sources(self.src, l_param.name,
self.l.sources, self.r.sources)
else:
_add_sources(self.src, l_param.name, self.l.sources)
else:
if l_param:
self._merge_unbalanced_pos(
l_param, self.l.sources,
ir_pokargs, self.r.varargs, self.r.sources)
else:
self._merge_unbalanced_pos(
r_param, self.r.sources,
il_pokargs, self.l.varargs, self.l.sources)
for l_param, r_param in zip_longest(il_pokargs, ir_pokargs):
if l_param and r_param:
if l_param.name == r_param.name:
self.pokargs.append(self._concile_meta(l_param, r_param))
_add_sources(self.src, l_param.name,
self.l.sources, self.r.sources)
else:
for i, pokarg in enumerate(self.pokargs):
self.pokargs[i] = pokarg.replace(
kind=pokarg.POSITIONAL_ONLY)
self.pokargs.append(
self._concile_meta(l_param, r_param)
.replace(kind=l_param.POSITIONAL_ONLY))
_add_sources(self.src, l_param.name, self.l.sources)
else:
if l_param:
self._merge_unbalanced_pok(
l_param, self.l.sources,
self.r.varargs, self.r.varkwargs,
self.r_unmatched_kwoargs, self.r.sources)
else:
self._merge_unbalanced_pok(
r_param, self.r.sources,
self.l.varargs, self.l.varkwargs,
self.l_unmatched_kwoargs, self.l.sources)
if self.l_unmatched_kwoargs:
self._merge_unmatched_kwoargs(
self.l_unmatched_kwoargs, self.r.varkwargs, self.l.sources)
if self.r_unmatched_kwoargs:
self._merge_unmatched_kwoargs(
self.r_unmatched_kwoargs, self.l.varkwargs, self.r.sources)
self.varargs = self._add_starargs(
self.varargs_src, self.l.varargs, self.r.varargs)
self.varkwargs = self._add_starargs(
self.varkwargs_src, self.l.varkwargs, self.r.varkwargs)
def _merge_depths(self):
return merge_depths(self.l.sources.get('+depths', {}),
self.r.sources.get('+depths', {}))
def _add_starargs(self, which, left, right):
if not left or not right:
return None
if all(which):
ret = self._concile_meta(left, right)
if left.name == right.name:
_add_sources(self.src, ret.name,
self.l.sources, self.r.sources)
else:
_add_sources(self.src, ret.name, self.l.sources)
elif which[0]:
ret = left
_add_sources(self.src, ret.name, self.l.sources)
else:
ret = right
_add_sources(self.src, ret.name, self.r.sources)
return ret
def _merge_unbalanced_pos(self, existing, src,
convert_from, o_varargs, o_src):
try:
other = next(convert_from)
except StopIteration:
if o_varargs:
self.posargs.append(existing)
_add_sources(self.src, existing.name, src)
_exclude_from_seq(self.varargs_src, o_varargs)
elif existing.default == existing.empty:
raise ValueError('Unmatched positional parameter: {0}'
.format(existing))
else:
self.posargs.append(self._concile_meta(existing, other))
_add_sources(self.src, existing.name, src)
def _merge_unbalanced_pok(
self, existing, src,
o_varargs, o_varkwargs, o_kwargs_limbo, o_src):
"""tries to insert positional-or-keyword parameters for which there were
no matched positional parameter"""
if existing.name in o_kwargs_limbo:
self.kwoargs[existing.name] = self._concile_meta(
existing, o_kwargs_limbo.pop(existing.name)
).replace(kind=existing.KEYWORD_ONLY)
_add_sources(self.src, existing.name, o_src, src)
elif o_varargs and o_varkwargs:
self.pokargs.append(existing)
_add_sources(self.src, existing.name, src)
elif o_varkwargs:
# convert to keyword argument
self.kwoargs[existing.name] = existing.replace(
kind=existing.KEYWORD_ONLY)
_add_sources(self.src, existing.name, src)
elif o_varargs:
            # convert this and all preceding parameters to positional-only args
self.posargs.extend(
a.replace(kind=a.POSITIONAL_ONLY)
for a in self.pokargs)
self.pokargs[:] = []
self.posargs.append(existing.replace(kind=existing.POSITIONAL_ONLY))
_add_sources(self.src, existing.name, src)
elif existing.default == existing.empty:
raise ValueError('Unmatched regular parameter: {0}'
.format(existing))
def _merge_unmatched_kwoargs(self, unmatched_kwoargs, o_varkwargs, from_src):
if o_varkwargs:
self.kwoargs.update(unmatched_kwoargs)
_add_all_sources(self.src, unmatched_kwoargs.values(), from_src)
_exclude_from_seq(self.varkwargs_src, o_varkwargs)
else:
non_defaulted = [
arg
for arg in unmatched_kwoargs.values()
if arg.default == arg.empty
]
if non_defaulted:
raise ValueError(
'Unmatched keyword parameters: {0}'.format(
' '.join(str(arg) for arg in non_defaulted)))
def _concile_meta(self, left, right):
default = left.empty
if left.default != left.empty and right.default != right.empty:
if left.default == right.default:
default = left.default
else:
# The defaults are different. Short of using an "It's complicated"
# constant, None is the best replacement available, as a lot of
# python code already uses None as default then processes an
# actual default in the function body
default = None
annotation = left.empty
if left.annotation != left.empty and right.annotation != right.empty:
if left.annotation == right.annotation:
annotation = left.annotation
elif left.annotation != left.empty:
annotation = left.annotation
elif right.annotation != right.empty:
annotation = right.annotation
return left.replace(default=default, annotation=annotation)
def merge(*signatures):
"""Tries to compute a signature for which a valid call would also validate
the given signatures.
It guarantees any call that conforms to the merged signature will
conform to all the given signatures. However, some calls that don't
conform to the merged signature may actually work on all the given ones
regardless.
:param inspect.Signature signatures: The signatures to merge together.
:returns: a `inspect.Signature` object
:raises: `IncompatibleSignatures`
::
>>> from sigtools import signatures, support
>>> print(signatures.merge(
... support.s('one, two, *args, **kwargs'),
... support.s('one, two, three, *, alpha, **kwargs'),
... support.s('one, *args, beta, **kwargs')
... ))
(one, two, three, *, alpha, beta, **kwargs)
The resulting signature does not necessarily validate all ways of
conforming to the underlying signatures::
>>> from sigtools import signatures
>>> from inspect import signature
>>>
>>> def left(alpha, *args, **kwargs):
... return alpha
...
>>> def right(beta, *args, **kwargs):
... return beta
...
>>> sig_left = signature(left)
>>> sig_right = signature(right)
>>> sig_merged = signatures.merge(sig_left, sig_right)
>>>
>>> print(sig_merged)
(alpha, /, *args, **kwargs)
>>>
>>> kwargs = {'alpha': 'a', 'beta': 'b'}
>>> left(**kwargs), right(**kwargs) # both functions accept the call
('a', 'b')
>>>
>>> sig_merged.bind(**kwargs) # the merged signature doesn't
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/usr/lib64/python3.4/inspect.py", line 2642, in bind
return args[0]._bind(args[1:], kwargs)
File "/usr/lib64/python3.4/inspect.py", line 2542, in _bind
raise TypeError(msg) from None
TypeError: 'alpha' parameter is positional only, but was passed as a keyword
"""
assert signatures, "Expected at least one signature"
ret = sort_params(signatures[0], sources=True)
for i, sig in enumerate(signatures[1:], 1):
sorted_params = sort_params(sig, sources=True)
try:
ret = SortedParameters(*_Merger(ret, sorted_params))
except ValueError:
raise IncompatibleSignatures(sig, signatures[:i])
ret_sig = apply_params(signatures[0], *ret)
return ret_sig
def _check_no_dupes(collect, params):
names = [param.name for param in params]
dupes = collect.intersection(names)
if dupes:
raise ValueError('Duplicate parameter names: ' + ' '.join(dupes))
collect.update(names)
def _clear_defaults(ita):
for param in ita:
yield param.replace(default=param.empty)
def _embed(outer, inner, use_varargs=True, use_varkwargs=True, depth=1):
o_posargs, o_pokargs, o_varargs, o_kwoargs, o_varkwargs, o_src = outer
stars_sig = SortedParameters(
[], [], use_varargs and o_varargs,
{}, use_varkwargs and o_varkwargs, {})
i_posargs, i_pokargs, i_varargs, i_kwoargs, i_varkwargs, i_src = \
_Merger(inner, stars_sig)
names = set()
e_posargs = []
e_pokargs = []
e_kwoargs = _util.OrderedDict()
e_posargs.extend(o_posargs)
_check_no_dupes(names, o_posargs)
if i_posargs:
_check_no_dupes(names, o_pokargs)
e_posargs.extend(arg.replace(kind=arg.POSITIONAL_ONLY) for arg in o_pokargs)
if i_posargs[0].default is i_posargs[0].empty:
e_posargs = list(_clear_defaults(e_posargs))
_check_no_dupes(names, i_posargs)
e_posargs.extend(i_posargs)
else:
_check_no_dupes(names, o_pokargs)
if i_pokargs and i_pokargs[0].default == i_pokargs[0].empty:
e_posargs = list(_clear_defaults(e_posargs))
e_pokargs.extend(_clear_defaults(o_pokargs))
else:
e_pokargs.extend(o_pokargs)
_check_no_dupes(names, i_pokargs)
e_pokargs.extend(i_pokargs)
_check_no_dupes(names, o_kwoargs.values())
e_kwoargs.update(o_kwoargs)
_check_no_dupes(names, i_kwoargs.values())
e_kwoargs.update(i_kwoargs)
src = dict(i_src, **o_src)
if o_varargs and use_varargs:
src.pop(o_varargs.name, None)
if o_varkwargs and use_varkwargs:
src.pop(o_varkwargs.name, None)
src['+depths'] = merge_depths(
o_src.get('+depths', {}),
dict((f, v+depth) for f, v in i_src.get('+depths', {}).items()))
return (
e_posargs, e_pokargs, i_varargs if use_varargs else o_varargs,
e_kwoargs, i_varkwargs if use_varkwargs else o_varkwargs,
src
)
def embed(*signatures, use_varargs=True, use_varkwargs=True):
"""Embeds a signature within another's ``*args`` and ``**kwargs``
parameters, as if a function with the outer signature called a function with
the inner signature with just ``f(*args, **kwargs)``.
:param inspect.Signature signatures: The signatures to embed within
one-another, outermost first.
:param bool use_varargs: Make use of the ``*args``-like parameter.
:param bool use_varkwargs: Make use of the ``*kwargs``-like parameter.
:returns: a `inspect.Signature` object
:raises: `IncompatibleSignatures`
::
>>> from sigtools import signatures, support
>>> print(signatures.embed(
... support.s('one, *args, **kwargs'),
... support.s('two, *args, kw, **kwargs'),
... support.s('last'),
... ))
(one, two, last, *, kw)
>>> # use signatures.mask() to remove self-like parameters
>>> print(signatures.embed(
... support.s('self, *args, **kwargs'),
... signatures.mask(
... support.s('self, *args, keyword, **kwargs'), 1),
... ))
(self, *args, keyword, **kwargs)
"""
assert signatures
ret = sort_params(signatures[0], sources=True)
for i, sig in enumerate(signatures[1:], 1):
try:
ret = _embed(ret, sort_params(sig, sources=True),
use_varargs, use_varkwargs, i)
except ValueError:
raise IncompatibleSignatures(sig, signatures[:i])
return apply_params(signatures[0], *ret)
def _pop_chain(*sequences):
for sequence in sequences:
while sequence:
yield sequence.pop(0)
def _remove_from_src(src, ita):
for name in ita:
src.pop(name, None)
def _pnames(ita):
for p in ita:
yield p.name
def _mask(sig, num_args, hide_args, hide_kwargs,
hide_varargs, hide_varkwargs, named_args, partial_obj):
posargs, pokargs, varargs, kwoargs, varkwargs, src \
= sort_params(sig, sources=True)
pokargs_by_name = dict((p.name, p) for p in pokargs)
consumed_names = set()
if hide_args:
consumed_names.update(p.name for p in posargs)
consumed_names.update(p.name for p in pokargs)
posargs = []
pokargs = []
elif num_args:
consume = num_args
for param in _pop_chain(posargs, pokargs):
consume -= 1
consumed_names.add(param.name)
if not consume:
break
else:
if not varargs:
raise ValueError(
'Signature cannot be passed {0} arguments: {1}'
.format(num_args, sig))
_remove_from_src(src, consumed_names)
if hide_args or hide_varargs:
if varargs:
src.pop(varargs.name, None)
varargs = None
partial_mode = partial_obj is not None
if hide_kwargs:
_remove_from_src(src, _pnames(pokargs))
_remove_from_src(src, kwoargs)
pokargs = []
kwoargs = {}
named_args = []
for kwarg_name in named_args:
if kwarg_name in consumed_names:
raise ValueError('Duplicate argument: {0!r}'.format(kwarg_name))
elif kwarg_name in pokargs_by_name:
i = pokargs.index(pokargs_by_name[kwarg_name])
pokargs, param, conv_kwoargs = (
pokargs[:i], pokargs[i], pokargs[i+1:])
kwoargs.update(
(p.name, p.replace(kind=p.KEYWORD_ONLY))
for p in conv_kwoargs)
if partial_mode:
kwoargs[param.name] = param.replace(
kind=param.KEYWORD_ONLY, default=named_args[param.name])
else:
src.pop(kwarg_name, None)
if varargs:
src.pop(varargs.name, None)
varargs = None
pokargs_by_name.clear()
elif kwarg_name in kwoargs:
if partial_mode:
param = kwoargs[kwarg_name]
kwoargs[kwarg_name] = param.replace(
kind=param.KEYWORD_ONLY, default=named_args[kwarg_name])
else:
src.pop(kwarg_name, None)
kwoargs.pop(kwarg_name)
elif not varkwargs:
raise ValueError(
'Named parameter {0!r} not found in signature: {1}'
.format(kwarg_name, sig))
elif partial_mode:
kwoargs[kwarg_name] = _util.funcsigs.Parameter(
kwarg_name, _util.funcsigs.Parameter.KEYWORD_ONLY,
default=named_args[kwarg_name])
src[kwarg_name] = [partial_obj]
consumed_names.add(kwarg_name)
if hide_kwargs or hide_varkwargs:
if varkwargs:
src.pop(varkwargs.name, None)
varkwargs = None
if partial_mode:
src = copy_sources(src, increase=True)
src['+depths'][partial_obj] = 0
ret = apply_params(sig, posargs, pokargs, varargs, kwoargs, varkwargs, src)
return ret
def mask(sig, num_args=0,
hide_args=False, hide_kwargs=False,
hide_varargs=False, hide_varkwargs=False,
*named_args):
"""Removes the given amount of positional parameters and the given named
parameters from ``sig``.
:param inspect.Signature sig: The signature to operate on
    :param int num_args: The number of positional arguments passed
:param str named_args: The names of named arguments passed
:param hide_args: If true, mask all positional parameters
:param hide_kwargs: If true, mask all keyword parameters
:param hide_varargs: If true, mask the ``*args``-like parameter
completely if present.
    :param hide_varkwargs: If true, mask the ``**kwargs``-like parameter
        completely if present.
    :return: an `inspect.Signature` object
:raises: `ValueError` if the signature cannot handle the arguments
to be passed.
::
>>> from sigtools import signatures, support
>>> print(signatures.mask(support.s('a, b, *, c, d'), 1, 'd'))
(b, *, c)
>>> print(signatures.mask(support.s('a, b, *args, c, d'), 3, 'd'))
(*args, c)
>>> print(signatures.mask(support.s('*args, c, d'), 2, 'd', hide_varargs=True))
(*, c)
"""
return _mask(sig, num_args, hide_args, hide_kwargs,
hide_varargs, hide_varkwargs, named_args, None)
def forwards(outer, inner, num_args=0,
hide_args=False, hide_kwargs=False,
use_varargs=True, use_varkwargs=True,
partial=False, *named_args):
"""Calls `mask` on ``inner``, then returns the result of calling
`embed` with ``outer`` and the result of `mask`.
:param inspect.Signature outer: The outermost signature.
:param inspect.Signature inner: The inner signature.
:param bool partial: Set to `True` if the arguments are passed to
``partial(func_with_inner, *args, **kwargs)`` rather than
``func_with_inner``.
``use_varargs`` and ``use_varkwargs`` are the same parameters as in
`.embed`, and ``num_args``, ``named_args``, ``hide_args`` and
``hide_kwargs`` are parameters of `.mask`.
:return: the resulting `inspect.Signature` object
:raises: `IncompatibleSignatures`
::
>>> from sigtools import support, signatures
>>> print(signatures.forwards(
... support.s('a, *args, x, **kwargs'),
... support.s('b, c, *, y, z'),
... 1, 'y'))
(a, c, *, x, z)
.. seealso::
:ref:`forwards-pick`
"""
if partial:
params = []
for param in inner.parameters.values():
if param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]:
params.append(param)
else:
params.append(param.replace(default=None))
inner = inner.replace(parameters=params)
return embed(
use_varargs, use_varkwargs,
outer,
mask(inner, num_args,
hide_args, hide_kwargs, False, False,
*named_args))
|
"""
In training deep networks, it is usually helpful to anneal the learning rate over time. Good intuition to have in mind
is that with a high learning rate, the system contains too much kinetic energy and the parameter vector bounces around
chaotically, unable to settle down into deeper, but narrower parts of the loss function. Knowing when to decay the
learning rate can be tricky: Decay it slowly and you’ll be wasting computation bouncing around chaotically with little
improvement for a long time. But decay it too aggressively and the system will cool too quickly, unable to reach the best
position it can. There are three common ways of implementing learning rate decay:
- step decay
- exponential decay
- 1/t decay
In practice, the step decay is slightly preferable because the hyperparameters it involves are more interpretable.
Lastly, if you can afford the computational budget, err on the side of slower decay and train for a longer time.
"""
# Copyright 2019 Gabriele Valvano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import ast
import os
from .callbacks import Callback
import idas.logger.json_logger as jlogger
import tensorflow as tf
def apply_step_decay(params, t):
"""
Reduces the learning rate by some factor every few epochs.
Args:
params: parameters for the annealing
t: iteration number (or you can use number of epochs)
Returns:
Updated learning rate
"""
lr = params['curr_lr'] # current learning rate
k = params['k'] # decay factor
period = params['period'] # period used to anneal
if (t % period) == 0 and (t != 0):
return lr * 1. / k
return lr
def apply_exp_decay(params, t):
"""
Implements the mathematical form: a = a0 * exp(-k * t).
Args:
params: parameters for the annealing
t: iteration number (or you can use number of epochs)
Returns:
Updated learning rate
"""
a0 = params['lr0'] # initial learning rate
k = params['k'] # decay factor
return a0 * np.exp(-k*t)
def apply_1overt_decay(params, t):
"""
Implements the mathematical form: a = a0 / (1 + k*t).
Args:
params: parameters for the annealing
t: iteration number (or you can use number of epochs)
Returns:
Updated learning rate
"""
a0 = params['lr0'] # initial learning rate
k = params['k'] # decay factor
return a0 * 1. / (1 + k*t)
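# Minimal usage sketch of the three schedules above (the parameter values are
# illustrative assumptions, not defaults used elsewhere in this module):
#   step_params = {'curr_lr': 1e-3, 'k': 2.0, 'period': 10}
#   lr = apply_step_decay(step_params, t=10)                 # halves the rate every `period` epochs
#   lr = apply_exp_decay({'lr0': 1e-3, 'k': 0.05}, t=20)     # a0 * exp(-k*t)
#   lr = apply_1overt_decay({'lr0': 1e-3, 'k': 0.05}, t=20)  # a0 / (1 + k*t)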
def check_for_annealed_lr(cnn, sess, history_logs):
"""
    Checks whether the flag for learning rate annealing (possibly set during a previous run) exists and is True.
    If so, the last learning rate is restored into the graph; returns True in that case, False otherwise.
Args:
cnn (tensor): neural network
sess (tf.Session): TensorFlow Session object
history_logs (str): file with the history
    Returns:
        bool: True if a previously annealed learning rate was restored, False otherwise.
"""
has_been_run = False
try:
node = jlogger.read_one_node('LR_ANNEALING', file_name=history_logs)
if node['done_before']:
strategy = node['strategy']
last_lr = node['last_learning_rate']
print(" | This network was already trained with a strategy of \033[94m{0}\033[0m and the "
"last learning rate was \033[94m{1}\033[0m".format(strategy, last_lr))
print(" | The learning rate will be consequently set to \033[94m{0}\033[0m".format(last_lr))
print(' | - - ')
sess.run(cnn.lr.assign(last_lr))
has_been_run = True
except (FileNotFoundError, KeyError):
pass
return has_been_run
class LrAnnealingCallback(Callback):
""" Callback for learning rate annealing. """
def __init__(self):
super().__init__()
# Define variables here because the callback __init__() is called before the initialization of all variables
# in the graph.
self.history_log_file = None
def on_train_begin(self, training_state, **kwargs):
print("\nAnnealing the learning rate with strategy \033[94m{0}\033[0m".format(kwargs['annealing_strategy']))
cnn = kwargs['cnn']
        if cnn is None:
            raise Exception("on_train_begin() requires a 'cnn' keyword argument")
self.history_log_file = kwargs['history_log_dir'] + os.sep + 'train_history.json'
try:
_ = jlogger.read_one_node('LR_ANNEALING', file_name=self.history_log_file)
except (FileNotFoundError, KeyError):
vals = {'done_before': True,
'strategy': kwargs['annealing_strategy'],
'parameters': kwargs['annealing_parameters'],
'annealing_epoch_delay': kwargs['annealing_epoch_delay'],
'last_learning_rate': ast.literal_eval(kwargs['annealing_parameters'])['lr0']}
jlogger.add_new_node('LR_ANNEALING', vals, file_name=self.history_log_file)
# define update operation:
up_value = tf.placeholder(tf.float32, None, name='update_lr_value')
self.update_lr = cnn.lr.assign(up_value, name='update_lr')
def on_epoch_end(self, training_state, **kwargs):
cnn = kwargs['cnn']
        if cnn is None:
            raise Exception("on_epoch_end() requires a 'cnn' keyword argument")
curr_epoch = cnn.g_epoch.eval()
if curr_epoch > kwargs['annealing_epoch_delay']:
call_strategy = {'step_decay': apply_step_decay,
'exp_decay': apply_exp_decay,
'1overT_decay': apply_1overt_decay}
params = ast.literal_eval(kwargs['annealing_parameters']) # convert string type to dictionary
params['curr_lr'] = cnn.lr.eval() # add current learning rate to the annealing parameters
# call the right decay method:
updated_lr = call_strategy[kwargs['annealing_strategy']](params, curr_epoch)
if updated_lr != params['curr_lr']:
print("\n\033[94mAnnealing the learning rate with strategy '{0}'... "
"New value = {1:0.2e}\033[0m".format(kwargs['annealing_strategy'], updated_lr))
# cnn.lr = updated_lr
kwargs['sess'].run(self.update_lr, feed_dict={'update_lr_value:0': updated_lr})
jlogger.update_node('LR_ANNEALING', sub_key='last_learning_rate', sub_value=updated_lr,
file_name=self.history_log_file)
|
# -*- coding: utf-8 -*-
"""Activate venv for current interpreter:
Use `import venv` along with a `--venv path/to/venv/base`
This can be used when you must use an existing Python interpreter, not the venv bin/python.
"""
import os
import site
import sys
if "--venv" in sys.argv:
    # Code inspired by virtualenv::bin/activate_this.py
venv_path = sys.argv[sys.argv.index("--venv") + 1]
bin_dir = os.path.abspath(os.path.join(venv_path, "bin"))
base = bin_dir[: -len("bin") - 1]
os.environ["PATH"] = os.pathsep.join([bin_dir] + os.environ.get("PATH", "").split(os.pathsep))
os.environ["VIRTUAL_ENV"] = base
prev_length = len(sys.path)
python_libs = os.path.join(base, f"lib/python{sys.version_info.major}.{sys.version_info.minor}/site-packages")
    site.addsitedir(python_libs)  # Python 3: no decoding needed
sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]
sys.real_prefix = sys.prefix
sys.prefix = base
|
#!/usr/bin/env python3
# Importing computational model
import sys
sys.path.append('./model')
sys.path.append('./helpers')
from model import *
from helpers import *
# Starting Korali's Engine
import korali
####### Bayesian Problems
##### No NUTS
e = korali.Experiment()
e["Console Output"]["Frequency"] = 100
e["File Output"]["Enabled"] = False
# Configuring problem
e["Problem"]["Type"] = "Bayesian/Custom"
e["Problem"]["Likelihood Model"] = model
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = -20.0
e["Distributions"][0]["Maximum"] = +20.0
# Defining the problem's variables and their prior distribution
e["Variables"][0]["Name"] = "X"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
e["Variables"][0]["Initial Mean"] = 0.0
e["Variables"][0]["Initial Standard Deviation"] = +20.0
# Configuring the HMC sampler parameters
e["Solver"]["Type"] = "Sampler/HMC"
e["Solver"]["Burn In"] = 100
e["Solver"]["Termination Criteria"]["Max Samples"] = 10
# HMC specific parameters
e["Solver"]["Use Adaptive Step Size"] = True
e["Solver"]["Num Integration Steps"] = 20
e["Solver"]["Step Size"] = 0.1
e["Solver"]["Target Integration Time"] = 1.0
e["Solver"]["Target Acceptance Rate"] = 0.71
# Running Korali
e["Random Seed"] = 0xC0FFEE
e["File Output"]["Path"] = "_result_run-hmc-nuts"
k = korali.Engine()
k.run(e)
##### Euclidean (No Diagonal)
e = korali.Experiment()
e["Console Output"]["Frequency"] = 100
e["File Output"]["Enabled"] = False
# Configuring problem
e["Problem"]["Type"] = "Bayesian/Custom"
e["Problem"]["Likelihood Model"] = model
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = -20.0
e["Distributions"][0]["Maximum"] = +20.0
# Defining the problem's variables and their prior distribution
e["Variables"][0]["Name"] = "X"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
e["Variables"][0]["Initial Mean"] = 0.0
e["Variables"][0]["Initial Standard Deviation"] = +20.0
# Configuring the HMC sampler parameters
e["Solver"]["Type"] = "Sampler/HMC"
e["Solver"]["Burn In"] = 100
e["Solver"]["Termination Criteria"]["Max Samples"] = 10
# HMC specific parameters
e["Solver"]["Use Adaptive Step Size"] = True
e["Solver"]["Version"] = 'Euclidean'
e["Solver"]["Use NUTS"] = True
e["Solver"]["Use Diagonal Metric"] = False
e["Solver"]["Num Integration Steps"] = 20
e["Solver"]["Step Size"] = 0.1
e["Solver"]["Target Integration Time"] = 1.0
e["Solver"]["Target Acceptance Rate"] = 0.71
# Running Korali
e["Random Seed"] = 0xC0FFEE
e["File Output"]["Path"] = "_result_run-hmc-nuts"
k = korali.Engine()
k.run(e)
##### Euclidean (Diagonal)
e = korali.Experiment()
e["Console Output"]["Frequency"] = 100
e["File Output"]["Enabled"] = False
# Configuring problem
e["Problem"]["Type"] = "Bayesian/Custom"
e["Problem"]["Likelihood Model"] = model
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = -20.0
e["Distributions"][0]["Maximum"] = +20.0
# Defining the problem's variables and their prior distribution
e["Variables"][0]["Name"] = "X"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
e["Variables"][0]["Initial Mean"] = 0.0
e["Variables"][0]["Initial Standard Deviation"] = +20.0
# Configuring the HMC sampler parameters
e["Solver"]["Type"] = "Sampler/HMC"
e["Solver"]["Burn In"] = 100
e["Solver"]["Termination Criteria"]["Max Samples"] = 10
# HMC specific parameters
e["Solver"]["Use Adaptive Step Size"] = True
e["Solver"]["Version"] = 'Euclidean'
e["Solver"]["Use NUTS"] = True
e["Solver"]["Use Diagonal Metric"] = True
e["Solver"]["Num Integration Steps"] = 20
e["Solver"]["Step Size"] = 0.1
e["Solver"]["Target Integration Time"] = 1.0
e["Solver"]["Target Acceptance Rate"] = 0.71
# Running Korali
e["Random Seed"] = 0xC0FFEE
e["File Output"]["Path"] = "_result_run-hmc-nuts"
k = korali.Engine()
k.run(e)
##### Static (No Diagonal)
e = korali.Experiment()
e["Console Output"]["Frequency"] = 100
e["File Output"]["Enabled"] = False
# Configuring problem
e["Problem"]["Type"] = "Bayesian/Custom"
e["Problem"]["Likelihood Model"] = model
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = -20.0
e["Distributions"][0]["Maximum"] = +20.0
# Defining the problem's variables and their prior distribution
e["Variables"][0]["Name"] = "X"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
e["Variables"][0]["Initial Mean"] = 0.0
e["Variables"][0]["Initial Standard Deviation"] = +20.0
# Configuring the HMC sampler parameters
e["Solver"]["Type"] = "Sampler/HMC"
e["Solver"]["Burn In"] = 100
e["Solver"]["Termination Criteria"]["Max Samples"] = 10
# HMC specific parameters
e["Solver"]["Use Adaptive Step Size"] = True
e["Solver"]["Version"] = 'Static'
e["Solver"]["Use NUTS"] = True
e["Solver"]["Num Integration Steps"] = 20
e["Solver"]["Use Diagonal Metric"] = False
e["Solver"]["Step Size"] = 0.1
e["Solver"]["Target Integration Time"] = 1.0
e["Solver"]["Target Acceptance Rate"] = 0.71
# Running Korali
e["Random Seed"] = 0xC0FFEE
e["File Output"]["Path"] = "_result_run-hmc-nuts"
k = korali.Engine()
k.run(e)
##### Static (Diagonal)
e = korali.Experiment()
e["Console Output"]["Frequency"] = 100
e["File Output"]["Enabled"] = False
# Configuring problem
e["Problem"]["Type"] = "Bayesian/Custom"
e["Problem"]["Likelihood Model"] = model
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = -20.0
e["Distributions"][0]["Maximum"] = +20.0
# Defining the problem's variables and their prior distribution
e["Variables"][0]["Name"] = "X"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
e["Variables"][0]["Initial Mean"] = 0.0
e["Variables"][0]["Initial Standard Deviation"] = +20.0
# Configuring the HMC sampler parameters
e["Solver"]["Type"] = "Sampler/HMC"
e["Solver"]["Burn In"] = 100
e["Solver"]["Termination Criteria"]["Max Samples"] = 10
# HMC specific parameters
e["Solver"]["Use Adaptive Step Size"] = True
e["Solver"]["Version"] = 'Static'
e["Solver"]["Use NUTS"] = True
e["Solver"]["Num Integration Steps"] = 20
e["Solver"]["Use Diagonal Metric"] = True
e["Solver"]["Step Size"] = 0.1
e["Solver"]["Target Integration Time"] = 1.0
e["Solver"]["Target Acceptance Rate"] = 0.71
# Running Korali
e["Random Seed"] = 0xC0FFEE
e["File Output"]["Path"] = "_result_run-hmc-nuts"
k = korali.Engine()
k.run(e)
|
import logging
import os
from docserver.api import schemas
logger = logging.getLogger(__name__)
HTML_LATEST_REDIRECT = """
<!DOCTYPE HTML>
<meta charset="UTF-8">
<meta http-equiv="refresh" content="1; url=latest/">
<script>
window.location.href = "latest/"
</script>
<title>Page Redirection</title>
If you are not redirected automatically, here is the <a href='latest/'>latest documentation</a>
"""
def check_redirect(package: schemas.Package):
index = os.path.join(package.get_dir(), 'index.html')
if not os.path.exists(index):
logger.debug(f'Creating redirect for {package}')
        with open(index, 'w') as f:
            f.write(HTML_LATEST_REDIRECT)
|
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import pmt, time
from gnuradio import gr_unittest, blocks, gr, analog
from gnuradio.gr.hier_block2 import _multiple_endpoints, _optional_endpoints
class test_hblk(gr.hier_block2):
def __init__(self, io_sig=1*[gr.sizeof_gr_complex], ndebug=2):
# parent constructor
gr.hier_block2.__init__(self,
"test_hblk",
gr.io_signature(len(io_sig), len(io_sig), io_sig[0]),
gr.io_signature(0,0,0))
        self.message_port_register_hier_in("msg_in")
# Internal Stream Blocks
self.vsnk = blocks.vector_sink_c()
# Internal Msg Blocks
        self.blks = []
for i in range(0, ndebug):
self.blks.append( blocks.message_debug() )
# Set up internal connections
self.connect( self, self.vsnk )
for blk in self.blks:
self.msg_connect( self, "msg_in", blk, "print" )
class test_hier_block2(gr_unittest.TestCase):
def setUp(self):
self.call_log = []
self.Block = type("Block", (), {"to_basic_block": lambda bl: bl})
def test_f(self, *args):
"""test doc"""
self.call_log.append(args)
multi = _multiple_endpoints(test_f)
opt = _optional_endpoints(test_f)
def test_000(self):
self.assertEqual(self.multi.__doc__, "test doc")
self.assertEqual(self.multi.__name__, "test_f")
def test_001(self):
b = self.Block()
self.multi(b)
self.assertEqual((b,), self.call_log[0])
def test_002(self):
b1, b2 = self.Block(), self.Block()
self.multi(b1, b2)
self.assertEqual([(b1, 0, b2, 0)], self.call_log)
def test_003(self):
b1, b2 = self.Block(), self.Block()
self.multi((b1, 1), (b2, 2))
self.assertEqual([(b1, 1, b2, 2)], self.call_log)
def test_004(self):
b1, b2, b3, b4 = [self.Block()] * 4
self.multi(b1, (b2, 5), b3, (b4, 0))
expected = [
(b1, 0, b2, 5),
(b2, 5, b3, 0),
(b3, 0, b4, 0),
]
self.assertEqual(expected, self.call_log)
def test_005(self):
with self.assertRaises(ValueError):
self.multi((self.Block(), 5))
def test_006(self):
with self.assertRaises(ValueError):
self.multi(self.Block(), (5, 5))
def test_007(self):
b1, b2 = self.Block(), self.Block()
self.opt(b1, "in", b2, "out")
self.assertEqual([(b1, "in", b2, "out")], self.call_log)
def test_008(self):
f, b1, b2 = self.multi, self.Block(), self.Block()
self.opt((b1, "in"), (b2, "out"))
self.assertEqual([(b1, "in", b2, "out")], self.call_log)
def test_009(self):
with self.assertRaises(ValueError):
self.multi(self.Block(), 5)
def test_010(self):
s, h, k = analog.sig_source_c(44100, analog.GR_COS_WAVE, 440, 1.0, 0.0), blocks.head(gr.sizeof_gr_complex, 1000), test_hblk([gr.sizeof_gr_complex], 0)
tb = gr.top_block()
tb.connect(s,h,k)
tb.run()
def test_011(self):
s, st, h, k = analog.sig_source_c(44100, analog.GR_COS_WAVE, 440, 1.0, 0.0), blocks.message_strobe(pmt.PMT_NIL, 100), blocks.head(gr.sizeof_gr_complex, 1000), test_hblk([gr.sizeof_gr_complex], 1)
tb = gr.top_block()
tb.connect(s,h,k)
tb.msg_connect(st,"strobe",k,"msg_in")
tb.start()
time.sleep(1)
tb.stop()
tb.wait()
def test_012(self):
s, st, h, k = analog.sig_source_c(44100, analog.GR_COS_WAVE, 440, 1.0, 0.0), blocks.message_strobe(pmt.PMT_NIL, 100), blocks.head(gr.sizeof_gr_complex, 1000), test_hblk([gr.sizeof_gr_complex], 16)
tb = gr.top_block()
tb.connect(s,h,k)
tb.msg_connect(st,"strobe",k,"msg_in")
tb.start()
time.sleep(1)
tb.stop()
tb.wait()
if __name__ == '__main__':
gr_unittest.run(test_hier_block2, "test_hier_block2.xml")
|
#author: akshitac8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os,sys
pwd = os.getcwd()
sys.path.insert(0,pwd)
print('-'*30)
print(os.getcwd())
print('-'*30)
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from model import Net
import json
from tqdm import tqdm
import numpy as np
import h5py
import argparse
import torchvision
import tensorflow as tf
import pandas as pd
import pickle
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from PIL import Image
import logging
logging.basicConfig(level=logging.INFO, filename='logs/test_feature_extract_openimages.log')
model = Net()
model = model.eval()
print(model)
GPU = True
if GPU:
gpus = '0,1,2,3'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = gpus
device_ids = [i for i in range(torch.cuda.device_count())]
if torch.cuda.device_count() > 1:
print("\n\nLet's use", torch.cuda.device_count(), "GPUs!\n\n")
if len(device_ids) > 1:
model = nn.DataParallel(model, device_ids=device_ids).cuda()
else:
model = model.cuda()
version = '2018_04'
path = 'datasets/OpenImages/{}'.format(version)
data_set = 'test'
df_label = pd.read_csv(path+'/{}/{}-annotations-human-imagelabels.csv'.format(data_set, data_set))
seen_labelmap_path = path+'/classes-trainable.txt'
dict_path = path+'/class-descriptions.csv'
unseen_labelmap_path = path+'/unseen_labels.pkl'
data_path = 'datasets/OpenImages/test/'
print('partitioning data')
capacity = 40000
partition_df = []
t = len(df_label)//capacity
for idx_cut in range(t):
partition_df.append(df_label.iloc[idx_cut*capacity:(idx_cut+1)*capacity])
partition_df.append(df_label.iloc[t*capacity:])
files = []
partition_idxs = []
for idx_partition, partition in enumerate(partition_df):
file_partition = [img_id+'.jpg' for img_id in partition['ImageID'].unique() if os.path.isfile(data_path+img_id+'.jpg')]
files.extend(file_partition)
partition_idxs.extend([idx_partition]*len(file_partition))
import collections
duplicate_files = [item for item, count in collections.Counter(files).items() if count > 1]
feat = {key: [] for key in duplicate_files}
# np.save('full_test_partition_idxs.npy', partition_idxs)
# np.save('full_test_files.npy', files)
# partition_idxs = np.load('full_test_partition_idxs.npy')
# files = np.load('full_test_files.npy')
def LoadLabelMap(seen_labelmap_path, unseen_labelmap_path, dict_path):
seen_labelmap = [line.rstrip() for line in tf.gfile.GFile(seen_labelmap_path)]
with open(unseen_labelmap_path, 'rb') as infile:
unseen_labelmap = pickle.load(infile).tolist()
label_dict = {}
for line in tf.gfile.GFile(dict_path):
words = [word.strip(' "\n') for word in line.split(',', 1)]
label_dict[words[0]] = words[1]
return seen_labelmap, unseen_labelmap, label_dict
predictions_eval = 0
predictions_eval_resize = 0
seen_labelmap, unseen_labelmap, label_dict = LoadLabelMap(seen_labelmap_path, unseen_labelmap_path, dict_path)
num_seen_classes = len(seen_labelmap)
num_unseen_classes = len(unseen_labelmap)
print('num_seen_classes', num_seen_classes,
'num_unseen_classes', num_unseen_classes)
def get_label(file,partition_idx):
img_id = file.split('.')[0] #file.decode('utf-8').split('.')[0]
df_img_label=partition_df[partition_idx].query('ImageID=="{}"'.format(img_id))
seen_label = np.zeros(num_seen_classes,dtype=np.int32)
unseen_label = np.zeros(num_unseen_classes,dtype=np.int32)
for index, row in df_img_label.iterrows():
if row['LabelName'] in seen_labelmap:
idx=seen_labelmap.index(row['LabelName'])
seen_label[idx] = 2*row['Confidence']-1
if row['LabelName'] in unseen_labelmap:
idx=unseen_labelmap.index(row['LabelName'])
unseen_label[idx] = 2*row['Confidence']-1
return seen_label,unseen_label
n_samples = len(files)
print("numpy array saved")
print('number of sample: {} dataset: {}'.format(n_samples, data_set))
print('number of unique sample: {} '.format(len(np.unique(files))))
transform = transforms.Compose([
transforms.Resize((224, 224)), # bilinear interpolation
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
class DatasetExtract(Dataset):
def __init__(self):
super(DatasetExtract, self).__init__()
def __len__(self):
return len(files)
def __getitem__(self, index):
file, partition_idx = files[index], partition_idxs[index]
seen_label, unseen_label = get_label(file, partition_idx)
filename = os.path.join(data_path, file)
img = Image.open(filename).convert('RGB')
img = transform(img)
return file.encode("ascii", "ignore"), img, seen_label, unseen_label
loader = DataLoader(dataset=DatasetExtract(), batch_size=64, shuffle=False, num_workers=32, drop_last=False)
src = 'datasets/OpenImages/test_features'
fn = os.path.join(src, 'OPENIMAGES_TEST_CONV5_4_NO_CENTERCROP.h5')
xx = {}
with h5py.File(fn, mode='w') as h5f:
for i, data in enumerate(tqdm(loader), 0):
_file, imgs, seen_label, unseen_label = data
imgs = imgs.cuda()
with torch.no_grad():
outs = model(imgs)#.view(imgs.shape[0],512,-1)
outs = np.float32(outs.cpu().numpy())
seen_label = np.int8(seen_label.numpy())
unseen_label = np.int8(unseen_label.numpy())
bs = outs.shape[0]
for m in range(bs):
file = _file[m].decode("utf-8")
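            # Image IDs listed in more than one annotation partition appear several times
            # in `files`; their labels from the first occurrence are buffered in `feat`
            # and merged into the next occurrence before features and labels are written once.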
if file in duplicate_files:
if len(feat[file]) == 2:
seen_label[m] = seen_label[m] + feat[file][0]
unseen_label[m] = unseen_label[m] + feat[file][1]
h5f.create_dataset(file+'-features', data=outs[m], dtype=np.float32, compression="gzip")
h5f.create_dataset(file+'-seenlabels', data=seen_label[m], dtype=np.int8, compression="gzip")
h5f.create_dataset(file+'-unseenlabels', data=unseen_label[m], dtype=np.int8, compression="gzip")
else:
feat[file].append(seen_label[m])
feat[file].append(unseen_label[m])
else:
h5f.create_dataset(file+'-features', data=outs[m], dtype=np.float32, compression="gzip")
h5f.create_dataset(file+'-seenlabels', data=seen_label[m], dtype=np.int8, compression="gzip")
h5f.create_dataset(file+'-unseenlabels', data=unseen_label[m], dtype=np.int8, compression="gzip")
test_loc = fn  # read back the file that was just written above
test_features = h5py.File(test_loc, 'r')
test_feature_keys = list(test_features.keys())
image_names = np.unique(np.array([m.split('-')[0] for m in test_feature_keys]))
print(len(image_names))
# import pdb;pdb.set_trace()
# with h5py.File(fn, mode='a') as h5f_1:
# del h5f_1[file+'-features']
# del h5f_1[file+'-seenlabels']
# del h5f_1[file+'-unseenlabels']
# h5f_1.close()
# seen_label[m] = seen_label[m] + xx[file]['seen_label']
# unseen_label[m] = unseen_label[m] + xx[file]['unseen_label']
# xx[file] = {}
# xx[file]['seen_label'] = seen_label[m]
# xx[file]['unseen_label'] = unseen_label[m]
|
#!/usr/bin/env python3
import bird_view
import binary_filter
import camera_calibration
import cv2
from line_finder import LineFinder
import numpy as np
class Pipeline:
def __init__(self):
self.camera_model = camera_calibration.Model()
self.camera_model.load()
self.bird_view_model = bird_view.Model()
self.binary_filter_model = binary_filter.Model()
self.binary_filter_model.load()
self.radius = 300
self.offset = 0
def process(self, image):
undistortedImage = self.camera_model.undistort(image)
image = self.bird_view_model.create_bird_view(undistortedImage)
image = Pipeline.scale_down(image, 10)
image = self.binary_filter_model.process_image(image)
line_finder = LineFinder(image)
lineMasks = line_finder.find_lines()
linePolynomials = self.fit_lines(lineMasks)
image = self.draw_lines(undistortedImage, lineMasks)
image = self.draw_lane(image, linePolynomials)
image = self.draw_text(image, linePolynomials)
return image
    @staticmethod
    def scale_down(image, times):
width = image.shape[1] // times
height = image.shape[0] // times
return cv2.resize(image, (width, height))
def draw_lines(self, image, lineMasks):
image = self.draw_line(image, lineMasks[0], (255, 0, 0))
image = self.draw_line(image, lineMasks[1], (0, 0, 255))
return image
def draw_line(self, undistortedImage, lineMask, color):
image = np.zeros(lineMask.shape + (3,), dtype=np.uint8)
image[lineMask] = (1, 1, 1)
color = np.full_like(image, color)
image = np.multiply(image, color)
image = cv2.resize(image, (bird_view.WIDTH, bird_view.HEIGHT))
image = self.bird_view_model.create_perspective_view(image)
image = cv2.addWeighted(
undistortedImage, 1.0, image, 1.0, 0.0)
return image
def fit_lines(self, lineMasks):
return (
self.fit_line(lineMasks[0]),
self.fit_line(lineMasks[1])
)
def fit_line(self, lineMask):
pixels = lineMask.nonzero()
points = np.multiply(pixels, 10) + 5 # convert to cm
polynomial = np.polyfit(points[0], points[1], 2)
return polynomial
def project_line(self, linePolynomial):
        points = self.interpolate_line(linePolynomial)
points = self.perspective_transform(points)
return points
def draw_lane(self, undistortedImage, linePolynomials):
leftPoints = self.project_line(linePolynomials[0])
rightPoints = self.project_line(linePolynomials[1])
points = np.concatenate((leftPoints, rightPoints[::-1]))
laneImage = np.zeros_like(undistortedImage)
cv2.fillPoly(
img=laneImage,
pts=[points],
color=(0, 255, 0))
undistortedImage = cv2.addWeighted(
undistortedImage, 1.0, laneImage, 0.3, 0.0)
return undistortedImage
def draw_text(self, image, lines):
radius = self.estimate_radius(lines)
radius = str(radius) if radius < 2000 else ">2000"
Pipeline.put_text(image, (100, 100),
"Curvature Radius: " + radius + "m")
offset = self.estimate_offset(lines)
offset = "{} cm {}".format(abs(offset),
"to the left" if offset > 0 else
"to the right" if offset < 0 else "")
Pipeline.put_text(image, (100, 200), "Offset: " + offset)
return image
    def interpolate_line(self, linePolynomial):
points = list()
for i in range(10):
y = i * bird_view.HEIGHT / 9
x = np.polyval(linePolynomial, y)
points.append((x, y))
return points
def perspective_transform(self, points):
return self.bird_view_model.perspective_transform(points)
def estimate_radius(self, lines):
leftRadius = Pipeline.curvature_radius(lines[0])
rightRadius = Pipeline.curvature_radius(lines[1])
radius = (leftRadius + rightRadius) / 2.0
self.radius = 0.95 * self.radius + 0.05 * radius
radius = int(self.radius) // 50 * 50
return radius
def estimate_offset(self, lines):
leftLinePosition = np.polyval(lines[0], bird_view.HEIGHT)
rightLinePosition = np.polyval(lines[1], bird_view.HEIGHT)
offset = (rightLinePosition + leftLinePosition) / 2.0 - \
bird_view.WIDTH / 2.0
self.offset = 0.9 * self.offset + 0.1 * offset
offset = int(self.offset) // 5 * 5
return offset
    @staticmethod
    def curvature_radius(polynomial):
        # Curvature radius of the fitted polynomial x = a*y^2 + b*y + c, evaluated
        # at the bottom of the bird's-eye view and converted from cm to m.
        a = polynomial[0]
        b = polynomial[1]
        c = polynomial[2]
        y = bird_view.HEIGHT
        r = (1.0 + (2.0 * a * y + b) ** 2.0) ** 1.5 / abs(2.0 * a) / 100
        return r
    @staticmethod
    def put_text(image, org, text):
cv2.putText(
img=image,
text=text,
org=org,
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=2,
color=(255, 255, 255),
thickness=3)
if __name__ == "__main__":
pipeline = Pipeline()
cv2.namedWindow("image", cv2.WINDOW_KEEPRATIO)
cap = cv2.VideoCapture("project_video.mp4")
fourcc = cv2.VideoWriter_fourcc(*"XVID")
video = cv2.VideoWriter("video.avi", fourcc, 25, (1280, 720))
while True:
ret, image = cap.read()
if ret:
image = pipeline.process(image)
video.write(image)
cv2.imshow("image", image)
cv2.waitKey(1)
else:
break
cap.release()
video.release()
|
data = input()
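# The loop below keeps one character from each run of equal consecutive
# characters, e.g. "aabbbca" -> "abca".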
unique_el = []
for index in range(len(data)):
if index + 1 < len(data):
if data[index] == data[index + 1]:
continue
else:
unique_el.append(data[index])
else:
unique_el.append(data[index])
print("".join(unique_el))
|
from myglobals import *
import urllib.request
import re
import pprint
def step1_pypi_index_raw():
url = "https://pypi.org/simple"
    with urllib.request.urlopen(url) as f:
        with open(PYPI_INDEX_RAW, "wb") as out:
            out.write(f.read())
def step2_pypi_index_py():
d = dict()
re_href = re.compile('<a href="(?P<href>.*?)">(?P<name>.*?)</a>')
for lineb in open(PYPI_INDEX_RAW, "rb"):
line = lineb.decode("utf-8")
m = re_href.search(line)
if m is None: continue
print(m.group("href"), m.group("name"))
d[m.group("name")] = m.group("href")
open(PYPI_INDEX_PY, "w").write("pypi_index = " + pprint.pformat(d))
def step3_pypi_index_dat():
    with open(PYPI_INDEX_DAT, "w") as f:
        re_href = re.compile('<a href="(?P<href>.*?)">(?P<name>.*?)</a>')
        for lineb in open(PYPI_INDEX_RAW, "rb"):
            line = lineb.decode("utf-8")
            m = re_href.search(line)
            if m is None: continue
            name = m.group("name")
            assert " " not in name
            print(name, m.group("href"), file=f)
if __name__ == '__main__':
# step1_pypi_index_raw()
# step2_pypi_index_py()
step3_pypi_index_dat()
|
import logging
from typing import List
from weaverbird.backends.mongo_translator.steps.types import MongoStep
from weaverbird.pipeline import Pipeline, steps
from weaverbird.pipeline.steps import AppendStep, DomainStep
logger = logging.getLogger(__name__)
def translate_append(step: AppendStep) -> List[MongoStep]:
from weaverbird.backends.mongo_translator.mongo_pipeline_translator import translate_pipeline
pipelines = step.pipelines
pipelines_names = ['$_vqbPipelineInline']
lookups = []
for i, sub_pipeline in enumerate(pipelines):
pipeline_without_domain = Pipeline(steps=[])
if isinstance(sub_pipeline, str):
domain_step = DomainStep(name='domain', domain=sub_pipeline)
else:
            if not isinstance(sub_pipeline, list):
                # in this case sub_pipeline is a Reference
                raise Exception('References must be resolved before translating the pipeline')
domain_step = DomainStep(**sub_pipeline[0])
pipeline_without_domain.steps = [
getattr(steps, f"{s['name'].capitalize()}Step")(**s) for s in sub_pipeline[1:]
]
lookups.append(
{
'$lookup': {
'from': domain_step.domain,
'pipeline': translate_pipeline(pipeline_without_domain),
'as': f'_vqbPipelineToAppend_{i}',
}
}
)
pipelines_names.append(f'$_vqbPipelineToAppend_{i}')
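    # For a single appended domain 'other' (illustrative), the returned Mongo pipeline
    # has roughly this shape:
    #   [{'$group': {'_id': None, '_vqbPipelineInline': {'$push': '$$ROOT'}}},
    #    {'$lookup': {'from': 'other', 'pipeline': [...], 'as': '_vqbPipelineToAppend_0'}},
    #    {'$project': {'_vqbPipelinesUnion': {'$concatArrays': [
    #        '$_vqbPipelineInline', '$_vqbPipelineToAppend_0']}}},
    #    {'$unwind': '$_vqbPipelinesUnion'},
    #    {'$replaceRoot': {'newRoot': '$_vqbPipelinesUnion'}}]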
return [
{'$group': {'_id': None, '_vqbPipelineInline': {'$push': '$$ROOT'}}},
*lookups,
{'$project': {'_vqbPipelinesUnion': {'$concatArrays': pipelines_names}}},
{'$unwind': '$_vqbPipelinesUnion'},
{'$replaceRoot': {'newRoot': '$_vqbPipelinesUnion'}},
]
|
# An assortment of utilities.
from contextlib import contextmanager
@contextmanager
def restoring_sels(view):
old_sels = list(view.sel())
yield
view.sel().clear()
for s in old_sels:
# XXX: If the buffer has changed in the meantime, this won't work well.
view.sel().add(s)
def has_dirty_buffers(window):
    for v in window.views():
        if v.is_dirty():
            return True
    return False
def show_ipanel(window, caption='', initial_text='', on_done=None,
on_change=None, on_cancel=None):
v = window.show_input_panel(caption, initial_text, on_done, on_change,
on_cancel)
return v
def is_view(view):
"""
Returns `True` if @view is a normal view.
"""
return not (is_widget(view) or is_console(view))
def is_widget(view):
"""
Returns `True` if @view is a widget.
"""
return view.settings().get('is_widget')
def is_console(view):
"""
Returns `True` if @view seems to be ST3's console.
"""
# XXX: Is this reliable?
return (getattr(view, 'settings') is None)
|
import unittest
from os.path import join
from pydantic import ValidationError
from tempfile import NamedTemporaryFile
import numpy as np
import rasterio
from rasterio.enums import ColorInterp
from rastervision.core import (RasterStats)
from rastervision.core.box import Box
from rastervision.core.utils.misc import save_img
from rastervision.core.data import (ChannelOrderError, RasterioSourceConfig,
StatsTransformerConfig, CropOffsets,
fill_overflow)
from rastervision.pipeline import rv_config
from tests import data_file_path
class TestRasterioSource(unittest.TestCase):
def setUp(self):
self.tmp_dir_obj = rv_config.get_tmp_dir()
self.tmp_dir = self.tmp_dir_obj.name
def tearDown(self):
self.tmp_dir_obj.cleanup()
def test_nodata_val(self):
# make geotiff filled with ones and zeros with nodata == 1
img_path = join(self.tmp_dir, 'tmp.tif')
height = 100
width = 100
nb_channels = 3
with rasterio.open(
img_path,
'w',
driver='GTiff',
height=height,
width=width,
count=nb_channels,
dtype=np.uint8,
nodata=1) as img_dataset:
im = np.random.randint(0, 2, (height, width, nb_channels)).astype(
np.uint8)
for channel in range(nb_channels):
img_dataset.write(im[:, :, channel], channel + 1)
config = RasterioSourceConfig(uris=[img_path])
source = config.build(tmp_dir=self.tmp_dir)
with source.activate():
out_chip = source.get_image_array()
expected_out_chip = np.zeros((height, width, nb_channels))
np.testing.assert_equal(out_chip, expected_out_chip)
def test_mask(self):
# make geotiff filled with ones and zeros and mask the whole image
img_path = join(self.tmp_dir, 'tmp.tif')
height = 100
width = 100
nb_channels = 3
with rasterio.open(
img_path,
'w',
driver='GTiff',
height=height,
width=width,
count=nb_channels,
dtype=np.uint8) as img_dataset:
im = np.random.randint(0, 2, (height, width, nb_channels)).astype(
np.uint8)
for channel in range(nb_channels):
img_dataset.write(im[:, :, channel], channel + 1)
            img_dataset.write_mask(np.zeros(im.shape[0:2]).astype(bool))
config = RasterioSourceConfig(uris=[img_path])
source = config.build(tmp_dir=self.tmp_dir)
with source.activate():
out_chip = source.get_image_array()
expected_out_chip = np.zeros((height, width, nb_channels))
np.testing.assert_equal(out_chip, expected_out_chip)
def test_get_dtype(self):
img_path = data_file_path('small-rgb-tile.tif')
config = RasterioSourceConfig(uris=[img_path])
source = config.build(tmp_dir=self.tmp_dir)
self.assertEqual(source.get_dtype(), np.uint8)
def test_gets_raw_chip(self):
img_path = data_file_path('small-rgb-tile.tif')
channel_order = [0, 1]
config = RasterioSourceConfig(
uris=[img_path], channel_order=channel_order)
source = config.build(tmp_dir=self.tmp_dir)
with source.activate():
out_chip = source.get_raw_image_array()
self.assertEqual(out_chip.shape[2], 3)
def test_shift_x(self):
# Specially-engineered image w/ one meter per pixel resolution
# in the x direction.
img_path = data_file_path('ones.tif')
channel_order = [0]
config = RasterioSourceConfig(
uris=[img_path],
channel_order=channel_order,
x_shift=1.0,
y_shift=0.0)
source = config.build(tmp_dir=self.tmp_dir)
with source.activate():
extent = source.get_extent()
data = source.get_chip(extent)
self.assertEqual(data.sum(), 2**16 - 256)
column = data[:, 255, 0]
self.assertEqual(column.sum(), 0)
def test_shift_y(self):
# Specially-engineered image w/ one meter per pixel resolution
# in the y direction.
img_path = data_file_path('ones.tif')
channel_order = [0]
config = RasterioSourceConfig(
uris=[img_path],
channel_order=channel_order,
x_shift=0.0,
y_shift=1.0)
source = config.build(tmp_dir=self.tmp_dir)
with source.activate():
extent = source.get_extent()
data = source.get_chip(extent)
self.assertEqual(data.sum(), 2**16 - 256)
row = data[0, :, 0]
self.assertEqual(row.sum(), 0)
def test_gets_raw_chip_from_uint16_transformed_proto(self):
img_path = data_file_path('small-uint16-tile.tif')
channel_order = [0, 1]
config = RasterioSourceConfig(uris=[img_path])
raw_rs = config.build(tmp_dir=self.tmp_dir)
stats_uri = join(self.tmp_dir, 'tmp.tif')
stats = RasterStats()
stats.compute([raw_rs])
stats.save(stats_uri)
transformer = StatsTransformerConfig(stats_uri=stats_uri)
config = RasterioSourceConfig(
uris=[img_path],
channel_order=channel_order,
transformers=[transformer])
rs = config.build(tmp_dir=self.tmp_dir)
with rs.activate():
out_chip = rs.get_raw_image_array()
self.assertEqual(out_chip.shape[2], 3)
def test_uses_channel_order(self):
img_path = join(self.tmp_dir, 'img.tif')
chip = np.ones((2, 2, 4)).astype(np.uint8)
chip[:, :, :] *= np.array([0, 1, 2, 3]).astype(np.uint8)
save_img(chip, img_path)
channel_order = [0, 1, 2]
config = RasterioSourceConfig(
uris=[img_path], channel_order=channel_order)
source = config.build(tmp_dir=self.tmp_dir)
with source.activate():
out_chip = source.get_image_array()
expected_out_chip = np.ones((2, 2, 3)).astype(np.uint8)
expected_out_chip[:, :, :] *= np.array([0, 1, 2]).astype(np.uint8)
np.testing.assert_equal(out_chip, expected_out_chip)
def test_channel_order_error(self):
img_path = join(self.tmp_dir, 'img.tif')
chip = np.ones((2, 2, 3)).astype(np.uint8)
chip[:, :, :] *= np.array([0, 1, 2]).astype(np.uint8)
save_img(chip, img_path)
channel_order = [3, 1, 0]
with self.assertRaises(ChannelOrderError):
config = RasterioSourceConfig(
uris=[img_path], channel_order=channel_order)
config.build(tmp_dir=self.tmp_dir)
def test_detects_alpha(self):
# Set first channel to alpha. Expectation is that when omitting channel_order,
# only the second and third channels will be in output.
img_path = join(self.tmp_dir, 'img.tif')
chip = np.ones((2, 2, 3)).astype(np.uint8)
chip[:, :, :] *= np.array([0, 1, 2]).astype(np.uint8)
save_img(chip, img_path)
ci = (ColorInterp.alpha, ColorInterp.blue, ColorInterp.green)
with rasterio.open(img_path, 'r+') as src:
src.colorinterp = ci
config = RasterioSourceConfig(uris=[img_path])
source = config.build(tmp_dir=self.tmp_dir)
with source.activate():
out_chip = source.get_image_array()
expected_out_chip = np.ones((2, 2, 2)).astype(np.uint8)
expected_out_chip[:, :, :] *= np.array([1, 2]).astype(np.uint8)
np.testing.assert_equal(out_chip, expected_out_chip)
def test_non_geo(self):
# Check if non-georeferenced image files can be read and CRSTransformer
# implements the identity function.
img_path = join(self.tmp_dir, 'img.png')
chip = np.ones((2, 2, 3)).astype(np.uint8)
save_img(chip, img_path)
config = RasterioSourceConfig(uris=[img_path])
source = config.build(tmp_dir=self.tmp_dir)
with source.activate():
out_chip = source.get_image_array()
np.testing.assert_equal(out_chip, chip)
p = (3, 4)
out_p = source.get_crs_transformer().map_to_pixel(p)
np.testing.assert_equal(out_p, p)
out_p = source.get_crs_transformer().pixel_to_map(p)
np.testing.assert_equal(out_p, p)
def test_no_epsg(self):
crs = rasterio.crs.CRS()
img_path = join(self.tmp_dir, 'tmp.tif')
height = 100
width = 100
nb_channels = 3
with rasterio.open(
img_path,
'w',
driver='GTiff',
height=height,
width=width,
count=nb_channels,
dtype=np.uint8,
crs=crs) as img_dataset:
im = np.zeros((height, width, nb_channels)).astype(np.uint8)
for channel in range(nb_channels):
img_dataset.write(im[:, :, channel], channel + 1)
try:
config = RasterioSourceConfig(uris=[img_path])
config.build(tmp_dir=self.tmp_dir)
except Exception:
self.fail(
'Creating RasterioSource with CRS with no EPSG attribute '
'raised an exception when it should not have.')
def test_extent(self):
img_path = data_file_path('small-rgb-tile.tif')
cfg = RasterioSourceConfig(uris=[img_path])
rs = cfg.build(tmp_dir=self.tmp_dir)
extent = rs.get_extent()
h, w = extent.get_height(), extent.get_width()
ymin, xmin, ymax, xmax = extent
self.assertEqual(h, 256)
self.assertEqual(w, 256)
self.assertEqual(ymin, 0)
self.assertEqual(xmin, 0)
self.assertEqual(ymax, 256)
self.assertEqual(xmax, 256)
def test_extent_crop(self):
f = 1 / 4
img_path = data_file_path('small-rgb-tile.tif')
cfg_crop = RasterioSourceConfig(
uris=[img_path], extent_crop=(f, f, f, f))
rs_crop = cfg_crop.build(tmp_dir=self.tmp_dir)
# test extent box
extent_crop = rs_crop.get_extent()
self.assertEqual(extent_crop.ymin, 64)
self.assertEqual(extent_crop.xmin, 64)
self.assertEqual(extent_crop.ymax, 192)
self.assertEqual(extent_crop.xmax, 192)
# test windows
windows = extent_crop.get_windows(64, 64)
self.assertEqual(windows[0].ymin, 64)
self.assertEqual(windows[0].xmin, 64)
self.assertEqual(windows[-1].ymax, 192)
self.assertEqual(windows[-1].xmax, 192)
# test CropOffsets class
cfg_crop = RasterioSourceConfig(
uris=[img_path],
extent_crop=CropOffsets(skip_top=.5, skip_right=.5))
rs_crop = cfg_crop.build(tmp_dir=self.tmp_dir)
extent_crop = rs_crop.get_extent()
self.assertEqual(extent_crop.ymin, 128)
self.assertEqual(extent_crop.xmin, 0)
self.assertEqual(extent_crop.ymax, 256)
self.assertEqual(extent_crop.xmax, 128)
# test validation
extent_crop = CropOffsets(skip_top=.5, skip_bottom=.5)
self.assertRaises(
ValidationError,
lambda: RasterioSourceConfig(uris=[img_path],
extent_crop=extent_crop))
extent_crop = CropOffsets(skip_left=.5, skip_right=.5)
self.assertRaises(
ValidationError,
lambda: RasterioSourceConfig(uris=[img_path],
extent_crop=extent_crop))
# test extent_crop=None
try:
_ = RasterioSourceConfig(uris=[img_path], extent_crop=None) # noqa
except Exception:
self.fail('extent_crop=None caused an error.')
def test_fill_overflow(self):
extent = Box(10, 10, 90, 90)
window = Box(0, 0, 100, 100)
arr = np.ones((100, 100), dtype=np.uint8)
out = fill_overflow(extent, window, arr)
        mask = np.zeros_like(arr).astype(bool)
mask[10:90, 10:90] = 1
self.assertTrue(np.all(out[mask] == 1))
self.assertTrue(np.all(out[~mask] == 0))
window = Box(0, 0, 80, 100)
arr = np.ones((80, 100), dtype=np.uint8)
out = fill_overflow(extent, window, arr)
        mask = np.zeros((80, 100), dtype=bool)
mask[10:90, 10:90] = 1
self.assertTrue(np.all(out[mask] == 1))
self.assertTrue(np.all(out[~mask] == 0))
def test_extent_crop_overflow(self):
f = 1 / 10
arr = np.ones((100, 100), dtype=np.uint8)
        mask = np.zeros_like(arr).astype(bool)
mask[10:90, 10:90] = 1
with NamedTemporaryFile('wb') as fp:
uri = fp.name
with rasterio.open(
uri,
'w',
driver='GTiff',
height=100,
width=100,
count=1,
dtype=np.uint8) as ds:
ds.write_band(1, arr)
cfg = RasterioSourceConfig(uris=[uri], extent_crop=(f, f, f, f))
rs = cfg.build(tmp_dir=self.tmp_dir)
with rs.activate():
out = rs.get_chip(Box(0, 0, 100, 100))[..., 0]
self.assertTrue(np.all(out[mask] == 1))
self.assertTrue(np.all(out[~mask] == 0))
if __name__ == '__main__':
unittest.main()
|
import typing
import inspect
from . import task
class App:
def __init__(
self, task_classes: typing.List[typing.Type[task.Task]], watchdog_duration: float = 0.1
) -> None:
self._validate_app_args(task_classes=task_classes, watchdog_duration=watchdog_duration)
self.task_classes = task_classes
self.watchdog_duration = watchdog_duration
@staticmethod
def _validate_app_args(
task_classes: typing.List[typing.Type[task.Task]], watchdog_duration: float
):
if not isinstance(task_classes, list):
raise ValueError(
"""`task_classes` should be of type:: `list` You entered: {0}""".format(
type(task_classes)
)
)
for tsk in task_classes:
if not inspect.isclass(tsk):
raise ValueError(
"""each element of `task_classes` should be a class and NOT a class instance"""
)
if not issubclass(tsk, task.Task):
raise ValueError(
"""each element of `task_classes` should be a subclass of:: `wiji.task.Task`"""
)
if not isinstance(watchdog_duration, float):
raise ValueError(
"""`watchdog_duration` should be of type:: `float` You entered: {0}""".format(
type(watchdog_duration)
)
)
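# Illustrative usage (MyTask is a hypothetical subclass of wiji.task.Task):
#   app = App(task_classes=[MyTask], watchdog_duration=0.1)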
|
from can_tools.scrapers.official.TX.tx_state import Texas
|
import numpy as np
import matplotlib.pyplot as plt
import sys
import caffe
from PIL import Image
import cv2
# IMAGE_WIDTH = 200
# IMAGE_HEIGHT = 66
#
# def transform_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):
#
# #Histogram Equalization
# img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
# img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
# img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])
#
# #Image Resizing
# img = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC)
#
# return img
#
# # im = np.array(Image.open('cat_gray.jpg'))
# im = np.array(Image.open('center_2016_12_01_13_31_13_786.jpg'),dtype=np.float32)
# print('the image shape is {}'.format(im.shape))
#
# print('the image type is {}'.format(type(im)))
#
# plt.subplot(1,2,1)
# plt.imshow(im)
# plt.title('original image')
# im_resize = transform_img(im,img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
# plt.subplot(1,2,2)
# plt.imshow(im_resize)
# plt.title('resized image')
# print('the reshaped image shape is {}'.format(im.shape))
compression_net = caffe.Net('pilotnet_deploy_s_SVD50.prototxt','snapshot/s_SVD50_20000.caffemodel',caffe.TEST)
nocompression_net = caffe.Net('pilotnet_deploy_s.prototxt','snapshot/sully_cropdata_20000.caffemodel',caffe.TEST)
# params = ['conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8','fc9']
params = []
for k,v in nocompression_net.params.items():
params.append(k)
print(k,v[0].data.shape,v[1].data.shape)
for pr in params:
    if pr != 'fc6':
        # copy weights and biases blob-by-blob so the destination net's data is actually updated
        for blob_idx in range(len(nocompression_net.params[pr])):
            compression_net.params[pr][blob_idx].data[...] = nocompression_net.params[pr][blob_idx].data[...]
        print('{} layer has been transferred'.format(pr))
add1 = compression_net.params['add1'][0].data[...]
print('add1 layer shape is {}\n '.format(add1.shape))
[add1_x,add1_y] = add1.shape
fc6 = compression_net.params['fc6'][0].data[...]
print('fc6 layer shape is {}\n '.format(fc6.shape))
[fc6_x,fc6_y] = fc6.shape
fc6_weight_matrix = nocompression_net.params['fc6'][0].data[...]
u, s, vh = np.linalg.svd(fc6_weight_matrix)
print('u shape is {}'.format(u.shape))
print('s shape is {}'.format(s.shape))
print('vh shape is {}'.format(vh.shape))
hhh = 0
k = np.zeros((add1_x,vh.shape[0]))
k[0:add1_x,0:add1_x] = np.diag(s)[hhh:add1_x+hhh,hhh:add1_x+hhh]
a = u[:,hhh:add1_x+hhh]
b = np.dot(k,vh)
print(k.shape)
print(a.shape)
print(b.shape)
compression_net.params['fc6'][0].data[...] = a
compression_net.params['fc6'][1].data[...] = nocompression_net.params['fc6'][1].data[...]
compression_net.params['add1'][0].data[...] = b
compression_net.save('new_model/mySVDcompression.caffemodel')
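# Optional sanity check (illustrative): keeping the first add1_x singular values gives
# the best rank-add1_x approximation of the original fc6 weights (Eckart-Young), so
# fc6 (a) followed by add1 (b) approximates the original dense layer.
reconstruction_error = np.linalg.norm(fc6_weight_matrix - np.dot(a, b))
print('rank-{} reconstruction error (Frobenius norm): {}'.format(add1_x, reconstruction_error))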
# compression_net.params['add1'][0].data =
# fc6 = compression_net.params['fc6'][0].data[...].reshape([add1add1_length])
#
#
# print('fc6 layer shape is\n {}\n '.format(fc6.shape))
# k = nocompression_net.params['conv1']
# fc9=k
# print('fc6_biaos_matrix is\n {}\n shape is {}'.format(fc6_biaos_matrix,fc6_biaos_matrix.shape))
# print('the input data is {}'.format(net.blobs['data'].data[...].shape))
# print('the net is {}'.format(net))
|
class ToolStripSeparatorRenderEventArgs(ToolStripItemRenderEventArgs):
"""
Provides data for the System.Windows.Forms.ToolStripRenderer.RenderGrip event.
ToolStripSeparatorRenderEventArgs(g: Graphics,separator: ToolStripSeparator,vertical: bool)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return ToolStripSeparatorRenderEventArgs()
@staticmethod
def __new__(self,g,separator,vertical):
""" __new__(cls: type,g: Graphics,separator: ToolStripSeparator,vertical: bool) """
pass
Vertical=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the display style for the grip is vertical.
Get: Vertical(self: ToolStripSeparatorRenderEventArgs) -> bool
"""
|
from __future__ import print_function
import pychromecast
if __name__ == "__main__":
chromecasts = pychromecast.get_chromecasts()
print(chromecasts)
|
from visci.app import generate_vis_index
from glob import glob
import os
base = os.path.abspath("../")
template_files = glob("%s/gallery/*.html" %base)
generate_vis_index(template_files,base)
|
# Copyright 2022 @ReneFreingruber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import standardizer.standardizer_helpers as standardizer_helpers
# Call this function right after comments have been removed
# TODO: The code is very similar to:
# import native_code.speed_optimized_functions as speed_optimized_functions
# speed_optimized_functions.get_index_of_next_symbol_not_within_string()
def add_newlines(code_to_minimize, required_coverage):
tmp = ""
in_str_double_quote = False
in_str_single_quote = False
in_template_str = False
in_forward_slash = False
previous_backward_slash = False
for line in code_to_minimize.split("\n"):
for current_char in line:
if current_char == '"':
if previous_backward_slash:
tmp += current_char
previous_backward_slash = False
continue
tmp += current_char
in_str_double_quote = not in_str_double_quote
previous_backward_slash = False
elif current_char == "'":
if previous_backward_slash:
tmp += current_char
previous_backward_slash = False
continue
tmp += current_char
in_str_single_quote = not in_str_single_quote
previous_backward_slash = False
elif current_char == "`":
if previous_backward_slash:
# `\`` === '`' // --> true
tmp += current_char
previous_backward_slash = False
continue
tmp += current_char
in_template_str = not in_template_str
previous_backward_slash = False
elif current_char == "\\":
previous_backward_slash = not previous_backward_slash
tmp += current_char
elif current_char == "/":
if in_str_double_quote or in_str_single_quote or in_template_str or previous_backward_slash:
pass
else:
in_forward_slash = not in_forward_slash
tmp += current_char
previous_backward_slash = False
elif current_char == "{":
if in_str_double_quote or in_str_single_quote or in_template_str or in_forward_slash:
tmp += current_char
else:
# not in a string, so we can add a newline
tmp += current_char + "\n"
# Important, if the character is a {, I can't add a newline in front of the {
# The reason is code like this:
# return {0.1: a};
# If a newline would be added, the return would just be executed (this is the only exception of this behavior in JavaScript..)
previous_backward_slash = False
elif current_char == "}":
if in_str_double_quote or in_str_single_quote or in_template_str or in_forward_slash:
tmp += current_char
else:
# not in a string, so we can add a newline
tmp += "\n" + current_char + "\n"
previous_backward_slash = False
else:
tmp += current_char
previous_backward_slash = False
tmp += "\n"
# Now remove completely empty lines
minimized_code = ""
for line in tmp.split("\n"):
if line.strip() == "":
continue
minimized_code += line + "\n"
if minimized_code == code_to_minimize.rstrip():
# Nothing was modified, so it must not be executed again
return minimized_code
if standardizer_helpers.does_code_still_trigger_coverage(minimized_code, required_coverage):
# Minimization worked and we still trigger the new coverage
return minimized_code
else:
# Something went wrong and minimization didn't work
return code_to_minimize
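# Illustrative input/output (hypothetical snippet): for
#   code_to_minimize = 'if (x) { y(); } else { z(); }'
# the splitting above yields roughly
#   if (x) {
#    y();
#   }
#    else {
#    z();
#   }
# which is then re-validated with standardizer_helpers.does_code_still_trigger_coverage().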
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This library is an attempt to implement the LSV2 communication protocol used by certain
CNC controls.
Please consider the dangers of using this library on a production machine! This library is
by no means complete and could damage the control or cause injuries! Everything beyond simple
file manipulation is blocked by a lockout parameter. Use at your own risk!
"""
import logging
import re
import struct
from pathlib import Path
from .const import (ControlType, Login, MemoryType, LSV2Err, CMD, RSP,
ParCCC, ParRVR, ParRRI, ParRDR, BIN_FILES, MODE_BINARY)
from .low_level_com import LLLSV2Com
from .misc import (decode_directory_info, decode_error_message,
decode_file_system_info, decode_override_information,
decode_system_parameters, decode_tool_information)
from .translate_messages import (get_error_text, get_execution_status_text,
get_program_status_text)
class LSV2():
"""Implementation of the LSV2 protocol used to communicate with certain CNC controls"""
def __init__(self, hostname, port=0, timeout=15.0, safe_mode=True):
"""init object variables and create socket"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self._llcom = LLLSV2Com(hostname, port, timeout)
self._buffer_size = LLLSV2Com.DEFAULT_BUFFER_SIZE
self._active_logins = list()
if safe_mode:
logging.info(
'safe mode is active, login and system commands are restricted')
self._known_logins = (Login.INSPECT, Login.FILETRANSFER)
self._known_sys_cmd = (ParCCC.SET_BUF1024, ParCCC.SET_BUF512, ParCCC.SET_BUF2048,
ParCCC.SET_BUF3072, ParCCC.SET_BUF4096, ParCCC.SECURE_FILE_SEND)
else:
logging.info(
'safe mode is off, login and system commands are not restricted. Use with caution!')
self._known_logins = list(Login)
self._known_sys_cmd = list(ParCCC)
self._versions = None
self._sys_par = None
self._secure_file_send = False
self._control_type = ControlType.UNKNOWN
self._last_error_code = None
def connect(self):
"""connect to control"""
self._llcom.connect()
self._configure_connection()
def disconnect(self):
"""logout of all open logins and close connection"""
self.logout(login=None)
self._llcom.disconnect()
logging.debug('Connection to host closed')
def is_itnc(self):
"""return true if control is a iTNC"""
return self._control_type == ControlType.MILL_OLD
def is_tnc(self):
"""return true if control is a TNC"""
return self._control_type == ControlType.MILL_NEW
def is_pilot(self):
"""return true if control is a CNCPILOT640"""
return self._control_type == ControlType.LATHE_NEW
@staticmethod
def _decode_error(content):
"""decode error codes to text"""
byte_1, byte_2 = struct.unpack('!BB', content)
error_text = get_error_text(byte_1, byte_2)
logging.warning(
'T_ER or T_BD received, an error occurred during the execution of the last command: %s', error_text)
return error_text
def _send_recive(self, command, expected_response, payload=None):
"""takes a command and payload, sends it to the control and checks
if the response is as expected. Returns content if not an error"""
if expected_response is None:
self._llcom.telegram(
command, payload, buffer_size=self._buffer_size, wait_for_response=False)
logging.info(
'command %s sent successfully, did not check for response', command)
return True
else:
response, content = self._llcom.telegram(
command, payload, buffer_size=self._buffer_size, wait_for_response=True)
if response in expected_response:
if content is not None and len(content) > 0:
logging.debug(
'command %s executed successfully, received %s with %d bytes payload', command, response, len(content))
return content
logging.debug(
'command %s executed successfully, received %s without any payload', command, response)
return True
if response in RSP.T_ER:
self._decode_error(content)
self._last_error_code = struct.unpack('!BB', content)
else:
logging.error(
'received unexpected response %s to command %s. response content %s', response, command, content)
self._last_error_code = None
return False
def _send_recive_block(self, command, expected_response, payload=None):
"""takes a command and payload, sends it to the control and continues reading
until the expected response is received."""
response_buffer = list()
response, content = self._llcom.telegram(command, payload, buffer_size=self._buffer_size)
if response in RSP.T_ER:
self._decode_error(content)
elif response in RSP.T_FD:
logging.debug('Transfer is finished with no content')
elif response not in expected_response:
logging.error(
'received unexpected response %s during block read for command %s. response content %s', response, command, content)
raise Exception('received unexpected response {}'.format(response))
else:
while response in expected_response:
response_buffer.append(content)
response, content = self._llcom.telegram(RSP.T_OK, buffer_size=self._buffer_size)
return response_buffer
def _send_recive_ack(self, command, payload=None):
"""sends command and pyload to control, returns True on T_OK"""
response, content = self._llcom.telegram(command, payload, buffer_size=self._buffer_size)
if response in RSP.T_OK:
return True
if response in RSP.T_ER:
self._decode_error(content)
else:
logging.error(
'received unexpected response %s to command %s. response content %s', response, command, content)
return False
def _configure_connection(self):
"""Set up the communication parameters for file transfer. Buffer size and secure file
transfer are enabled based on the capabilities of the control.
:rtype: None
"""
self.login(login=Login.INSPECT)
control_type = self.get_versions()['Control']
max_block_length = self.get_system_parameter()['Max_Block_Length']
logging.info('setting connection settings for %s and block length %s', control_type, max_block_length)
if control_type in ('TNC640', 'TNC620', 'TNC320', 'TNC128'):
self._control_type = ControlType.MILL_NEW
elif control_type in ('iTNC530', 'iTNC530 Programm'):
self._control_type = ControlType.MILL_OLD
elif control_type in ('CNCPILOT640', ):
self._control_type = ControlType.LATHE_NEW
else:
logging.warning(
'Unknown control type, treat machine as new style mill')
self._control_type = ControlType.MILL_NEW
selected_size = -1
selected_command = None
if max_block_length >= 4096:
selected_size = 4096
selected_command = ParCCC.SET_BUF4096
elif 3072 <= max_block_length < 4096:
selected_size = 3072
selected_command = ParCCC.SET_BUF3072
elif 2048 <= max_block_length < 3072:
selected_size = 2048
selected_command = ParCCC.SET_BUF2048
elif 1024 <= max_block_length < 2048:
selected_size = 1024
selected_command = ParCCC.SET_BUF1024
elif 512 <= max_block_length < 1024:
selected_size = 512
selected_command = ParCCC.SET_BUF512
elif 256 <= max_block_length < 512:
selected_size = 256
else:
logging.error(
'could not decide on a buffer size for maximum message length of %d', max_block_length)
raise Exception('unknown buffer size')
if selected_command is None:
logging.debug('use smallest buffer size of 256')
self._buffer_size = selected_size
else:
logging.debug('use buffer size of %d', selected_size)
if self.set_system_command(selected_command):
self._buffer_size = selected_size
else:
raise Exception(
'error in communication while setting buffer size to %d' % selected_size)
if not self.set_system_command(ParCCC.SECURE_FILE_SEND):
logging.warning('secure file transfer not supported? use fallback')
self._secure_file_send = False
else:
self._secure_file_send = True
self.login(login=Login.FILETRANSFER)
logging.info(
'successfully configured connection parameters and basic logins. selected buffer size is %d, use secure file send: %s', self._buffer_size, self._secure_file_send)
def login(self, login, password=None):
"""Request additional access rights. To elevate this level a logon has to be performed. Some levels require a password.
:param str login: One of the known login strings
:param str password: optional. Password for login
:returns: True if execution was successful
:rtype: bool
"""
if login in self._active_logins:
logging.debug('login already active')
return True
if login not in self._known_logins:
logging.error('unknown or unsupported login')
return False
payload = bytearray()
payload.extend(map(ord, login))
payload.append(0x00)
if password is not None:
payload.extend(map(ord, password))
payload.append(0x00)
if not self._send_recive_ack(CMD.A_LG, payload):
logging.error('an error occurred during login for login %s', login)
return False
self._active_logins.append(login)
logging.info('login executed successfully for login %s', login)
return True
def logout(self, login=None):
"""Drop one or all access right. If no login is supplied all active access rights are dropped.
:param str login: optional. One of the known login strings
:returns: True if execution was successful
:rtype: bool
"""
if login in self._known_logins or login is None:
logging.debug('logout for login %s', login)
if login in self._active_logins or login is None:
payload = bytearray()
if login is not None:
payload.extend(map(ord, login))
payload.append(0x00)
if self._send_recive_ack(CMD.A_LO, payload):
logging.info(
'logout executed successfully for login %s', login)
if login is not None:
self._active_logins.remove(login)
else:
self._active_logins = list()
return True
else:
logging.info(
'login %s was not active, logout not necessary', login)
return True
else:
logging.warning('unknown or unsupported user')
return False
def set_system_command(self, command, parameter=None):
"""Execute a system command on the control if command is one a known value. If safe mode is active, some of the
commands are disabled. If necessary additinal parameters can be supplied.
:param int command: system command
:param str parameter: optional. parameter payload for system command
:returns: True if execution was successful
:rtype: bool
"""
if command in self._known_sys_cmd:
payload = bytearray()
payload.extend(struct.pack('!H', command))
if parameter is not None:
payload.extend(map(ord, parameter))
payload.append(0x00)
if self._send_recive_ack(CMD.C_CC, payload):
return True
logging.debug('unknown or unsupported system command')
return False
def get_system_parameter(self, force=False):
"""Get all version information, result is bufferd since it is also used internally. With parameter force it is
possible to manually re-read the information form the control
:param bool force: if True the information is read even if it is already buffered
:returns: dictionary with system parameters like number of plc variables, supported lsv2 version etc.
:rtype: dict
"""
if self._sys_par is not None and force is False:
logging.debug(
'version info already in memory, return previous values')
return self._sys_par
result = self._send_recive(
command=CMD.R_PR, expected_response=RSP.S_PR)
if result:
sys_par = decode_system_parameters(result)
logging.debug('got system parameters: %s', sys_par)
self._sys_par = sys_par
return self._sys_par
logging.error('an error occurred while querying system parameters')
return False
def get_versions(self, force=False):
"""Get all version information, result is bufferd since it is also used internally. With parameter force it is
possible to manually re-read the information form the control
:param bool force: if True the information is read even if it is already buffered
:returns: dictionary with version text for control type, nc software, plc software, software options etc.
:rtype: dict
"""
if self._versions is not None and force is False:
logging.debug(
'version info already in memory, return previous values')
else:
info_data = dict()
result = self._send_recive(CMD.R_VR, RSP.S_VR, payload=struct.pack(
'!B', ParRVR.CONTROL))
if result:
info_data['Control'] = result.strip(b'\x00').decode('utf-8')
else:
raise Exception(
'Could not read version information from control')
result = self._send_recive(CMD.R_VR, RSP.S_VR, payload=struct.pack(
'!B', ParRVR.NC_VERSION))
if result:
info_data['NC_Version'] = result.strip(b'\x00').decode('utf-8')
result = self._send_recive(CMD.R_VR, RSP.S_VR, payload=struct.pack(
'!B', ParRVR.PLC_VERSION))
if result:
info_data['PLC_Version'] = result.strip(
b'\x00').decode('utf-8')
result = self._send_recive(CMD.R_VR, RSP.S_VR, payload=struct.pack(
'!B', ParRVR.OPTIONS))
if result:
info_data['Options'] = result.strip(b'\x00').decode('utf-8')
result = self._send_recive(CMD.R_VR, RSP.S_VR, payload=struct.pack(
'!B', ParRVR.ID))
if result:
info_data['ID'] = result.strip(b'\x00').decode('utf-8')
if self.is_itnc():
info_data['Release_Type'] = 'not supported'
else:
result = self._send_recive(CMD.R_VR, RSP.S_VR, payload=struct.pack(
'!B', ParRVR.RELEASE_TYPE))
if result:
info_data['Release_Type'] = result.strip(
b'\x00').decode('utf-8')
result = self._send_recive(CMD.R_VR, RSP.S_VR, payload=struct.pack(
'!B', ParRVR.SPLC_VERSION))
if result:
info_data['SPLC_Version'] = result.strip(
b'\x00').decode('utf-8')
else:
info_data['SPLC_Version'] = 'not supported'
logging.debug('got version info: %s', info_data)
self._versions = info_data
return self._versions
def get_program_status(self):
"""Get status code of currently active program
See https://github.com/tfischer73/Eclipse-Plugin-Heidenhain/issues/1
:returns: status code or False if something went wrong
:rtype: int
"""
self.login(login=Login.DNC)
payload = bytearray()
payload.extend(struct.pack('!H', ParRRI.PGM_STATE))
result = self._send_recive(CMD.R_RI, RSP.S_RI, payload)
if result:
pgm_state = struct.unpack('!H', result)[0]
logging.debug('successfully read state of active program: %s',
get_program_status_text(pgm_state))
return pgm_state
logging.error('an error occurred while querying program state')
return False
def get_program_stack(self):
"""Get path of currently active nc program(s) and current line number
See https://github.com/tfischer73/Eclipse-Plugin-Heidenhain/issues/1
:returns: dictionary with line number, main program and current program or False if something went wrong
:rtype: dict
"""
self.login(login=Login.DNC)
payload = bytearray()
payload.extend(struct.pack('!H', ParRRI.SELECTED_PGM))
result = self._send_recive(CMD.R_RI, RSP.S_RI, payload=payload)
if result:
stack_info = dict()
stack_info['Line'] = struct.unpack('!L', result[:4])[0]
stack_info['Main_PGM'] = result[4:].split(
b'\x00')[0].decode().strip('\x00').replace('\\', '/')
stack_info['Current_PGM'] = result[4:].split(
b'\x00')[1].decode().strip('\x00').replace('\\', '/')
logging.debug(
'successfully read active program stack and line number: %s', stack_info)
return stack_info
logging.error('an error occurred while querying active program state')
return False
def get_execution_status(self):
"""Get status code of program state to text
See https://github.com/drunsinn/pyLSV2/issues/1
:returns: status code or False if something went wrong
:rtype: int
"""
self.login(login=Login.DNC)
payload = bytearray()
payload.extend(struct.pack('!H', ParRRI.EXEC_STATE))
result = self._send_recive(CMD.R_RI, RSP.S_RI, payload)
if result:
exec_state = struct.unpack('!H', result)[0]
logging.debug('read execution state %d : %s', exec_state,
get_execution_status_text(exec_state, 'en'))
return exec_state
logging.error('an error occurred while querying execution state')
return False
def get_directory_info(self, remote_directory=None):
"""Query information a the current working directory on the control
:param str remote_directory: optional. If set, working directory will be changed
:returns: dictionary with info about the directory or False if an error occurred
:rtype: dict
"""
if remote_directory is not None and not self.change_directory(remote_directory):
logging.error(
'could not change current directory to read directory info for %s', remote_directory)
result = self._send_recive(CMD.R_DI, RSP.S_DI)
if result:
dir_info = decode_directory_info(result)
logging.debug(
'successfully received directory information %s', dir_info)
return dir_info
logging.error('an error occurred while querying directory info')
return False
def change_directory(self, remote_directory):
"""Change the current working directoyon the control
:param str remote_directory: path of directory on the control
:returns: True if changing of directory succeeded
:rtype: bool
"""
dir_path = remote_directory.replace('\\', '/')
payload = bytearray()
payload.extend(map(ord, dir_path))
payload.append(0x00)
if self._send_recive_ack(CMD.C_DC, payload=payload):
logging.debug('changed working directory to %s', dir_path)
return True
logging.error('an error occurred while changing directory')
return False
def get_file_info(self, remote_file_path):
"""Query information about a file
:param str remote_file_path: path of file on the control
:returns: dictionary with info about the file or False if the remote path does not exist
:rtype: dict
"""
file_path = remote_file_path.replace('\\', '/')
payload = bytearray()
payload.extend(map(ord, file_path))
payload.append(0x00)
result = self._send_recive(CMD.R_FI, RSP.S_FI, payload=payload)
if result:
file_info = decode_file_system_info(result, self._control_type)
logging.debug(
'successfully received file information %s', file_info)
return file_info
logging.warning(
'an error occurred while querying file info for %s, this might also indicate that the file does not exist', remote_file_path)
return False
def get_directory_content(self):
"""Query content of current working directory from the control.
In some situations it is necessary to first call get_directory_info() or else
the attributes won't be correct.
:returns: list of dict with info about directory entries
:rtype: list
"""
dir_content = list()
payload = bytearray()
payload.append(ParRDR.SINGLE)
result = self._send_recive_block(CMD.R_DR, RSP.S_DR, payload)
logging.debug(
'received %d entries for directory content information', len(result))
for entry in result:
dir_content.append(decode_file_system_info(entry, self._control_type))
logging.debug(
'successfully received directory information %s', dir_content)
return dir_content
def get_drive_info(self):
"""Query info all drives and partitions from the control
:returns: list of dict with with info about drive entries
:rtype: list
"""
drives_list = list()
payload = bytearray()
payload.append(ParRDR.DRIVES)
result = self._send_recive_block(CMD.R_DR, RSP.S_DR, payload)
logging.debug('received %d packets of drive information', len(result))
for entry in result:
drives_list.append(entry)
logging.debug('successfully received drive information %s', drives_list)
return drives_list
def make_directory(self, dir_path):
"""Create a directory on control. If necessary also creates parent directories
:param str dir_path: path of directory on the control
:returns: True if creating of directory completed successfully
:rtype: bool
"""
path_parts = dir_path.replace('\\', '/').split('/') # convert path to unix style
path_to_check = ''
for part in path_parts:
path_to_check += part + '/'
# no file info -> does not exist and has to be created
if self.get_file_info(path_to_check) is False:
payload = bytearray()
payload.extend(map(ord, path_to_check))
payload.append(0x00) # terminate string
if self._send_recive_ack(command=CMD.C_DM, payload=payload):
logging.debug('Directory created successfully')
else:
raise Exception(
'an error occurred while creating directory {}'.format(dir_path))
else:
logging.debug('nothing to do as this segment already exists')
return True
def delete_empty_directory(self, dir_path):
"""Delete empty directory on control
:param str dir_path: path of directory on the control
:returns: True if deleting of directory completed successfully
:rtype: bool
"""
dir_path = dir_path.replace('\\', '/')
payload = bytearray()
payload.extend(map(ord, dir_path))
payload.append(0x00)
if not self._send_recive_ack(command=CMD.C_DD, payload=payload):
logging.warning(
'an error occurred while deleting directory %s, this might also indicate that it does not exist', dir_path)
return False
logging.debug('successfully deleted directory %s', dir_path)
return True
def delete_file(self, file_path):
"""Delete file on control
:param str file_path: path of file on the control
:returns: True if deleting of file completed successfully
:rtype: bool
"""
file_path = file_path.replace('\\', '/')
payload = bytearray()
payload.extend(map(ord, file_path))
payload.append(0x00)
if not self._send_recive_ack(command=CMD.C_FD, payload=payload):
logging.warning(
'an error occurred while deleting file %s, this might also indicate that it does not exist', file_path)
return False
logging.debug('successfully deleted file %s', file_path)
return True
def copy_local_file(self, source_path, target_path):
"""Copy file on control from one place to another
:param str source_path: path of file on the control
:param str target_path: path of target location
:returns: True if copying of file completed successfully
:rtype: bool
"""
source_path = source_path.replace('\\', '/')
target_path = target_path.replace('\\', '/')
if '/' in source_path:
# change directory
source_file_name = source_path.split('/')[-1]
source_directory = source_path.rstrip(source_file_name)
if not self.change_directory(remote_directory=source_directory):
raise Exception('could not open the source directory')
else:
source_file_name = source_path
source_directory = '.'
if target_path.endswith('/'):
target_path += source_file_name
payload = bytearray()
payload.extend(map(ord, source_file_name))
payload.append(0x00)
payload.extend(map(ord, target_path))
payload.append(0x00)
logging.debug('prepare to copy file %s from %s to %s',
source_file_name, source_directory, target_path)
if not self._send_recive_ack(command=CMD.C_FC, payload=payload):
logging.warning(
'an error occurred copying file %s to %s', source_path, target_path)
return False
logging.debug('successfully copied file %s', source_path)
return True
def move_local_file(self, source_path, target_path):
"""Move file on control from one place to another
:param str source_path: path of file on the control
:param str target_path: path of target location
:returns: True if moving of file completed successfully
:rtype: bool
"""
source_path = source_path.replace('\\', '/')
target_path = target_path.replace('\\', '/')
if '/' in source_path:
source_file_name = source_path.split('/')[-1]
source_directory = source_path.rstrip(source_file_name)
if not self.change_directory(remote_directory=source_directory):
raise Exception('could not open the source directory')
else:
source_file_name = source_path
source_directory = '.'
if target_path.endswith('/'):
target_path += source_file_name
payload = bytearray()
payload.extend(map(ord, source_file_name))
payload.append(0x00)
payload.extend(map(ord, target_path))
payload.append(0x00)
logging.debug('prepare to move file %s from %s to %s',
source_file_name, source_directory, target_path)
if not self._send_recive_ack(command=CMD.C_FR, payload=payload):
logging.warning('an error occurred moving file %s to %s', source_path, target_path)
return False
logging.debug('successfully moved file %s', source_path)
return True
def send_file(self, local_path, remote_path, override_file=False, binary_mode=False):
"""Upload a file to control
:param str local_path: local path of the file to be sent
:param str remote_path: path of destination on the control, with or without file name
:param bool override_file: flag if file should be replaced if it already exists
:param bool binary_mode: flag if binary transfer mode should be used, if not set the
file name is checked for known binary file type
:returns: True if transfer completed successfully
:rtype: bool
"""
local_file = Path(local_path)
if not local_file.is_file():
logging.error(
'the supplied path %s did not resolve to a file', local_file)
raise Exception('local file does not exist! {}'.format(local_file))
remote_path = remote_path.replace('\\', '/')
if '/' in remote_path:
if remote_path.endswith('/'): # no filename given
remote_file_name = local_file.name
remote_directory = remote_path
else:
remote_file_name = remote_path.split('/')[-1]
remote_directory = remote_path.rstrip(remote_file_name)
if not self.change_directory(remote_directory=remote_directory):
raise Exception(
'could not open the source directory {}'.format(remote_directory))
else:
remote_file_name = remote_path
remote_directory = self.get_directory_info()['Path'] # get pwd
remote_directory = remote_directory.rstrip('/')
if not self.get_directory_info(remote_directory):
logging.debug('remote path does not exist, create directory(s)')
self.make_directory(remote_directory)
remote_info = self.get_file_info(
remote_directory + '/' + remote_file_name)
if remote_info:
logging.debug('remote path exists and points to a file')
if override_file:
if not self.delete_file(remote_directory + '/' + remote_file_name):
raise Exception('something went wrong while deleting file {}'.format(
remote_directory + '/' + remote_file_name))
else:
logging.warning('remote file already exists, override was not set')
return False
logging.debug('ready to send file from %s to %s',
local_file, remote_directory + '/' + remote_file_name)
payload = bytearray()
payload.extend(map(ord, remote_directory + '/' + remote_file_name))
payload.append(0x00)
if binary_mode or self._is_file_type_binary(local_path):
payload.append(MODE_BINARY)
logging.info('selecting binary transfer mode for this file type')
response, content = self._llcom.telegram(CMD.C_FL, payload, buffer_size=self._buffer_size)
if response in RSP.T_OK:
with local_file.open('rb') as input_buffer:
while True:
# use current buffer size but reduce by 10 to make sure it fits together with command and size
buffer = input_buffer.read(self._buffer_size - 10)
if not buffer:
# finished reading file
break
response, content = self._llcom.telegram(
RSP.S_FL, buffer, buffer_size=self._buffer_size)
if response in RSP.T_OK:
pass
else:
if response in RSP.T_ER:
self._decode_error(content)
else:
logging.error('could not send data with error %s', response)
return False
# signal that no more data is being sent
if self._secure_file_send:
if not self._send_recive(command=RSP.T_FD, expected_response=RSP.T_OK, payload=None):
logging.error('could not send end of file marker')
return False
else:
if not self._send_recive(command=RSP.T_FD, expected_response=None, payload=None):
logging.error('could not send end of file marker')
return False
else:
if response in RSP.T_ER:
self._decode_error(content)
else:
logging.error('could not send file with error %s', response)
return False
return True
def recive_file(self, remote_path, local_path, override_file=False, binary_mode=False):
"""Download a file from control
:param str remote_path: path of file on the control
:param str local_path: local path of destination with or without file name
:param bool override_file: flag if file should be replaced if it already exists
:param bool binary_mode: flag if binary transfer mode should be used, if not set the file name is
checked for known binary file type
:returns: True if transfer completed successfully
:rtype: bool
"""
remote_file_info = self.get_file_info(remote_path)
if not remote_file_info:
logging.error('remote file does not exist: %s', remote_path)
return False
local_file = Path(local_path)
if local_file.is_dir():
# append the remote file name if only a directory was given as local destination
local_file = local_file.joinpath(remote_path.split('/')[-1])
if local_file.is_file():
logging.debug('local path exists and points to file')
if override_file:
local_file.unlink()
else:
logging.warning(
'remote file already exists, override was not set')
return False
logging.debug('loading file from %s to %s', remote_path, local_file)
payload = bytearray()
payload.extend(map(ord, remote_path))
payload.append(0x00)
if binary_mode or self._is_file_type_binary(remote_path):
payload.append(MODE_BINARY) # force binary transfer
logging.info('using binary transfer mode')
response, content = self._llcom.telegram(CMD.R_FL, payload, buffer_size=self._buffer_size)
with local_file.open('wb') as out_file:
if response in RSP.S_FL:
if binary_mode:
out_file.write(content)
else:
out_file.write(content.replace(b'\x00', b'\r\n'))
logging.debug(
'received first block of file %s', remote_path)
while True:
response, content = self._llcom.telegram(
RSP.T_OK, payload=None, buffer_size=self._buffer_size)
if response in RSP.S_FL:
if binary_mode:
out_file.write(content)
else:
out_file.write(content.replace(b'\x00', b'\r\n'))
logging.debug(
'received %d more bytes for file', len(content))
elif response in RSP.T_FD:
logging.info('finished loading file')
break
else:
if response in RSP.T_ER or response in RSP.T_BD:
logging.error(
'an error occurred while loading data for file %s : %s', remote_path, self._decode_error(content))
else:
logging.error(
'something went wrong while receiving file data %s', remote_path)
return False
else:
if response in RSP.T_ER or response in RSP.T_BD:
logging.error('an error occurred while loading the first block of data for file %s : %s',
remote_path, self._decode_error(content))
self._last_error_code = struct.unpack('!BB', content)
else:
logging.error(
'could not load file with error %s', response)
self._last_error_code = None
return False
logging.info('transfer complete, received %d bytes for file %s, saved to %s',
local_file.stat().st_size, remote_path, local_file)
return True
def _is_file_type_binary(self, file_name):
"""Check if file is expected to be binary by comparing with known expentions.
:param file_name: name of the file to check
:returns: True if file matches a known binary file type
:rtype: bool
"""
for bin_type in BIN_FILES:
if isinstance(file_name, Path):
if file_name.suffix == bin_type:
return True
elif file_name.endswith(bin_type):
return True
return False
def read_plc_memory(self, address, mem_type, count=1):
"""Read data from plc memory.
:param address: which memory location should be read, starts at 0 up to the max number for each type
:param mem_type: what datatype to read
:param count: how many elements should be read at a time, from 1 (default) up to 255 or max number
:returns: a list with the data values
:raises Exception: raises an Exception
"""
if self._sys_par is None:
self.get_system_parameter()
self.login(login=Login.PLCDEBUG)
if mem_type is MemoryType.MARKER:
start_address = self._sys_par['Marker_Start']
max_count = self._sys_par['Markers']
mem_byte_count = 1
unpack_string = '!?'
elif mem_type is MemoryType.INPUT:
start_address = self._sys_par['Input_Start']
max_count = self._sys_par['Inputs']
mem_byte_count = 1
unpack_string = '!?'
elif mem_type is MemoryType.OUTPUT:
start_address = self._sys_par['Output_Start']
max_count = self._sys_par['Outputs']
mem_byte_count = 1
unpack_string = '!?'
elif mem_type is MemoryType.COUNTER:
start_address = self._sys_par['Counter_Start']
max_count = self._sys_par['Counters']
mem_byte_count = 1
unpack_string = '!?'
elif mem_type is MemoryType.TIMER:
start_address = self._sys_par['Timer_Start']
max_count = self._sys_par['Timers']
mem_byte_count = 1
unpack_string = '!?'
elif mem_type is MemoryType.BYTE:
start_address = self._sys_par['Word_Start']
max_count = self._sys_par['Words'] * 2
mem_byte_count = 1
unpack_string = '!B'
elif mem_type is MemoryType.WORD:
start_address = self._sys_par['Word_Start']
max_count = self._sys_par['Words']
mem_byte_count = 2
unpack_string = '<H'
elif mem_type is MemoryType.DWORD:
start_address = self._sys_par['Word_Start']
max_count = self._sys_par['Words'] / 4
mem_byte_count = 4
unpack_string = '<L'
elif mem_type is MemoryType.STRING:
start_address = self._sys_par['String_Start']
max_count = self._sys_par['Strings']
mem_byte_count = self._sys_par['String_Length']
unpack_string = '{}s'.format(mem_byte_count)
elif mem_type is MemoryType.INPUT_WORD:
start_address = self._sys_par['Input_Word_Start']
max_count = self._sys_par['Input']
mem_byte_count = 2
unpack_string = '<H'
elif mem_type is MemoryType.OUTPUT_WORD:
start_address = self._sys_par['Output_Word_Start']
max_count = self._sys_par['Output_Words']
mem_byte_count = 2
unpack_string = '<H'
else:
raise Exception('unknown address type')
if count > max_count:
raise Exception('maximum number of values is %d' % max_count)
if count > 0xFF:
raise Exception('cant read more than 255 elements at a time')
plc_values = list()
if mem_type is MemoryType.STRING:
# advance address if necessary
address = address + (count - 1) * mem_byte_count
for i in range(count):
payload = bytearray()
payload.extend(struct.pack(
'!L', start_address + address + i * mem_byte_count))
payload.extend(struct.pack('!B', mem_byte_count))
result = self._send_recive(CMD.R_MB, RSP.S_MB, payload=payload)
if result:
logging.debug('read string %d', address + i * mem_byte_count)
plc_values.append(struct.unpack(unpack_string, result)[
0].rstrip(b'\x00').decode('utf8'))
else:
logging.error('failed to read string from address %d',
start_address + address + i * mem_byte_count)
return False
else:
payload = bytearray()
payload.extend(struct.pack('!L', start_address + address))
payload.extend(struct.pack('!B', count * mem_byte_count))
result = self._send_recive(CMD.R_MB, RSP.S_MB, payload=payload)
if result:
logging.debug('read %d value(s) from address %d', count, address)
for i in range(0, len(result), mem_byte_count):
plc_values.append(struct.unpack(
unpack_string, result[i:i+mem_byte_count])[0])
else:
logging.error('failed to read values from address %d',
start_address + address)
return False
return plc_values
def set_keyboard_access(self, unlocked):
"""Enable or disable the keyboard on the control. Requires access level MONITOR to work.
:param bool unlocked: if True unlocks the keyboard. if false, input is set to locked
:returns: True or False if command was executed successfully
:rtype: bool
"""
payload = bytearray()
if unlocked:
payload.extend(struct.pack('!B', 0x00))
else:
payload.extend(struct.pack('!B', 0x01))
result = self._send_recive(CMD.C_LK, RSP.T_OK, payload=payload)
if result:
if unlocked:
logging.debug('command to unlock keyboard was successful')
else:
logging.debug('command to lock keyboard was successful')
return True
else:
logging.warning(
'an error occurred changing the state of the keyboard lock')
return False
def get_machine_parameter(self, name):
"""Read machine parameter from control. Requires access INSPECT level to work.
:param str name: name of the machine parameter. For iTNC the parameter number has to be converted to a string
:returns: value of parameter or False if command not successful
:rtype: str or bool
"""
payload = bytearray()
payload.extend(map(ord, name))
payload.append(0x00)
result = self._send_recive(CMD.R_MC, RSP.S_MC, payload=payload)
if result:
value = result.rstrip(b'\x00').decode('utf8')
logging.debug('machine parameter %s has value %s', name, value)
return value
logging.warning(
'an error occurred while reading machine parameter %s', name)
return False
def set_machine_parameter(self, name, value, safe_to_disk=False):
"""Set machine parameter on control. Requires access PLCDEBUG level to work.
Writing a parameter takes some time, make sure to set timeout sufficiently high!
:param str name: name of the machine parameter. For iTNC the parameter number has to be converted to a string
:param str value: new value of the machine parameter. There is no type checking; if the value cannot be converted by the control an error will be sent.
:param bool safe_to_disk: If True the new value will be written to the harddisk and stay permanent. If False (default) the value will only be available until the next reboot.
:returns: True or False if command was executed successfully
:rtype: bool
"""
payload = bytearray()
if safe_to_disk:
payload.extend(struct.pack('!L', 0x00))
else:
payload.extend(struct.pack('!L', 0x01))
payload.extend(map(ord, name))
payload.append(0x00)
payload.extend(map(ord, value))
payload.append(0x00)
result = self._send_recive(CMD.C_MC, RSP.T_OK, payload=payload)
if result:
logging.debug(
'setting of machine parameter %s to value %s was successful', name, value)
return True
logging.warning(
'an error occurred while setting machine parameter %s to value %s', name, value)
return False
def send_key_code(self, key_code):
"""Send key code to control. Behaves as if the associated key was pressed on the
keyboard. Requires access MONITOR level to work. To work correctly you first
have to lock the keyboard and unlock it afterwards:
set_keyboard_access(False)
send_key_code(KeyCode.CE)
set_keyboard_access(True)
:param int key_code: code number of the keyboard key
:returns: True or False if command was executed successfully
:rtype: bool
"""
payload = bytearray()
payload.extend(struct.pack('!H', key_code))
result = self._send_recive(CMD.C_EK, RSP.T_OK, payload=payload)
if result:
logging.debug('sending the key code %d was successful', key_code)
return True
logging.warning(
'an error occurred while sending the key code %d', key_code)
return False
def get_spindle_tool_status(self):
"""Get information about the tool currently in the spindle
:returns: tool information or False if something went wrong
:rtype: dict
"""
self.login(login=Login.DNC)
payload = bytearray()
payload.extend(struct.pack('!H', ParRRI.CURRENT_TOOL))
result = self._send_recive(CMD.R_RI, RSP.S_RI, payload)
if result:
tool_info = decode_tool_information(result)
logging.debug(
'successfully read info on current tool: %s', tool_info)
return tool_info
logging.warning(
'an error occurred while querying current tool information. This does not work for all control types')
return False
def get_override_info(self):
"""Get information about the override info
:returns: override information or False if something went wrong
:rtype: dict
"""
self.login(login=Login.DNC)
payload = bytearray()
payload.extend(struct.pack('!H', ParRRI.OVERRIDE))
result = self._send_recive(CMD.R_RI, RSP.S_RI, payload)
if result:
override_info = decode_override_information(result)
logging.debug(
'successfully read override info: %s', override_info)
return override_info
logging.warning(
'an error occurred while querying current override information. This does not work for all control types')
return False
def get_error_messages(self):
"""Get information about the first or next error displayed on the control
:param bool next_error: if True check if any further error messages are available
:returns: error information or False if something went wrong
:rtype: dict
"""
messages = list()
self.login(login=Login.DNC)
payload = bytearray()
payload.extend(struct.pack('!H', ParRRI.FIRST_ERROR))
result = self._send_recive(CMD.R_RI, RSP.S_RI, payload)
if result:
messages.append(decode_error_message(result))
payload = bytearray()
payload.extend(struct.pack('!H', ParRRI.NEXT_ERROR))
result = self._send_recive(
CMD.R_RI, RSP.S_RI, payload)
logging.debug('successfully read first error, checking for further errors')
while result:
messages.append(decode_error_message(result))
result = self._send_recive(
CMD.R_RI, RSP.S_RI, payload)
if self._last_error_code[1] == LSV2Err.T_ER_NO_NEXT_ERROR:
logging.debug('successfully read all errors')
else:
logging.warning(
'an error occurred while querying error information.')
return messages
elif self._last_error_code[1] == LSV2Err.T_ER_NO_NEXT_ERROR:
logging.debug('successfully queried errors, no error is active')
return messages
logging.warning(
'an error occurred while querying error information. This does not work for all control types')
return False
def _walk_dir(self, descend=True):
"""helber function to recursively search in directories for files
:param bool descend: control if search should run recursively
:returns: list of files found in directory
:rtype: list
"""
current_path = self.get_directory_info()['Path']
content = list()
for entry in self.get_directory_content():
if entry['Name'] == '.' or entry['Name'] == '..' or entry['Name'].endswith(':'):
continue
if entry['is_directory'] is True and descend is True:
if self.change_directory(current_path + entry['Name']):
content.extend(self._walk_dir())
else:
content.append(current_path + entry['Name'])
self.change_directory(current_path)
return content
def get_file_list(self, path=None, descend=True, pattern=None):
"""Get list of files in directory structure.
:param str path: path of the directory where files should be searched. if None then the current directory is used
:param bool descend: control if search should run recursively
:param str pattern: regex string to filter the file names
:returns: list of files found in directory
:rtype: list
"""
if path is not None:
if self.change_directory(path) is False:
logging.warning('could not change to directory')
return None
if pattern is None:
file_list = self._walk_dir(descend)
else:
file_list = list()
for entry in self._walk_dir(descend):
file_name = entry.split('/')[-1]
if re.match(pattern, file_name):
file_list.append(entry)
return file_list
def read_data_path(self, path):
"""Read values from control via data path. Only works on iTNC controls.
For ease of use, the path is formatted by replacing / by \\ and " by '.
:param str path: data path from which to read the value.
:returns: data value read from control formatted in its native data type or None if reading was not successful
"""
if not self.is_itnc():
logging.warning('Reading values from data path does not work on non-iTNC controls!')
path = path.replace('/', '\\').replace('"', '\'')
self.login(login=Login.DATA)
payload = bytearray()
payload.extend(b'\x00') # <- ???
payload.extend(b'\x00') # <- ???
payload.extend(b'\x00') # <- ???
payload.extend(b'\x00') # <- ???
payload.extend(map(ord, path))
payload.append(0x00) # terminate string
result = self._send_recive(CMD.R_DP, RSP.S_DP, payload)
if result:
value_type = struct.unpack('!L', result[0:4])[0]
if value_type == 2:
data_value = struct.unpack('!h', result[4:6])[0]
elif value_type == 3:
data_value = struct.unpack('!l', result[4:8])[0]
elif value_type == 5:
data_value = struct.unpack('<d', result[4:12])[0]
elif value_type == 8:
data_value = result[4:].strip(b'\x00').decode('utf-8')
elif value_type == 11:
data_value = struct.unpack('!?', result[4:5])[0]
elif value_type == 16:
data_value = struct.unpack('!b', result[4:5])[0]
elif value_type == 17:
data_value = struct.unpack('!B', result[4:5])[0]
else:
raise Exception('unknown return type: %d for %s' % (value_type, result[4:]))
logging.info(
'successfully read data path: %s and got value "%s"', path, data_value)
return data_value
logging.warning(
'an error occurred while querying data path "%s". This does not work for all control types', path)
return None
|
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import os
from profile import work_path
def get_count_with_gene(position_dict, base_count):
# gene_list = []
count = []
for key in position_dict:
array_ = np.array([])
for (x,y) in position_dict[key]:
# print(base_count[x - 1: y])
array_ = np.append(array_, base_count[x - 1: y])
# gene_list.append(key)
count.append(array_)
return count
def count_per_gene(gene, count):
plt.figure(dpi=100)
plt.style.use('ggplot')
plt.title("kernel density estimation(Gaussian):" + gene)
plt.xlabel("count")
plt.ylabel("f(count)")
# print(count)
count_ = np.unique(count)
if count_.shape[0] > 1:
sns.kdeplot(count, data2=None, kernel='gau', bw="scott",
label='scott', linewidth=0.3, shade=True)
sns.kdeplot(count, data2=None, kernel='gau', bw="silverman",
label='silverman', linewidth=0.3, shade=True)
plt.savefig(work_path + '/hist_pics/'+ str(gene))
plt.close()
|
import random
import numpy as np
import ImagesManager as imageManager
class GeneticAlgorithm:
def initRandomPopulation(self, amountPopulation, amountClusterings):
population = [[0 for personCluster in range(amountClusterings)] for personInPopulation in
range(amountPopulation)]
for personInPopulation in range(amountPopulation):
initRandom = 0
finalRandom = amountClusterings - 1
positionCluster = random.randint(initRandom, finalRandom)
population[personInPopulation][positionCluster] = 1
return population
def calculateFitness(self, population, dissimilarityMatrix, amountClusterings):
populationFitness = []
for lineIndex in range(0, len(population)):
sumClusterMembers = 0
sumMemberFitness = 0
member = population[lineIndex]
for clusterPosition in range(amountClusterings):
if (member[clusterPosition] == 1):
sumClusterMembers = sumClusterMembers + 1
for columnIndex in range(len(population)):
if (population[columnIndex][clusterPosition] == 1):
sumClusterMembers = sumClusterMembers + 1
sumMemberFitness = sumMemberFitness + (dissimilarityMatrix[lineIndex][columnIndex])
populationFitness.append(sumMemberFitness/sumClusterMembers * 0.5 * len(population))
return populationFitness
def raffleFathersUsingTournament(self, allPopulations, allFitness):
selectedFathersIndex = []
selectedFitnessFathers = []
positionsRaffled = []
allFathersSorted = False
while (allFathersSorted == False):
canSelectFather = True
initRandom = 0
finalRandom = len(allFitness) - 1  # tournament candidates are drawn from all populations
selectedPosition = random.randint(initRandom, finalRandom)
for index in range(len(positionsRaffled)):
if(positionsRaffled[index] == selectedPosition):
canSelectFather = False
break
else:
canSelectFather = True
if(canSelectFather):
positionsRaffled.append(selectedPosition)
selectedFathersIndex.append(selectedPosition)
selectedFitnessFathers.append(allFitness[selectedPosition])
if(len(selectedFathersIndex) == 3):
allFathersSorted = True
betterFatherIndex = 0
betterFitness = 100000
for indexFather in range(len(selectedFathersIndex)):
# print(selectedFitnessFathers[indexFather], selectedFitnessFathers[indexFather] < betterFitness)
if(selectedFitnessFathers[indexFather] < betterFitness):
betterFitness = selectedFitnessFathers[indexFather]
betterFatherIndex = selectedFathersIndex[indexFather]
return betterFatherIndex
def fathersCrossover(self, father, mother):
childs = []
amountChildGenes = len(father)
pointForCourt = int(amountChildGenes / 2)
for indexChild in range(2):
child = []
for motherIndex in range(0, pointForCourt):
if(indexChild == 0):
child.append(mother[motherIndex])
else:
child.append(father[motherIndex])
for fatherIndex in range(pointForCourt, amountChildGenes):
if (indexChild == 0):
child.append(father[fatherIndex])
else:
child.append(mother[fatherIndex])
childs.append(child)
return childs
def childMutation(self, child, percentage):
initRandom = 0
finalRandom = 100
canMutation = random.randint(initRandom, finalRandom)
if(canMutation < percentage):
populationLine = random.randint(initRandom, len(child) - 1)
for indexGene in range(len(child[populationLine])):
child[populationLine][indexGene] = 0
genePosition = random.randint(initRandom, len(child[populationLine]) -1)
child[populationLine][genePosition] = 1
return child
def insertChildInPopulationWithElitism(self, allFitness):
lessFitness = max(allFitness)
positionLessFitness = allFitness.index(lessFitness)
return positionLessFitness
dissimilarityMatrix = imageManager.getDissimilarityMatrixImagesForClustering(16, 'Images')
algorithmGenetic = GeneticAlgorithm()
populations = []
populationsFitness = []
numberOfCluster = 4
for populationIndex in range(0, 100):
population = algorithmGenetic.initRandomPopulation(len(dissimilarityMatrix), numberOfCluster)
populationFitness = algorithmGenetic.calculateFitness(population, dissimilarityMatrix, numberOfCluster)
populations.append(population)
populationsFitness.append(np.sum(populationFitness))
# for index in range(len(populations)):
# print('Population fitness: ', populationsFitness[index])
for generation in range(0, 100000):
mutationRate = 25
fatherIndex = algorithmGenetic.raffleFathersUsingTournament(populations, populationsFitness)
motherIndex = algorithmGenetic.raffleFathersUsingTournament(populations, populationsFitness)
childs = algorithmGenetic.fathersCrossover(populations[fatherIndex], populations[motherIndex])
finalChilds = []
firstMutantChild = algorithmGenetic.childMutation(childs[0], mutationRate)
secondMutantChild = algorithmGenetic.childMutation(childs[1], mutationRate)
finalChilds.append(firstMutantChild)
finalChilds.append(secondMutantChild)
childFitness = algorithmGenetic.calculateFitness(finalChilds[0], dissimilarityMatrix, numberOfCluster)
positionLessFitness = algorithmGenetic.insertChildInPopulationWithElitism(populationsFitness)
populationsFitness[positionLessFitness] = np.sum(childFitness)
populations[positionLessFitness] = finalChilds[0]
childFitness = algorithmGenetic.calculateFitness(finalChilds[1], dissimilarityMatrix, numberOfCluster)
positionLessFitness = algorithmGenetic.insertChildInPopulationWithElitism(populationsFitness)
populationsFitness[positionLessFitness] = np.sum(childFitness)
populations[positionLessFitness] = finalChilds[1]
print('Generation: ', generation, 'Best Fitness: ', min(populationsFitness))
finalPopulation = populations[populationsFitness.index(min(populationsFitness))]  # population with the lowest fitness
contExibition = 0
print('-----------------------------')
for lineResultIndex in range(0, len(finalPopulation)):
contExibition = contExibition + 1
print(finalPopulation[lineResultIndex])
if(contExibition == 4):
print('-----------------------------')
contExibition = 0
# print(np.matrix(min(populations)))
# print(np.matrix(max(populations)))
# print(np.matrix(populationFitness))
|
import os
import numpy as np
import pennylane as qml
import pytest
from qiskit import IBMQ
from qiskit.providers.ibmq.exceptions import IBMQAccountError
from pennylane_qiskit import IBMQDevice
from pennylane_qiskit import ibmq as ibmq
from pennylane_qiskit import qiskit_device as qiskit_device
@pytest.fixture
def token():
"""A fixture loading the IBMQ token from the IBMQX_TOKEN_TEST environment
variable."""
t = os.getenv("IBMQX_TOKEN_TEST", None)
if t is None:
pytest.skip("Skipping test, no IBMQ token available")
yield t
IBMQ.disable_account()
def test_load_from_env(token, monkeypatch):
"""test loading an IBMQ device from
an env variable"""
monkeypatch.setenv("IBMQX_TOKEN", token)
dev = IBMQDevice(wires=1)
assert dev.provider.credentials.is_ibmq()
def test_load_kwargs_takes_precedence(token, monkeypatch):
"""Test that with a potentially valid token stored as an environment
variable, passing the token as a keyword argument takes precedence."""
monkeypatch.setenv("IBMQX_TOKEN", "SomePotentiallyValidToken")
dev = IBMQDevice(wires=1, ibmqx_token=token)
assert dev.provider.credentials.is_ibmq()
def test_account_already_loaded(token):
"""Test loading an IBMQ device using
an already loaded account"""
IBMQ.enable_account(token)
dev = IBMQDevice(wires=1)
assert dev.provider.credentials.is_ibmq()
class MockQiskitDeviceInit:
"""A mocked version of the QiskitDevice __init__ method which
is called on by the IBMQDevice"""
def mocked_init(self, wires, provider, backend, shots, **kwargs):
"""Stores the provider which QiskitDevice.__init__ was
called with."""
self.provider = provider
def test_custom_provider(monkeypatch):
"""Tests that a custom provider can be passed when creating an IBMQ
device."""
mock_provider = "MockProvider"
mock_qiskit_device = MockQiskitDeviceInit()
with monkeypatch.context() as m:
m.setattr(ibmq.QiskitDevice, "__init__", mock_qiskit_device.mocked_init)
m.setattr(ibmq.IBMQ, "enable_account", lambda *args, **kwargs: None)
# Here mocking to a value such that it is not None
m.setattr(ibmq.IBMQ, "active_account", lambda *args, **kwargs: True)
dev = IBMQDevice(wires=2, backend="ibmq_qasm_simulator", provider=mock_provider)
assert mock_qiskit_device.provider == mock_provider
def mock_get_provider(*args, **kwargs):
"""A mock function for the get_provider Qiskit function to record the
arguments which it was called with."""
return (args, kwargs)
def test_default_provider(monkeypatch):
"""Tests that the default provider is used when no custom provider was
specified."""
mock_qiskit_device = MockQiskitDeviceInit()
with monkeypatch.context() as m:
m.setattr(ibmq.QiskitDevice, "__init__", mock_qiskit_device.mocked_init)
m.setattr(ibmq.IBMQ, "get_provider", mock_get_provider)
m.setattr(ibmq.IBMQ, "enable_account", lambda *args, **kwargs: None)
# Here mocking to a value such that it is not None
m.setattr(ibmq.IBMQ, "active_account", lambda *args, **kwargs: True)
dev = IBMQDevice(wires=2, backend="ibmq_qasm_simulator")
assert mock_qiskit_device.provider[0] == ()
assert mock_qiskit_device.provider[1] == {"hub": "ibm-q", "group": "open", "project": "main"}
def test_custom_provider_hub_group_project(monkeypatch):
"""Tests that the custom arguments passed during device instantiation are
used when calling get_provider."""
mock_qiskit_device = MockQiskitDeviceInit()
custom_hub = "SomeHub"
custom_group = "SomeGroup"
custom_project = "SomeProject"
with monkeypatch.context() as m:
m.setattr(ibmq.QiskitDevice, "__init__", mock_qiskit_device.mocked_init)
m.setattr(ibmq.IBMQ, "get_provider", mock_get_provider)
m.setattr(ibmq.IBMQ, "enable_account", lambda *args, **kwargs: None)
# Here mocking to a value such that it is not None
m.setattr(ibmq.IBMQ, "active_account", lambda *args, **kwargs: True)
dev = IBMQDevice(
wires=2,
backend="ibmq_qasm_simulator",
hub=custom_hub,
group=custom_group,
project=custom_project,
)
assert mock_qiskit_device.provider[0] == ()
assert mock_qiskit_device.provider[1] == {
"hub": custom_hub,
"group": custom_group,
"project": custom_project,
}
def test_load_from_disk(token):
"""Test loading the account credentials and the device from disk."""
IBMQ.save_account(token)
dev = IBMQDevice(wires=1)
assert dev.provider.credentials.is_ibmq()
IBMQ.delete_account()
def test_account_error(monkeypatch):
"""Test that an error is raised if there is no active IBMQ account."""
# Token is passed such that the test is skipped if no token was provided
with pytest.raises(IBMQAccountError, match="No active IBM Q account"):
with monkeypatch.context() as m:
m.delenv("IBMQX_TOKEN", raising=False)
IBMQDevice(wires=1)
@pytest.mark.parametrize("shots", [1000])
def test_simple_circuit(token, tol, shots):
"""Test executing a simple circuit submitted to IBMQ."""
IBMQ.enable_account(token)
dev = IBMQDevice(wires=2, backend="ibmq_qasm_simulator", shots=shots)
@qml.qnode(dev)
def circuit(theta, phi):
qml.RX(theta, wires=0)
qml.RX(phi, wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
theta = 0.432
phi = 0.123
res = circuit(theta, phi)
expected = np.array([np.cos(theta), np.cos(theta) * np.cos(phi)])
assert np.allclose(res, expected, **tol)
@pytest.mark.parametrize("shots", [1000])
def test_simple_circuit_with_batch_params(token, tol, shots, mocker):
"""Test that executing a simple circuit with batched parameters is
submitted to IBMQ once."""
IBMQ.enable_account(token)
dev = IBMQDevice(wires=2, backend="ibmq_qasm_simulator", shots=shots)
@qml.batch_params
@qml.qnode(dev)
def circuit(theta, phi):
qml.RX(theta, wires=0)
qml.RX(phi, wires=1)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
# Check that we run only once
spy1 = mocker.spy(dev, "batch_execute")
spy2 = mocker.spy(dev.backend, "run")
# Batch the input parameters
batch_dim = 3
theta = np.linspace(0, 0.543, batch_dim)
phi = np.linspace(0, 0.123, batch_dim)
res = circuit(theta, phi)
assert np.allclose(res[:, 0], np.cos(theta), **tol)
assert np.allclose(res[:, 1], np.cos(theta) * np.cos(phi), **tol)
# Check that batch_execute and IBMQBackend.run were each called once
assert spy1.call_count == 1
assert spy2.call_count == 1
@pytest.mark.parametrize("shots", [1000])
def test_batch_execute_parameter_shift(token, tol, shots, mocker):
"""Test that devices provide correct result computing the gradient of a
circuit using the parameter-shift rule and the batch execution pipeline."""
IBMQ.enable_account(token)
dev = IBMQDevice(wires=3, backend="ibmq_qasm_simulator", shots=shots)
spy1 = mocker.spy(dev, "batch_execute")
spy2 = mocker.spy(dev.backend, "run")
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x, y):
qml.RX(x, wires=[0])
qml.RY(y, wires=[1])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0) @ qml.PauliX(1) @ qml.PauliZ(2))
x = qml.numpy.array(0.543, requires_grad=True)
y = qml.numpy.array(0.123, requires_grad=True)
res = qml.grad(circuit)(x, y)
expected = np.array([[-np.sin(y) * np.sin(x), np.cos(y) * np.cos(x)]])
assert np.allclose(res, expected, **tol)
# Check that QiskitDevice.batch_execute was called twice
assert spy1.call_count == 2
# Check that run was called twice: for the partial derivatives and for
# running the circuit
assert spy2.call_count == 2
@pytest.mark.parametrize("shots", [1000])
def test_probability(token, tol, shots):
"""Test that the probs function works."""
IBMQ.enable_account(token)
dev = IBMQDevice(wires=2, backend="ibmq_qasm_simulator", shots=shots)
dev_analytic = qml.device("default.qubit", wires=2, shots=None)
x = [0.2, 0.5]
def circuit(x):
qml.RX(x[0], wires=0)
qml.RY(x[1], wires=0)
qml.CNOT(wires=[0, 1])
return qml.probs(wires=[0, 1])
prob = qml.QNode(circuit, dev)
prob_analytic = qml.QNode(circuit, dev_analytic)
# Calling the hardware only once
hw_prob = prob(x)
assert np.isclose(hw_prob.sum(), 1, **tol)
assert np.allclose(prob_analytic(x), hw_prob, **tol)
assert not np.array_equal(prob_analytic(x), hw_prob)
def test_track(token):
"""Test that the tracker works."""
IBMQ.enable_account(token)
dev = IBMQDevice(wires=1, backend="ibmq_qasm_simulator", shots=1)
dev.tracker.active = True
@qml.qnode(dev)
def circuit():
qml.PauliX(wires=0)
return qml.probs(wires=0)
circuit()
assert "job_time" in dev.tracker.history
if "job_time" in dev.tracker.history:
assert "creating" in dev.tracker.history["job_time"][0]
assert "validating" in dev.tracker.history["job_time"][0]
assert "queued" in dev.tracker.history["job_time"][0]
assert "running" in dev.tracker.history["job_time"][0]
assert len(dev.tracker.history["job_time"][0]) == 4
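# Note (illustrative, not part of the original test module): the tests above rely
# on a pytest fixture named `tol` that supplies numpy tolerance keyword arguments,
# normally provided by the package's conftest.py. A minimal sketch of such a
# fixture, with tolerances loose enough for shot-based devices, could look like:
#
#   @pytest.fixture
#   def tol():
#       return {"atol": 0.05, "rtol": 0.1}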
|
# this file normalizes the pure data
import pandas as pd
import numpy as np
import math
def normalize(srcName, targetName, mode):
"""
normalize the pure data
:param srcName: pure csv name
:param targetName: norm csv name
:param mode: the normalization mode ("angle", "thigh_len", "torso_box" or "none")
:return: None
"""
dataDf = pd.read_csv(srcName, index_col=0)
dataArr = np.array(dataDf)
dataList = dataArr.tolist()
dataNormList = []
# Nose – 0, Neck – 1, Right Shoulder – 2, Right Elbow – 3, Right Wrist – 4, Left Shoulder – 5, Left Elbow – 6,
# Left Wrist – 7, Right Hip – 8, Right Knee – 9, Right Ankle – 10, Left Hip – 11, Left Knee – 12, LAnkle – 13,
# Right Eye – 14, Left Eye – 15, Right Ear – 16, Left Ear – 17
if mode == "angle":
for line in dataList:
normal_line = []
# aspect ratio of torso
min_x = min(line[2], line[4], line[10], line[16], line[18], line[22], line[24])
max_x = max(line[2], line[4], line[10], line[16], line[18], line[22], line[24])
min_y = min(line[3], line[5], line[11], line[17], line[19], line[23], line[25])
max_y = max(line[3], line[5], line[11], line[17], line[19], line[23], line[25])
aspect_ratio = (max_x - min_x) / (max_y - min_y)
normal_line.append(aspect_ratio)
            # angles between the left/right neck-knee lines and the vertical axis
angle_left = math.degrees(math.atan2(line[24]-line[2], line[25]-line[3]))
angle_right = math.degrees(math.atan2(line[18]-line[2], line[19]-line[3]))
normal_line.append(angle_left)
normal_line.append(angle_right)
dataNormList.append(normal_line)
dataNormDf = pd.DataFrame(dataNormList, columns=['ratio', 'LAngle', 'RAngle'])
dataNormDf.to_csv(targetName)
else:
for line in dataList:
if mode == "thigh_len":
                # the length of the right thigh
thigh = math.sqrt((line[16]-line[18])**2 + (line[17]-line[19])**2)
if thigh == 0:
thigh = 0.01
centerX = line[2]
centerY = line[3]
for i in range(len(line)):
if i % 2 == 0:
line[i] = (line[i]-centerX) / thigh
else:
line[i] = (line[i]-centerY) / thigh
elif mode == "torso_box":
# min_x, min_y, max_x, max_y of torso
min_x = min(line[4], line[10], line[16], line[18], line[22], line[24])
max_x = max(line[4], line[10], line[16], line[18], line[22], line[24])
min_y = min(line[5], line[11], line[17], line[19], line[23], line[25])
max_y = max(line[5], line[11], line[17], line[19], line[23], line[25])
centerX = line[2]
centerY = line[3]
                # compute the normalization denominators
x_diff = max_x - min_x
y_diff = max_y - min_y
if x_diff == 0:
x_diff = 0.01
if y_diff == 0:
y_diff = 0.01
for i in range(len(line)):
if i % 2 == 0:
line[i] = (line[i]-centerX) / x_diff
else:
line[i] = (line[i]-centerY) / y_diff
elif mode == "none":
centerX = line[2]
centerY = line[3]
for i in range(len(line)):
if i % 2 == 0:
line[i] = line[i] - centerX
else:
line[i] = line[i] - centerY
dataNormList.append(line)
dataNormDf = pd.DataFrame(dataNormList, columns=['0x', '0y', '1x', '1y', '2x', '2y', '3x', '3y', '4x', '4y',
'5x', '5y', '6x', '6y', '7x', '7y', '8x', '8y', '9x', '9y',
'10x', '10y', '11x', '11y', '12x', '12y', '13x', '13y'])
dataNormDf.to_csv(targetName)
normalize("data/ADLPure.csv", "data/normalized/ADLNormAngle.csv", mode="angle")
normalize("data/ADLPure.csv", "data/normalized/ADLNormThigh.csv", mode="thigh_len")
normalize("data/ADLPure.csv", "data/normalized/ADLNormTorso.csv", mode="torso_box")
normalize("data/ADLPure.csv", "data/normalized/ADLNormNone.csv", mode="none")
normalize("data/FallPure.csv", "data/normalized/FallNormAngle.csv", mode="angle")
normalize("data/FallPure.csv", "data/normalized/FallNormThigh.csv", mode="thigh_len")
normalize("data/FallPure.csv", "data/normalized/FallNormTorso.csv", mode="torso_box")
normalize("data/FallPure.csv", "data/normalized/FallNormNone.csv", mode="none")
normalize("data/ADLPureTest.csv", "data/normalized/ADLNormAngleTest.csv", mode="angle")
normalize("data/ADLPureTest.csv", "data/normalized/ADLNormThighTest.csv", mode="thigh_len")
normalize("data/ADLPureTest.csv", "data/normalized/ADLNormTorsoTest.csv", mode="torso_box")
normalize("data/ADLPureTest.csv", "data/normalized/ADLNormNoneTest.csv", mode="none")
normalize("data/FallPureTest.csv", "data/normalized/FallNormAngleTest.csv", mode="angle")
normalize("data/FallPureTest.csv", "data/normalized/FallNormThighTest.csv", mode="thigh_len")
normalize("data/FallPureTest.csv", "data/normalized/FallNormTorsoTest.csv", mode="torso_box")
normalize("data/FallPureTest.csv", "data/normalized/FallNormNoneTest.csv", mode="none")
|
from django.urls import path, re_path
from . import views
urlpatterns = [
# coachings basic urls
re_path(r'^coachings/create/$', view=views.SimpleCoachingCreateAPIView.as_view()),
re_path(r'^coachings/detail/(?P<id>[\w\-]+)/$', view=views.SimpleCoachingDetailView.as_view()),
re_path(r'^coachings/all/$', view=views.SimpleCoachingListAPIView.as_view()),
re_path(r'^coachings/search/$', view=views.CoachingSearchAPIView.as_view()),
# coaching advance urls
re_path(r'^coachings/detail/advance/(?P<id>[\w\-]+)/$', view=views.AdvanceCoachingDetailView.as_view()),
# branch urls
re_path(r'^coachings/branch/create/', view=views.BranchCreateAPIView.as_view()),
re_path(r'^coachings/branch/update/(?P<id>[\w\-]+)/$', view=views.BranchDetailView.as_view()),
# Address urls
re_path(r'^coachings/address/create/', view=views.AddressCreateAPIView.as_view()),
re_path(r'^coachings/address/update/(?P<id>[\w\-]+)/$', view=views.AddressDetailView.as_view()),
# Course urls
re_path(r'^coachings/course/create/', view=views.CourseCreateAPIView.as_view()),
re_path(r'^coachings/course/update/(?P<id>[\w\-]+)/$', view=views.CourseDetailView.as_view()),
# Batch urls
re_path(r'^coachings/batch/create/', view=views.BatchCreateAPIView.as_view()),
re_path(r'^coachings/batch/update/(?P<id>[\w\-]+)/$', view=views.BatchDetailView.as_view()),
# CoachingFacultyMember urls
re_path(r'^coachings/faculty/create/', view=views.CoachingFacultyMemberCreateAPIView.as_view()),
re_path(r'^coachings/faculty/update/(?P<id>[\w\-]+)/$', view=views.CoachingFacultyMemberDetailView.as_view()),
re_path(r'^coachings/faculty/list/(?P<coaching_id>[\w\-]+)/$', view=views.CoachingFacultyMemberListAPIView.as_view()),
# CoachingMetaData
re_path(r'^coachings/meta/create/', view=views.CoachingMetaDataCreateAPIView.as_view()),
re_path(r'^coachings/meta/update/(?P<id>[\w\-]+)/$', view=views.CoachingMetaDataDetailView.as_view()),
]
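# A minimal sketch (app/module names are assumed, not taken from this project) of how
# this URLConf could be included from the project's root urls.py:
#
#   from django.urls import include, path
#
#   urlpatterns = [
#       path('api/', include('coachings.urls')),
#   ]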
|
# A simple interactive division loop; continue skips division by zero, and entering -1 as the denominator ends the loop.
Denominator = 0
Numerator = 0
while Denominator != -1:
    print("Enter the Numerator")
    Numerator = float(input())
    print("Enter the Denominator:")
    Denominator = float(input())
    if Denominator == 0:
        continue
    print(Numerator / Denominator)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 10 19:31:48 2019
@author: hsauro
"""
import math, numpy as np, time
import pylab, scipy.stats
np.random.seed (1232)
# Generate some synthetic data
dx=[]
dy=[]
theta = [20, 6, 0.4]
for x in np.arange (0, 40, 0.5):
dx.append (x)
dy.append (np.random.normal(theta[0]+theta[1]*math.sin(2.*math.pi/24.*x+theta[2]), 2))
pylab.plot (dx, dy)
pylab.show()
np.random.seed(int (time.time()))
# Fit this Model to the data
def model (x,theta):
return theta[0] + theta[1]*math.sin((2.0*math.pi/24.0)*x + theta[2])
# Chi Squared
def chi2(dx, dy, theta):
s = 0.0
for i in range(len(dx)):
s += (model(dx[i], theta) - dy[i])**2
t = s/len (dx)
return t
# Likelihood function
def P(dx, dy, theta):
#return -chi2(dx, dy, theta)
return math.exp (-chi2(dx, dy, theta))
# Initial guess for model parameters
theta1 = []; theta2 = []; theta3 = []
theta_current = [0.,0.,0.]
theta_proposed = [0.,0.,0.]
P_current = P(dx, dy, theta_current)
chain = []
for i in range(40000):
theta_proposed = [theta_current[0]+0.1*np.random.randn(),
theta_current[1]+0.1*np.random.randn(),
theta_current[2]+0.1*np.random.randn()]
P_proposed = P (dx, dy, theta_proposed)
ratio = min (1, P_proposed/P_current)
r = np.random.rand()
if ratio > r:
theta_current = theta_proposed
P_current = P_proposed
if i >= 10000: # save chain only after burnin
chain.append(theta_current)
theta1.append (theta_current[0])
theta2.append (theta_current[1])
theta3.append (theta_current[2])
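# Rough convergence diagnostics (an illustrative addition, not part of the original
# fitting code): estimate the post-burn-in acceptance rate from how often the stored
# chain moves, and compare the two chain halves as a crude stationarity check.
acceptance_rate = np.mean(np.diff(theta1) != 0)
half = len(theta1) // 2
print("Approx. acceptance rate:", acceptance_rate)
print("theta1 mean, first half vs second half:", np.mean(theta1[:half]), np.mean(theta1[half:]))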
# Plot model and fitted data
My = []
for x in dx:
My.append(model(x, theta_current))
pylab.plot (dx, dy, "o", markerfacecolor='red', markeredgewidth=0.5, markeredgecolor='black')
pylab.plot(dx, My, ".", markerfacecolor='blue', markeredgecolor='blue')
pylab.xlabel ('Time')
#pylab.savefig('fitted.pdf')
pylab.show()
fig, (ax1, ax2, ax3) = pylab.subplots(1, 3, figsize=(15,5))
ax1.hist(theta1, 30, color = 'lightblue', edgecolor = 'black',)
ax2.hist(theta2, 30, color = 'lightblue', edgecolor = 'black',)
ax3.hist(theta3, 30, color = 'lightblue', edgecolor = 'black',)
pylab.savefig ('chainDistrib.pdf')
pylab.show()
print (np.mean (theta1), np.mean (theta2), np.mean (theta3))
print ("Theta 1 = ", np.percentile (theta1, 2.5), " 95% Percentaile = ", np.percentile (theta1, 97.5))
print ("Theta 2 = ", np.percentile (theta2, 2.5), " 95% Percentaile = ", np.percentile (theta2, 97.5))
print ("Theta 3 = ", np.percentile (theta3, 2.5), " 95% Percentaile = ", np.percentile (theta3, 97.5))
# Create a histogram for each parameter
hist1 = np.histogram(theta1, bins=100)
hist2 = np.histogram(theta2, bins=100)
hist3 = np.histogram(theta3, bins=100)
# Convert the histogram into a distribution function
theta1_dist = scipy.stats.rv_histogram (hist1)
theta2_dist = scipy.stats.rv_histogram (hist2)
theta3_dist = scipy.stats.rv_histogram (hist3)
# Draw samples for each parameter
thetaList = []
# Create 200 sets of thetas
for i in range (200):
t1 = theta1_dist.rvs()
t2 = theta2_dist.rvs()
t3 = theta3_dist.rvs()
thetaList.append ([t1, t2, t3])
xd = []
ydmean = []; ydpercentile_plus = []; ydpercentile_minus = []
for x in np.arange (0, 40, 1):
yd = []
xd.append (x);
for i in range (100):
yd.append (model (x,thetaList[i]))
ydmean.append (np.mean (yd))
ydpercentile_plus.append (np.percentile (yd, 97.5))
ydpercentile_minus.append (np.percentile (yd, 2.5))
# Plot envelope graph
pylab.plot (xd, ydmean, 'k-')
pylab.fill_between(xd, ydpercentile_minus, ydpercentile_plus, color='orange', alpha=0.4)
pylab.savefig ('ShadedEnvelope.pdf')
pylab.show()
# Plot multiple solution lines
for i in range (100):
xd = []; yd = []
for x in np.arange (0, 40, 1):
xd.append (x);
yd.append (model (x,thetaList[i]))
pylab.plot (xd, yd, alpha=0.2, color='royalblue')
pylab.show()
# Plot scatter of solutions at each data point
for i in range (50):
xd = []; yd = []
for x in np.arange (0, 40, 1.5):
xd.append (x);
yd.append (model (x,thetaList[i]))
pylab.scatter (xd, yd, alpha=0.2, color='firebrick')
pylab.show()
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QModelIndex, QVariant
from gui.main_window.docks.properties.property_widgets.data import *
from gui.main_window.docks.properties.property_widgets.types import *
class PropertiesDelegate(QtWidgets.QStyledItemDelegate):
def __init__(self, parent=None):
super(PropertiesDelegate, self).__init__(parent)
def createEditor(self, parent, options, index):
if index.column() == 0:
return super(PropertiesDelegate, self).createEditor(parent, options, index)
if index.internalPointer().info().typeString() == EnumType:
return self.createEnumEditor(parent, options, index)
elif index.internalPointer().info().typeString() == FloatType:
return self.createFloatEditor(parent, options, index)
elif index.internalPointer().info().typeString() == IntType:
return self.createIntEditor(parent, options, index)
elif index.internalPointer().info().typeString() == BoolType:
return self.createBoolEditor(parent, options, index)
return super(PropertiesDelegate, self).createEditor(parent, options, index)
def createEnumEditor(self, parent, options, index):
data = index.internalPointer()
comboBox = QtWidgets.QComboBox(parent)
comboBox.addItems(data.info().enumOptions())
comboBox.setCurrentText(data.value())
return comboBox
def createBoolEditor(self, parent, options, index):
data = index.internalPointer()
comboBox = QtWidgets.QComboBox(parent)
comboBox.addItem("true")
comboBox.addItem("false")
comboBox.setCurrentText(str(data.value()).lower())
return comboBox
def createFloatEditor(self, parent, options, index):
data = index.internalPointer()
spinBox = QtWidgets.QDoubleSpinBox(parent)
        spinBox.setRange(-pow(2, 31), pow(2, 31) - 1)
spinBox.setFocusPolicy(QtCore.Qt.StrongFocus )
spinBox.setDecimals(15)
spinBox.setSingleStep(0.05)
spinBox.setValue(data.value())
return spinBox
def createIntEditor(self, parent, options, index):
data = index.internalPointer()
spinBox = QtWidgets.QSpinBox(parent)
        spinBox.setRange(-pow(2, 31), pow(2, 31) - 1)
spinBox.setFocusPolicy(QtCore.Qt.StrongFocus )
spinBox.setValue(data.value())
return spinBox
def setEditorData(self, editor, index):
data = index.internalPointer()
if isinstance(editor, QtWidgets.QComboBox):
if index.internalPointer().info().typeString() == BoolType:
editor.setCurrentText(str(data.value()).lower())
else:
editor.setCurrentText(data.value())
elif isinstance(editor, QtWidgets.QSpinBox) or isinstance(editor, QtWidgets.QDoubleSpinBox):
editor.setValue(data.value())
elif isinstance(editor, QtWidgets.QLineEdit):
editor.setText(data.value())
else:
super(PropertiesDelegate, self).setEditorData(editor, index)
def setModelData(self, editor, model, index):
if isinstance(editor, QtWidgets.QComboBox):
if index.internalPointer().info().typeString() == BoolType:
model.setData(index, True if editor.currentText() == "true" else False, QtCore.Qt.EditRole)
else:
model.setData(index, editor.currentText(), QtCore.Qt.EditRole)
elif isinstance(editor, QtWidgets.QSpinBox) or isinstance(editor, QtWidgets.QDoubleSpinBox):
model.setData(index, editor.value(), QtCore.Qt.EditRole)
elif isinstance(editor, QtWidgets.QLineEdit):
model.setData(index, editor.text(), QtCore.Qt.EditRole)
else:
super(PropertiesDelegate, self).setModelData(editor, model, index)
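# Usage sketch (illustrative; `treeView` is an assumed QTreeView bound to the
# properties model): installing the delegate on the view makes the type-specific
# editors above handle edits in the value column.
#
#   treeView.setItemDelegate(PropertiesDelegate(treeView))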
|
"""
Configuration file for capturing TD Ameritrade data into PostgreSQL database
@author: Nick Bultman, August 2021
"""
import numpy as np
import pandas as pd
import os
import copy
# Define symbol lookup path - make sure 'symbol' is a column name
symbolpath = '/path/to/symbols.csv'
# Define chrome webdriver path
webdriverpath = '/path/to/chromedriver'
# Define TD Ameritrade Credentials
token_path = '/path/to/token/token.pickle'
api_key = 'insert api key here'
redirect_uri = 'https://localhost'
# Define PostgreSQL Database Credentials
db = 'dbname'
dbuser = 'dbusername'
dbpassword = 'dbpassword'
dbhost = 'host here'
dbport = 'port here'
# Functions to be used
def rsi(values):
up = values[values>0].mean()
down = -1*values[values<0].mean()
return 100 * up / (up + down)
def bbands(price, length=30, numsd=2):
""" returns average, upper band, and lower band"""
# ave = pd.stats.moments.rolling_mean(price,length)
ave = price.rolling(window = length, center = False).mean()
# sd = pd.stats.moments.rolling_std(price,length)
sd = price.rolling(window = length, center = False).std()
upband = ave + (sd*numsd)
dnband = ave - (sd*numsd)
return np.round(ave,3), np.round(upband,3), np.round(dnband,3)
def aroon(df, tf=25):
aroonup = []
aroondown = []
x = tf
while x< len(df['Date']):
aroon_up = ((df['High'][x-tf:x].tolist().index(max(df['High'][x-tf:x])))/float(tf))*100
aroon_down = ((df['Low'][x-tf:x].tolist().index(min(df['Low'][x-tf:x])))/float(tf))*100
aroonup.append(aroon_up)
aroondown.append(aroon_down)
x+=1
return aroonup, aroondown
def abands(df):
# df['AB_Middle_Band'] = pd.rolling_mean(df['Close'], 20)
df['AB_Middle_Band'] = df['Close'].rolling(window = 20, center=False).mean()
# High * ( 1 + 4 * (High - Low) / (High + Low))
df['aupband'] = df['High'] * (1 + 4 * (df['High']-df['Low'])/(df['High']+df['Low']))
df['AB_Upper_Band'] = df['aupband'].rolling(window=20, center=False).mean()
# Low *(1 - 4 * (High - Low)/ (High + Low))
df['adownband'] = df['Low'] * (1 - 4 * (df['High']-df['Low'])/(df['High']+df['Low']))
df['AB_Lower_Band'] = df['adownband'].rolling(window=20, center=False).mean()
def STOK(df, n):
df['STOK'] = ((df['Close'] - df['Low'].rolling(window=n, center=False).mean()) / (df['High'].rolling(window=n, center=False).max() - df['Low'].rolling(window=n, center=False).min())) * 100
df['STOD'] = df['STOK'].rolling(window = 3, center=False).mean()
def CMFlow(df, tf):
CHMF = []
MFMs = []
MFVs = []
x = tf
while x < len(df['Date']):
PeriodVolume = 0
volRange = df['Volume'][x-tf:x]
for eachVol in volRange:
PeriodVolume += eachVol
MFM = ((df['Close'][x] - df['Low'][x]) - (df['High'][x] - df['Close'][x])) / (df['High'][x] - df['Low'][x])
MFV = MFM*PeriodVolume
MFMs.append(MFM)
MFVs.append(MFV)
x+=1
y = tf
while y < len(MFVs):
PeriodVolume = 0
        # use the same y-based window as the MFVs being summed below
        volRange = df['Volume'][y-tf:y]
for eachVol in volRange:
PeriodVolume += eachVol
consider = MFVs[y-tf:y]
tfsMFV = 0
for eachMFV in consider:
tfsMFV += eachMFV
tfsCMF = tfsMFV/PeriodVolume
CHMF.append(tfsCMF)
y+=1
return CHMF
def psar(df, iaf = 0.02, maxaf = 0.2):
length = len(df)
dates = (df['Date'])
high = (df['High'])
low = (df['Low'])
orig_close = copy.deepcopy(df['Close'])
close = (df['Close'])
psar = df['Close'][0:len(df['Close'])]
psarbull = [None] * length
psarbear = [None] * length
bull = True
af = iaf
ep = df['Low'][0]
hp = df['High'][0]
lp = df['Low'][0]
for i in range(2,length):
if bull:
psar[i] = psar[i - 1] + af * (hp - psar[i - 1])
else:
psar[i] = psar[i - 1] + af * (lp - psar[i - 1])
reverse = False
if bull:
if df['Low'][i] < psar[i]:
bull = False
reverse = True
psar[i] = hp
lp = df['Low'][i]
af = iaf
else:
if df['High'][i] > psar[i]:
bull = True
reverse = True
psar[i] = lp
hp = df['High'][i]
af = iaf
if not reverse:
if bull:
if df['High'][i] > hp:
hp = df['High'][i]
af = min(af + iaf, maxaf)
if df['Low'][i - 1] < psar[i]:
psar[i] = df['Low'][i - 1]
if df['Low'][i - 2] < psar[i]:
psar[i] = df['Low'][i - 2]
else:
if df['Low'][i] < lp:
lp = df['Low'][i]
af = min(af + iaf, maxaf)
if df['High'][i - 1] > psar[i]:
psar[i] = df['High'][i - 1]
if df['High'][i - 2] > psar[i]:
psar[i] = df['High'][i - 2]
if bull:
psarbull[i] = psar[i]
else:
psarbear[i] = psar[i]
# return {"dates":dates, "high":high, "low":low, "close":close, "psar":psar, "psarbear":psarbear, "psarbull":psarbull}
# return psar, psarbear, psarbull
df['psar'] = psar
df['Close'] = orig_close
# df['psarbear'] = psarbear
# df['psarbull'] = psarbull
def CCI(df, n, constant):
TP = (df['High'] + df['Low'] + df['Close']) / 3
CCI = pd.Series((TP - TP.rolling(window=n, center=False).mean()) / (constant * TP.rolling(window=n, center=False).std())) #, name = 'CCI_' + str(n))
return CCI
# Keltner Channel
def KELCH(df, n):
KelChM = pd.Series(((df['High'] + df['Low'] + df['Close']) / 3).rolling(window =n, center=False).mean(), name = 'KelChM_' + str(n))
KelChU = pd.Series(((4 * df['High'] - 2 * df['Low'] + df['Close']) / 3).rolling(window =n, center=False).mean(), name = 'KelChU_' + str(n))
KelChD = pd.Series(((-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3).rolling(window =n, center=False).mean(), name = 'KelChD_' + str(n))
return KelChM, KelChD, KelChU
def DMI(df, period):
    # Note: assumes df already has an 'ATR' column computed elsewhere.
df['UpMove'] = df['High'] - df['High'].shift(1)
df['DownMove'] = df['Low'].shift(1) - df['Low']
df['Zero'] = 0
df['PlusDM'] = np.where((df['UpMove'] > df['DownMove']) & (df['UpMove'] > df['Zero']), df['UpMove'], 0)
df['MinusDM'] = np.where((df['UpMove'] < df['DownMove']) & (df['DownMove'] > df['Zero']), df['DownMove'], 0)
df['plusDI'] = 100 * (df['PlusDM']/df['ATR']).ewm(span=period,min_periods=0,adjust=True,ignore_na=False).mean()
df['minusDI'] = 100 * (df['MinusDM']/df['ATR']).ewm(span=period,min_periods=0,adjust=True,ignore_na=False).mean()
df['ADX'] = 100 * (abs((df['plusDI'] - df['minusDI'])/(df['plusDI'] + df['minusDI']))).ewm(span=period,min_periods=0,adjust=True,ignore_na=False).mean()
def MFI(df):
# typical price
df['tp'] = (df['High']+df['Low']+df['Close'])/3
#raw money flow
df['rmf'] = df['tp'] * df['Volume']
    # positive and negative money flow (raw money flow on up vs. down days)
    df['pmf'] = np.where(df['tp'] > df['tp'].shift(1), df['rmf'], 0)
    df['nmf'] = np.where(df['tp'] < df['tp'].shift(1), df['rmf'], 0)
# money flow ratio
df['mfr'] = df['pmf'].rolling(window=14,center=False).sum()/df['nmf'].rolling(window=14,center=False).sum()
df['Money_Flow_Index'] = 100 - 100 / (1 + df['mfr'])
def ichimoku(df):
# Turning Line
period9_high = df['High'].rolling(window=9,center=False).max()
period9_low = df['Low'].rolling(window=9,center=False).min()
df['turning_line'] = (period9_high + period9_low) / 2
# Standard Line
period26_high = df['High'].rolling(window=26,center=False).max()
period26_low = df['Low'].rolling(window=26,center=False).min()
df['standard_line'] = (period26_high + period26_low) / 2
# Leading Span 1
df['ichimoku_span1'] = ((df['turning_line'] + df['standard_line']) / 2).shift(26)
# Leading Span 2
period52_high = df['High'].rolling(window=52,center=False).max()
period52_low = df['Low'].rolling(window=52,center=False).min()
df['ichimoku_span2'] = ((period52_high + period52_low) / 2).shift(26)
# The most current closing price plotted 22 time periods behind (optional)
df['chikou_span'] = df['Close'].shift(-22) # 22 according to investopedia
def WillR(df):
highest_high = df['High'].rolling(window=14,center=False).max()
lowest_low = df['Low'].rolling(window=14,center=False).min()
df['WillR'] = (-100) * ((highest_high - df['Close']) / (highest_high - lowest_low))
def MINMAX(df):
df['MIN_Volume'] = df['Volume'].rolling(window=14,center=False).min()
df['MAX_Volume'] = df['Volume'].rolling(window=14,center=False).max()
def KAMA(price, n=10, pow1=2, pow2=30):
''' kama indicator '''
''' accepts pandas dataframe of prices '''
absDiffx = abs(price - price.shift(1) )
ER_num = abs( price - price.shift(n) )
ER_den = absDiffx.rolling(window=n,center=False).sum()
ER = ER_num / ER_den
sc = ( ER*(2.0/(pow1+1)-2.0/(pow2+1.0))+2/(pow2+1.0) ) ** 2.0
answer = np.zeros(sc.size)
N = len(answer)
first_value = True
for i in range(N):
if sc[i] != sc[i]:
answer[i] = np.nan
else:
if first_value:
answer[i] = price[i]
first_value = False
else:
answer[i] = answer[i-1] + sc[i] * (price[i] - answer[i-1])
return answer
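# Example usage of a few of the indicator helpers above (an illustrative sketch;
# the synthetic OHLCV data below is made up and not part of the capture pipeline):
if __name__ == "__main__":
    n_rows = 60
    rng = np.random.default_rng(0)
    close = 100 + np.cumsum(rng.normal(0, 1, n_rows))
    demo = pd.DataFrame({
        'Date': pd.date_range('2021-01-01', periods=n_rows, freq='D'),
        'High': close + 1.0,
        'Low': close - 1.0,
        'Close': close,
        'Volume': rng.integers(1_000, 10_000, n_rows),
    })
    ave, upband, dnband = bbands(demo['Close'], length=20, numsd=2)
    kel_m, kel_d, kel_u = KELCH(demo, 20)
    WillR(demo)  # adds a 'WillR' column in place
    MFI(demo)    # adds a 'Money_Flow_Index' column in place
    print(demo[['Close', 'WillR', 'Money_Flow_Index']].tail())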
|
#-*- coding: utf-8 -*-
import logging
import os
import sys
import scrapydo
import time
import utils
import config
from sqlhelper import SqlHelper
from ipproxytool.spiders.proxy.xicidaili import XiCiDaiLiSpider
from ipproxytool.spiders.proxy.sixsixip import SixSixIpSpider
from ipproxytool.spiders.proxy.ip181 import IpOneEightOneSpider
from ipproxytool.spiders.proxy.kuaidaili import KuaiDaiLiSpider
from ipproxytool.spiders.proxy.gatherproxy import GatherproxySpider
from ipproxytool.spiders.proxy.hidemy import HidemySpider
from ipproxytool.spiders.proxy.proxylistplus import ProxylistplusSpider
from ipproxytool.spiders.proxy.freeproxylists import FreeProxyListsSpider
from ipproxytool.spiders.proxy.peuland import PeulandSpider
from ipproxytool.spiders.proxy.usproxy import UsProxySpider
scrapydo.setup()
if __name__ == '__main__':
os.chdir(sys.path[0])
    # Python 2 only: force the default string encoding to UTF-8
    reload(sys)
    sys.setdefaultencoding('utf-8')
if not os.path.exists('log'):
os.makedirs('log')
logging.basicConfig(
filename = 'log/spider.log',
format = '%(levelname)s %(asctime)s: %(message)s',
level = logging.DEBUG
)
sql = SqlHelper()
spiders = [
XiCiDaiLiSpider,
SixSixIpSpider,
IpOneEightOneSpider,
        # KuaiDaiLiSpider,  # disabled: the site added a JavaScript check before access (anti-scraping)
GatherproxySpider,
HidemySpider,
ProxylistplusSpider,
FreeProxyListsSpider,
        # PeulandSpider,  # disabled: the target site is no longer available
UsProxySpider,
]
while True:
utils.log('*******************run spider start...*******************')
command = "DELETE FROM {table} where save_time < SUBDATE(NOW(), INTERVAL 0.5 DAY)".format(
table = config.free_ipproxy_table)
sql.execute(command)
for spider in spiders:
scrapydo.run_spider(spider)
utils.log('*******************run spider waiting...*******************')
time.sleep(600)
|
class Solution:
def isAdditiveNumber(self, num: str) -> bool:
n = len(num)
def dfs(firstNum: int, secondNum: int, s: int) -> bool:
if s == len(num):
return True
thirdNum = firstNum + secondNum
thirdNumStr = str(thirdNum)
return num.find(thirdNumStr, s) == s and dfs(secondNum, thirdNum, s + len(thirdNumStr))
# num[0..i] = firstNum
for i in range(n // 2):
if i > 0 and num[0] == '0':
return False
firstNum = int(num[:i + 1])
# num[i + 1..j] = secondNum
# len(thirdNum) >= max(len(firstNum), len(secondNum))
j = i + 1
while max(i, j - i) < n - j:
if j > i + 1 and num[i + 1] == '0':
break
secondNum = int(num[i + 1:j + 1])
if dfs(firstNum, secondNum, j + 1):
return True
j += 1
return False
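# Example usage (illustrative): "112358" is additive because 1+1=2, 1+2=3, 2+3=5, 3+5=8,
# and "199100199" because 1+99=100, 99+100=199; "1023" has no valid split.
if __name__ == "__main__":
    sol = Solution()
    print(sol.isAdditiveNumber("112358"))     # expected: True
    print(sol.isAdditiveNumber("199100199"))  # expected: True
    print(sol.isAdditiveNumber("1023"))       # expected: False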
|
# PyAgent.py
import random
from enum import Enum
import Action
import Orientation
class knowledge(Enum):
    Unknown = 0
Safe = 1
Stench = 2
PossibleWumpus = 3
Wumpus = 4
x = 1
y = 1
gold = False
arrow = True
orientation = Orientation.RIGHT
width = None
height = None
knowledgeBase = {}
visited = {}
wumpusFound = False
wumpusLocation = None
possibleWumpuses = []
bufferedRoute = []
pathHomeIndex = 0
endingOrientation = None
playingAgain = None
shootAgain = None
goldLoc = None
def PyAgent_Constructor ():
print("PyAgent_Constructor")
def PyAgent_Destructor ():
print("PyAgent_Destructor")
def PyAgent_Initialize ():
print("PyAgent_Initialize")
global x,y,gold,arrow,orientation, endingOrientation, pathHomeIndex, playingAgain
print("GoldGG", gold, bufferedRoute)
x = 1
y = 1
gold = False
arrow = True
endingOrientation = orientation
orientation = Orientation.RIGHT
if playingAgain is False:
playingAgain = True
if playingAgain is None:
playingAgain = False
print("@@Playing again ", playingAgain)
def getPrevLocation():
global x, y
prevX = x
prevY = y
if orientation % 4 is Orientation.RIGHT:
prevX-=1
elif orientation % 4 is Orientation.LEFT:
prevX+=1
elif orientation % 4 is Orientation.UP:
prevY-=1
elif orientation % 4 is Orientation.DOWN:
        prevY += 1
return prevX, prevY
def getLocationInfront():
global x, y
nextX = x
nextY = y
if orientation is Orientation.RIGHT:
nextX += 1
elif orientation is Orientation.LEFT:
nextX -= 1
elif orientation is Orientation.UP:
nextY += 1
elif orientation is Orientation.DOWN:
nextY -= 1
return nextX, nextY
def getNeighbors(location):
x,y = location
return [(x+1, y), (x,y+1), (x,y-1), (x-1,y)]
def inBounds(location):
x,y = location
return x > 0 and (width is None or x <= width) and y > 0 and (height is None or y <= height)
def willBump(x, y):
global width, height
if width is not None and x > width:
return True
elif height is not None and y > height:
return True
else:
return False
def wumpusPlausible(location):
global knowledgeBase
x,y = location
if location in knowledgeBase.keys() and knowledgeBase[location] is knowledge.Wumpus:
return True
for neighbor in getNeighbors((x,y)):
if inBounds(neighbor) and neighbor in visited.keys() and knowledgeBase[neighbor] is knowledge.Safe:
print("No wumpus in ", location)
return False
return True
def getCommandsToFaceLeft(orientation):
if orientation == Orientation.RIGHT:
return [Action.TURNRIGHT, Action.TURNRIGHT]
elif orientation == Orientation.LEFT:
return []
elif orientation == Orientation.DOWN:
return [Action.TURNRIGHT]
else:
return [Action.TURNLEFT]
def getCommandsToFaceRight(orientation):
if orientation == Orientation.LEFT:
return [Action.TURNRIGHT, Action.TURNRIGHT]
elif orientation == Orientation.RIGHT:
return []
elif orientation == Orientation.UP:
return [Action.TURNRIGHT]
else:
return [Action.TURNLEFT]
def getCommandsToFaceDown(orientation):
if orientation is Orientation.UP:
return [Action.TURNRIGHT, Action.TURNRIGHT]
elif orientation is Orientation.DOWN:
return []
elif orientation is Orientation.LEFT:
return [Action.TURNLEFT]
else:
return [Action.TURNRIGHT]
def getCommandsToFaceUp(orientation):
if orientation is Orientation.DOWN:
return [Action.TURNRIGHT, Action.TURNRIGHT]
elif orientation is Orientation.UP:
return []
elif orientation is Orientation.RIGHT:
return [Action.TURNLEFT]
else:
return [Action.TURNRIGHT]
def getCommandsToFace(curOrientation, desiredOrientation):
if desiredOrientation is Orientation.UP:
return getCommandsToFaceUp(curOrientation)
if desiredOrientation is Orientation.DOWN:
return getCommandsToFaceDown(curOrientation)
if desiredOrientation is Orientation.RIGHT:
return getCommandsToFaceRight(curOrientation)
if desiredOrientation is Orientation.LEFT:
        return getCommandsToFaceLeft(curOrientation)
def leftOrDownTieBreaker():
if x == 1:
return True, False
if y == 1:
return False, True
if orientation is Orientation.UP or orientation is Orientation.LEFT:
return True, False
else:
return False, True
def forwardToBump(direction):
result = [Action.GOFORWARD]
timesForward = 0
if direction is Orientation.LEFT:
result *= x-1
elif direction is Orientation.DOWN:
result *= y-1
else:
result = []
print("RESULT!", result)
return result
def planRouteHome():
global wumpusFound, wumpusLocation, x,y
if not wumpusFound:
wumpusLocation = (x+2, y+2)
#Go down if the wumpus is in the first column and we will run into the wumpus, or if the wumpus is not in our column
goDown = (wumpusLocation[0] == 1 and wumpusLocation[1] <= y) or wumpusLocation[1] > y or wumpusLocation[0] != x
#Go left if the wumpus is in the first row and we will run into the wumpus, or if the wumpus is not in our row
goLeft = (wumpusLocation[1] == 1 and wumpusLocation[0] <= x) or wumpusLocation[0] > x or wumpusLocation[1] != y
global bufferedRoute
if goLeft and goDown:
goLeft, goDown = leftOrDownTieBreaker()
if goDown:
print("Going down")
bufferedRoute += getCommandsToFaceDown(orientation)
bufferedRoute += forwardToBump(Orientation.DOWN)
bufferedRoute += getCommandsToFaceLeft(Orientation.DOWN)
bufferedRoute += forwardToBump(Orientation.LEFT)
bufferedRoute.append(Action.CLIMB)
print(bufferedRoute)
elif goLeft:
print("GOing left")
bufferedRoute += getCommandsToFaceLeft(orientation)
bufferedRoute += forwardToBump(Orientation.LEFT)
bufferedRoute += getCommandsToFaceDown(Orientation.LEFT)
bufferedRoute += forwardToBump(Orientation.DOWN)
bufferedRoute.append(Action.CLIMB)
else:
#We can't get home because we are on the left or on the bottom and the path is blocked
if x == 1:
#On the bottom
bufferedRoute += getCommandsToFaceUp(orientation)
bufferedRoute.append(Action.GOFORWARD)
bufferedRoute += getCommandsToFaceLeft(Orientation.UP)
bufferedRoute += forwardToBump(Orientation.LEFT)
bufferedRoute += getCommandsToFaceDown(Orientation.LEFT)
bufferedRoute += forwardToBump(Orientation.DOWN)
bufferedRoute += [Action.GOFORWARD, Action.CLIMB]
if y == 1:
#On the left
bufferedRoute += getCommandsToFaceRight(orientation)
bufferedRoute.append(Action.GOFORWARD)
bufferedRoute += getCommandsToFaceDown(Orientation.RIGHT)
bufferedRoute += forwardToBump(Orientation.DOWN)
bufferedRoute += getCommandsToFaceLeft(Orientation.DOWN)
bufferedRoute += forwardToBump(Orientation.LEFT)
bufferedRoute += [Action.GOFORWARD, Action.CLIMB]
#Wumpus not found
#TODO
def locationSafeAndUnexplored(nextLocation):
global knowledgeBase
return nextLocation not in visited.keys() and (nextLocation not in knowledgeBase.keys() or knowledgeBase[nextLocation] is knowledge.Safe or knowledgeBase[nextLocation] is knowledge.PossibleWumpus and wumpusFound)
def turn(action):
if action is Action.TURNRIGHT:
return turnRight()
elif action is Action.TURNLEFT:
return turnLeft()
else:
print("Trying to turn with non turn action", action)
exit(1)
def turnRight():
global orientation
orientation = (orientation - 1) % 4
return Action.TURNRIGHT
def turnLeft():
global orientation
orientation = (orientation + 1) % 4
return Action.TURNLEFT
# Only use this on neighbors
def facePoint(otherLocation):
otherX,otherY = otherLocation
global orientation
if x < otherX:
if orientation is Orientation.DOWN:
return turnLeft()
else:
return turnRight()
elif x > otherX:
if orientation is Orientation.UP:
return turnLeft()
else:
return turnRight()
elif y < otherY:
if orientation is Orientation.LEFT:
return turnRight()
else:
return turnLeft()
elif y > otherY:
if orientation is Orientation.LEFT:
return turnLeft()
else:
return turnRight()
def getOppositeTurn(turn):
if turn == Action.TURNLEFT:
return turnRight()
if turn == Action.TURNRIGHT:
return turnLeft()
else:
print("Can't unturn a non turn")
exit(1)
def getOppositeDirection(direction):
if direction is Orientation.RIGHT:
return Orientation.LEFT
if direction is Orientation.LEFT:
return Orientation.RIGHT
if direction is Orientation.UP:
return Orientation.DOWN
if direction is Orientation.DOWN:
return Orientation.UP
def goForward():
global x,y, orientation
if orientation is Orientation.RIGHT:
x += 1
if orientation is Orientation.LEFT:
x -= 1
if orientation is Orientation.UP:
y += 1
if orientation is Orientation.DOWN:
y -= 1
return Action.GOFORWARD
def PyAgent_Process (stench,breeze,glitter,bump,scream):
# time.sleep(1)
global x, y, orientation, width, height, gold, knowledgeBase, pathHomeIndex, bufferedRoute, arrow, visited, goldLoc
perceptStr = ""
if (stench == 1):
perceptStr += "Stench=True,"
else:
perceptStr += "Stench=False,"
if (breeze == 1):
perceptStr += "Breeze=True,"
else:
perceptStr += "Breeze=False,"
if (glitter == 1):
perceptStr += "Glitter=True,"
else:
perceptStr += "Glitter=False,"
if (bump == 1):
perceptStr += "Bump=True,"
else:
perceptStr += "Bump=False,"
if (scream == 1):
perceptStr += "Scream=True"
else:
perceptStr += "Scream=False"
print("PyAgent_Process: " + perceptStr + '\n', x, y, gold, arrow, orientation, width, height, wumpusLocation)
if x == 1 and y == 1 and gold:
global endingOrientation
endingOrientation = orientation
return Action.CLIMB
if playingAgain:
print("Playing again!")
if goldLoc is not None:
global shootAgain
if scream:
shootAgain = True
if not arrow and shootAgain is None:
shootAgain = False
if not gold:
if x < goldLoc[0]:
toFaceRight = getCommandsToFaceRight(orientation)
if len(toFaceRight) > 0:
return turn(toFaceRight[0])
if stench and arrow:
arrow = False
return Action.SHOOT
return goForward()
if y < goldLoc[1]:
toFaceup = getCommandsToFaceUp(orientation)
if len(toFaceup) > 0:
return turn(toFaceup[0])
if stench and arrow and (shootAgain is None or shootAgain):
arrow = False
return Action.SHOOT
return goForward()
if glitter:
gold = True
return Action.GRAB
else:
print("Error, no action to execute")
else:
if y > 1:
commands = getCommandsToFaceDown(orientation)
if len(commands) > 0:
return turn(commands[0])
return goForward()
if x > 1:
commands = getCommandsToFaceLeft(orientation)
if len(commands) > 0:
print("going to Facing left")
print(commands)
return turn(commands[0])
return goForward()
return Action.CLIMB
else:
print("Playing again after not finding gold")
pass
    if bump == 1:
if orientation is Orientation.RIGHT:
x -= 1
if width is None:
width = x
if orientation is Orientation.LEFT:
x += 1
if orientation is Orientation.UP:
y -= 1
if height is None:
height = y
if orientation is Orientation.DOWN:
y += 1
print(knowledgeBase, "\n")
print(visited, "\n")
    if glitter == 1:
gold = True
planRouteHome()
global goldLoc
goldLoc = (x,y)
return Action.GRAB
if gold:
print("On buffered route: ", pathHomeIndex, bufferedRoute)
action = bufferedRoute[pathHomeIndex]
if action is Action.GOFORWARD:
nextLocation = getLocationInfront()
if wumpusPlausible(nextLocation) and arrow:
arrow = False
return Action.SHOOT
pathHomeIndex+=1
return goForward()
else:
turnAction = bufferedRoute[pathHomeIndex]
pathHomeIndex+=1
return turn(turnAction)
nextLocation = getLocationInfront()
xSaved = x
ySaved = y
UpdateKnowledgeBase(stench, xSaved, ySaved)
nextMove = PickNextMove(knowledgeBase, nextLocation, x, y)
print("PICK NEXT MOVE", nextMove)
return nextMove
def PickNextMove(knowledgeBase, nextLocation, x, y):
if inBounds(nextLocation) and locationSafeAndUnexplored(nextLocation):
# Explore directly in front of us
return goForward()
else:
neighbors = getNeighbors((x, y))
print("NAAYBORES", neighbors)
for neighbor in neighbors:
if inBounds(neighbor) and (locationSafeAndUnexplored(neighbor)):
print("Found neighbor")
return facePoint(neighbor)
# If we reach this point, no neighbor is unexplored and safe, we will pick a safe direction
print("No unvisited safe neighbor")
safeNeighbors = []
for neighbor in neighbors:
if inBounds(neighbor) and (neighbor in visited.keys() or neighbor in knowledgeBase.keys() and knowledgeBase[neighbor] is knowledge.Safe):
safeNeighbors.append(neighbor)
else:
print(neighbor, "Not safe")
if nextLocation in safeNeighbors:
safeNeighbors += [nextLocation] * 1
print(safeNeighbors)
if len(safeNeighbors) == 0:
return goForward()
face = random.randrange(len(safeNeighbors))
nextTarget = safeNeighbors[face]
if nextTarget is nextLocation:
return goForward()
else:
return facePoint(nextTarget)
def UpdateKnowledgeBase(stench, x, y):
global knowledgeBase
if stench == 1:
knowledgeBase[(x, y)] = knowledge.Stench
print("XXX set stench", (x,y))
neighbors = getNeighbors((x, y))
possibleWumpusCount = 0
possibleWumpus = (-1,-1)
for neighbor in neighbors:
if neighbor in possibleWumpuses and (x,y) not in visited.keys():
global wumpusFound, wumpusLocation
wumpusFound = True
wumpusLocation = neighbor
knowledgeBase[wumpusLocation] = knowledge.Wumpus
print("FOund wumpus at ", wumpusLocation)
visited[(x,y)] = True
return
visited[(x,y)] = True
for neighbor in neighbors:
if inBounds(neighbor) and wumpusPlausible(neighbor):
possibleWumpusCount += 1
possibleWumpus = neighbor
# if true, then we are adding a new possible wumpus
if neighbor not in knowledgeBase.keys():
possibleWumpuses.append(neighbor)
knowledgeBase[neighbor] = knowledge.PossibleWumpus
if possibleWumpusCount == 1:
# Only one possible spot for the wumpus to be, therefore we have found it.
global wumpusFound, wumpusLocation
wumpusFound = True
wumpusLocation = possibleWumpus
knowledgeBase[wumpusLocation] = knowledge.Wumpus
print("FOund wumpus at ", wumpusLocation)
return
else:
visited[(x,y)] = True
knowledgeBase[(x,y)] = knowledge.Safe
if (x, y) in knowledgeBase.keys() and knowledgeBase[(x, y)] is knowledge.PossibleWumpus:
possibleWumpuses.remove((x, y))
knowledgeBase[(x, y)] = knowledge.Safe
toRemove = []
for possibleWumpus in possibleWumpuses:
if not wumpusPlausible(possibleWumpus):
                toRemove.append(possibleWumpus)
for nonWumpus in toRemove:
possibleWumpuses.remove(nonWumpus)
if len(possibleWumpuses) == 1:
global wumpusFound
wumpusFound = True
wumpusLocation = possibleWumpuses[0]
else:
knowledgeBase[(x, y)] = knowledge.Safe
for neighbor in getNeighbors((x, y)):
if inBounds(neighbor):
if neighbor not in knowledgeBase.keys():
knowledgeBase[neighbor] = knowledge.Safe
elif knowledgeBase[neighbor] is knowledge.PossibleWumpus:
possibleWumpuses.remove(neighbor)
if knowledgeBase[neighbor] is not knowledge.Stench:
knowledgeBase[neighbor] = knowledge.Safe
def PyAgent_GameOver (score):
print("PyAgent_GameOver: score = " + str(score))
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import *
from .extension_management_client import ExtensionManagementClient
__all__ = [
'AcquisitionOperation',
'AcquisitionOperationDisallowReason',
'AcquisitionOptions',
'Contribution',
'ContributionBase',
'ContributionConstraint',
'ContributionPropertyDescription',
'ContributionType',
'ExtensionAcquisitionRequest',
'ExtensionAuthorization',
'ExtensionBadge',
'ExtensionDataCollection',
'ExtensionDataCollectionQuery',
'ExtensionEventCallback',
'ExtensionEventCallbackCollection',
'ExtensionFile',
'ExtensionIdentifier',
'ExtensionLicensing',
'ExtensionManifest',
'ExtensionPolicy',
'ExtensionRequest',
'ExtensionShare',
'ExtensionState',
'ExtensionStatistic',
'ExtensionVersion',
'GraphSubjectBase',
'IdentityRef',
'InstallationTarget',
'InstalledExtension',
'InstalledExtensionQuery',
'InstalledExtensionState',
'InstalledExtensionStateIssue',
'LicensingOverride',
'PublishedExtension',
'PublisherFacts',
'ReferenceLinks',
'RequestedExtension',
'UserExtensionPolicy',
'ExtensionManagementClient'
]
|
import uuid
from django.test import TestCase
from core.tests import utils_fixtures
from conventions.models import Convention
from comments.models import Comment, CommentStatut
from users.models import User
class ConventionModelsTest(TestCase):
@classmethod
def setUpTestData(cls):
utils_fixtures.create_all()
def test_object_detail(self):
convention = Convention.objects.get(numero="0001")
user = User.objects.first()
comment = Comment.objects.create(
nom_objet="programme",
champ_objet="code_postal",
uuid_objet=uuid.uuid4(),
convention=convention,
user=user,
message="This is a message",
statut=CommentStatut.OUVERT,
)
self.assertEqual(comment.object_detail(), "Code postal de l'opération")
comment.nom_objet = "logement_edd"
self.assertEqual(comment.object_detail(), "Tableau de l'EDD simplifié")
comment.nom_objet = "reference_cadastrale"
self.assertEqual(comment.object_detail(), "Tableau des références cadastrales")
comment.nom_objet = "bailleur"
comment.champ_objet = "code_postal"
self.assertEqual(comment.object_detail(), "Code postal du bailleur")
comment.champ_objet = "signataire_nom"
self.assertEqual(comment.object_detail(), "Nom du signataire du bailleur")
comment.champ_objet = "unknown_field"
self.assertEqual(comment.object_detail(), "bailleur - unknown_field")
comment.nom_objet = "programme"
comment.champ_objet = "nb_logements"
self.assertEqual(comment.object_detail(), "Nombre de logements à conventionner")
comment.champ_objet = "type_operation"
self.assertEqual(comment.object_detail(), "Type d'opération")
comment.champ_objet = "unknown_field"
self.assertEqual(comment.object_detail(), "programme - unknown_field")
comment.nom_objet = "convention"
comment.champ_objet = "fond_propre"
self.assertEqual(comment.object_detail(), "Fonds propres finançant l'opération")
comment.champ_objet = "comments"
self.assertEqual(
comment.object_detail(), "Commentaires à l'attention de l'instructeur"
)
comment.champ_objet = "unknown_field"
self.assertEqual(comment.object_detail(), "convention - unknown_field")
comment.nom_objet = "lot"
comment.champ_objet = "annexe_caves"
self.assertEqual(comment.object_detail(), "Option caves dans les annexes")
comment.champ_objet = "annexe_sechoirs"
self.assertEqual(comment.object_detail(), "Option séchoirs dans les annexes")
comment.champ_objet = "unknown_field"
self.assertEqual(comment.object_detail(), "lot - unknown_field")
comment.nom_objet = "unknown_object"
comment.champ_objet = "annexe_caves"
self.assertEqual(comment.object_detail(), "unknown_object - annexe_caves")
comment.champ_objet = "unknown_field"
self.assertEqual(comment.object_detail(), "unknown_object - unknown_field")
|
# Copyright 2019-2020 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
sys.path.insert(0, '..')
import torch.utils.data
from torchvision.utils import save_image
from modules import *
from model import Model
from launcher import run
from checkpts.checkpointer import Checkpointer
from dlutils.pytorch import count_parameters
from configs.defaults import get_cfg_defaults
import lreq
from PIL import Image
import random
lreq.use_implicit_lreq.set(True)
def sample(cfg, logger):
torch.cuda.set_device(0)
model = Model(
startf=cfg.MODEL.START_CHANNEL_COUNT,
layer_count=cfg.MODEL.LAYER_COUNT,
maxf=cfg.MODEL.MAX_CHANNEL_COUNT,
latent_size=cfg.MODEL.LATENT_SPACE_SIZE,
truncation_psi=cfg.MODEL.TRUNCATIOM_PSI,
truncation_cutoff=cfg.MODEL.TRUNCATIOM_CUTOFF,
mapping_layers=cfg.MODEL.MAPPING_LAYERS,
channels=cfg.MODEL.CHANNELS,
generator=cfg.MODEL.GENERATOR,
encoder=cfg.MODEL.ENCODER)
model.cuda(0)
model.eval()
model.requires_grad_(False)
decoder = model.decoder
encoder = model.encoder
mapping_tl = model.mapping_tl
mapping_fl = model.mapping_fl
dlatent_avg = model.dlatent_avg
logger.info("Trainable parameters generator:")
count_parameters(decoder)
logger.info("Trainable parameters discriminator:")
count_parameters(encoder)
arguments = dict()
arguments["iteration"] = 0
model_dict = {
'discriminator_s': encoder,
'generator_s': decoder,
'mapping_tl_s': mapping_tl,
'mapping_fl_s': mapping_fl,
'dlatent_avg': dlatent_avg
}
checkpointer = Checkpointer(cfg,
model_dict,
{},
logger=logger,
save=False)
extra_checkpoint_data = checkpointer.load()
model.eval()
layer_count = cfg.MODEL.LAYER_COUNT
def encode(x):
Z, _ = model.encode(x, layer_count - 1, 1)
Z = Z.repeat(1, model.mapping_fl.num_layers, 1)
return Z
def decode(x):
layer_idx = torch.arange(2 * cfg.MODEL.LAYER_COUNT)[np.newaxis, :, np.newaxis]
ones = torch.ones(layer_idx.shape, dtype=torch.float32)
coefs = torch.where(layer_idx < model.truncation_cutoff, ones, ones)
# x = torch.lerp(model.dlatent_avg.buff.data, x, coefs)
return model.decoder(x, layer_count - 1, 1, noise=True)
path = cfg.DATASET.SAMPLES_PATH
im_size = 2 ** (cfg.MODEL.LAYER_COUNT + 1)
def do_attribute_traversal(path, attrib_idx, start, end):
img = np.asarray(Image.open(path))
if img.shape[2] == 4:
img = img[:, :, :3]
im = img.transpose((2, 0, 1))
x = torch.tensor(np.asarray(im, dtype=np.float32), device='cpu', requires_grad=True).cuda() / 127.5 - 1.
if x.shape[0] == 4:
x = x[:3]
factor = x.shape[2] // im_size
if factor != 1:
x = torch.nn.functional.avg_pool2d(x[None, ...], factor, factor)[0]
assert x.shape[2] == im_size
_latents = encode(x[None, ...].cuda())
latents = _latents[0, 0]
latents -= model.dlatent_avg.buff.data[0]
w0 = torch.tensor(np.load("principal_directions/direction_%d.npy" % attrib_idx), dtype=torch.float32)
attr0 = (latents * w0).sum()
latents = latents - attr0 * w0
def update_image(w):
with torch.no_grad():
w = w + model.dlatent_avg.buff.data[0]
w = w[None, None, ...].repeat(1, model.mapping_fl.num_layers, 1)
layer_idx = torch.arange(model.mapping_fl.num_layers)[np.newaxis, :, np.newaxis]
cur_layers = (7 + 1) * 2
mixing_cutoff = cur_layers
styles = torch.where(layer_idx < mixing_cutoff, w, _latents[0])
x_rec = decode(styles)
return x_rec
traversal = []
r = 7
inc = (end - start) / (r - 1)
for i in range(r):
W = latents + w0 * (attr0 + start)
im = update_image(W)
traversal.append(im)
attr0 += inc
res = torch.cat(traversal)
indices = [0, 1, 2, 3, 4, 10, 11, 17, 19]
labels = ["gender",
"smile",
"attractive",
"wavy-hair",
"young",
"big_lips",
"big_nose",
"chubby",
"glasses",
]
save_image(res * 0.5 + 0.5, "make_figures/output/%s/traversal_%s.jpg" % (
cfg.NAME, labels[indices.index(attrib_idx)]), pad_value=1)
do_attribute_traversal(path + '/00049.png', 0, 0.6, -34)
do_attribute_traversal(path + '/00125.png', 1, -3, 15.0)
do_attribute_traversal(path + '/00057.png', 3, -2, 30.0)
do_attribute_traversal(path + '/00031.png', 4, -10, 30.0)
do_attribute_traversal(path + '/00088.png', 10, -0.3, 30.0)
do_attribute_traversal(path + '/00004.png', 11, -25, 20.0)
do_attribute_traversal(path + '/00012.png', 17, -40, 40.0)
do_attribute_traversal(path + '/00017.png', 19, 0, 30.0)
if __name__ == "__main__":
gpu_count = 1
run(sample, get_cfg_defaults(), description='ALAE-traversals', default_config='configs/ffhq.yaml',
world_size=gpu_count, write_log=False)
|
from django.conf import settings
from django.core import checks
E001 = checks.Error(
"Missing 'CDN_CACHE_ENDPOINT' setting.",
hint="It should be set to CDN 'cache' endpoint or set to None.",
id='django_rt_cdn.E001',
)
E002 = checks.Error(
"Missing 'CDN_IMAGE_ENDPOINT' setting.",
hint="It should be set to CDN 'image' endpoint or set to None.",
id='django_rt_cdn.E002',
)
def check_cache_endpoint(app_configs, **kwargs): # pragma: no cover
if not hasattr(settings, 'CDN_CACHE_ENDPOINT'):
return [E001]
return []
def check_image_endpoint(app_configs, **kwargs): # pragma: no cover
if not hasattr(settings, 'CDN_IMAGE_ENDPOINT'):
return [E002]
return []
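# A minimal sketch (app and class names are assumed, as is this module being named
# checks.py) of how these checks could be registered from the app's AppConfig so
# Django runs them with `manage.py check`:
#
#   from django.apps import AppConfig
#   from django.core import checks
#
#   class DjangoRtCdnConfig(AppConfig):
#       name = 'django_rt_cdn'
#
#       def ready(self):
#           from . import checks as cdn_checks
#           checks.register(cdn_checks.check_cache_endpoint)
#           checks.register(cdn_checks.check_image_endpoint)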
|
from test.slurm_assertions import (assert_job_canceled, assert_job_polled,
assert_job_submitted)
from test.slurmoutput import completed_slurm_job
from test.testdoubles.executor import (CommandExecutorStub, RunningCommandStub,
SlurmJobExecutorSpy)
from unittest.mock import Mock
import pytest
from hpcrocket.core.executor import CommandExecutor
from hpcrocket.core.slurmbatchjob import SlurmBatchJob, SlurmError
from hpcrocket.core.slurmcontroller import SlurmController
from hpcrocket.watcher.jobwatcher import JobWatcher, JobWatcherFactory
def make_sut(executor: CommandExecutor, factory: JobWatcherFactory = None) -> SlurmController:
return SlurmController(executor, factory)
def test__when_submitting_job__should_call_sbatch_on_executor():
executor = SlurmJobExecutorSpy()
sut = make_sut(executor)
jobfile = "jobfile.job"
sut.submit(jobfile)
assert_job_submitted(executor, jobfile)
def test__when_submitting_job__should_return_slurm_batch_job():
executor = SlurmJobExecutorSpy()
sut = make_sut(executor)
jobfile = "jobfile.job"
actual = sut.submit(jobfile)
assert isinstance(actual, SlurmBatchJob)
def test__when_submitting_job__job_should_have_jobid():
jobid = "12345"
executor = SlurmJobExecutorSpy(jobid=jobid)
sut = make_sut(executor)
jobfile = "jobfile.job"
actual = sut.submit(jobfile)
assert actual.jobid == "12345"
def test__when_submitting_job_fails__should_raise_slurmerror():
executor = CommandExecutorStub(RunningCommandStub(exit_code=1))
sut = make_sut(executor)
jobfile = "jobfile.job"
with pytest.raises(SlurmError):
sut.submit(jobfile)
def test__when_polling_job__should_call_sacct_on_executor():
jobid = "12345"
executor = SlurmJobExecutorSpy(jobid=jobid)
sut = make_sut(executor)
sut.poll_status(jobid)
assert_job_polled(executor, jobid)
def test__when_polling_job_fails__should_raise_slurmerror():
executor = CommandExecutorStub(RunningCommandStub(exit_code=1))
sut = make_sut(executor)
jobid = "12345"
with pytest.raises(SlurmError):
sut.poll_status(jobid)
def test__when_polling_job__should_return_job_status():
jobid = "12345"
executor = SlurmJobExecutorSpy(jobid=jobid)
sut = make_sut(executor)
actual = sut.poll_status(jobid)
assert actual == completed_slurm_job()
def test__when_canceling_job__should_call_scancel_on_executor():
jobid = "12345"
executor = SlurmJobExecutorSpy(jobid=jobid)
sut = make_sut(executor)
sut.cancel(jobid)
assert_job_canceled(executor, jobid)
def test__given_watcher_factory__when_submitting_job__should_pass_factory_to_slurm_job():
executor = SlurmJobExecutorSpy()
watcher_dummy = Mock(spec=JobWatcher)
def factory(job):
return watcher_dummy
sut = make_sut(executor, factory)
job = sut.submit("jobfile")
actual = job.get_watcher()
assert actual is watcher_dummy
def test__when_canceling_job_fails__should_raise_slurmerror():
executor = CommandExecutorStub(RunningCommandStub(exit_code=1))
sut = make_sut(executor)
jobid = "1234"
with pytest.raises(SlurmError):
sut.cancel(jobid)
|
"""Extract a frame from the initial state of an environment for illustration purposes.
Lets user interactively move the camera, then takes a screenshot when ready."""
import argparse
import select
import sys
import time
import imageio
import mujoco_py
import numpy as np
from aprl.envs.wrappers import make_env
from aprl.visualize.annotated_gym_compete import AnnotatedGymCompete
def get_img(env_name, seed):
env = make_env(env_name, int(seed), 0, None)
env = AnnotatedGymCompete(env, env_name, 'zoo', '1', 'zoo', '1', None,
resolution=(640, 480), font='times', font_size=24,
draw=False)
env.reset()
env_scene = env.unwrapped.env_scene
env_scene.viewer = mujoco_py.MjViewer(init_width=1000, init_height=750)
env_scene.viewer.start()
env_scene.viewer.set_model(env_scene.model)
env_scene.viewer_setup()
print("Type save to save the image, step to take one timestep.")
running = True
while running:
img = None
while sys.stdin not in select.select([sys.stdin], [], [], 0)[0]:
env.render()
img = env.render(mode='rgb_array')
        command = sys.stdin.readline().strip()
        if command == 'save':
            running = False
        elif command == 'step':
            action = tuple(np.zeros(space.shape) for space in env.action_space.spaces)
            env.step(action)
        else:
            print(f"Unrecognized command '{command}'")
return img
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, help="environment name")
parser.add_argument('--seed', type=int, default=time.time())
parser.add_argument('--out', type=str, help="path to save figure")
args = parser.parse_args()
img = get_img(args.env, args.seed)
imageio.imwrite(args.out, img)
if __name__ == '__main__':
main()
|
from telegram import Update
from src.config import bc
from src.log import log
def log_command(update: Update) -> None:
title = update.message.chat.title or "<DM>"
log.info("(" + title + ") " + update.message.from_user.username + ": " + update.message.text)
def check_auth(update: Update) -> bool:
if update.message.chat.id not in bc.config.telegram["channel_whitelist"]:
return False
return True
def reply(update: Update, text: str) -> None:
title = update.message.chat.title or "<DM>"
log.info("(" + title + ") " + update.message.from_user.username + ": " + text)
update.message.reply_text(text)
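# A minimal sketch (handler and command names are assumed) of how these helpers
# might be combined inside a python-telegram-bot command handler:
#
#   def ping_command(update: Update, context) -> None:
#       log_command(update)
#       if not check_auth(update):
#           return
#       reply(update, "pong")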
|
import torch
import torch.nn as nn
class Autoencoder_FC(nn.Module):
def __init__(self, in_shape):
super().__init__()
bs,w,tracks = in_shape
self.encoder = nn.Sequential(
nn.Linear(w * tracks, 512),
nn.ReLU(),
nn.Linear(512, 128),
nn.ReLU(),
nn.Linear(128, 32),
nn.ReLU(),
nn.Linear(32, 16),
)
self.decoder = nn.Sequential(
nn.Linear(16, 32),
nn.ReLU(),
nn.Linear(32, 128),
nn.ReLU(),
nn.Linear(128, 512),
nn.ReLU(),
nn.Linear(512, w * tracks),
)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def embeding(self, x):
x = self.encoder(x)
return x
class Mean_Vec_Autoencoder_FC(nn.Module):
def __init__(self, in_shape):
super().__init__()
bs,w,tracks = in_shape
self.encoder = nn.Sequential(
nn.Linear(tracks, 512),
nn.ReLU(),
nn.Linear(512, 128),
nn.ReLU(),
nn.Linear(128, 32),
nn.ReLU(),
nn.Linear(32, 16),
)
self.decoder = nn.Sequential(
nn.Linear(16, 32),
nn.ReLU(),
nn.Linear(32, 128),
nn.ReLU(),
nn.Linear(128, 512),
nn.ReLU(),
nn.Linear(512, tracks),
)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def embeding(self, x):
x = self.encoder(x)
return x
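# Example usage (illustrative only; the batch size, window length and track count
# below are made up): Autoencoder_FC expects inputs already flattened to
# (batch, w * tracks), while Mean_Vec_Autoencoder_FC works on (batch, tracks).
# Note the original spelling of the `embeding` method is kept as-is.
if __name__ == "__main__":
    bs, w, tracks = 8, 16, 4
    model = Autoencoder_FC((bs, w, tracks))
    x = torch.randn(bs, w * tracks)
    recon = model(x)          # shape: (8, 64)
    code = model.embeding(x)  # shape: (8, 16)
    print(recon.shape, code.shape)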
|
from . import optimize_variational_circuit_with_proxy
from zquantum.core.interfaces.mock_objects import MockOptimizer
from .client_mock import MockedClient
import http.client
import unittest
import random
import subprocess
class TestOptimizationServer(unittest.TestCase):
def setUp(self):
self.port = "1234"
self.ipaddress = "testing-ip"
def test_optimize_variational_circuit_with_proxy_all_zero_line(self):
# Given
client = MockedClient(self.ipaddress, self.port)
params = [0, 0]
optimizer = MockOptimizer()
# When
opt_results = optimize_variational_circuit_with_proxy(params,
optimizer, client)
# Then
self.assertEqual(opt_results['opt_value'], 0)
self.assertEqual(len(opt_results['opt_params']), 2)
self.assertEqual(opt_results['history'], [{'optimization-evaluation-ids': ['MOCKED-ID']}])
def test_optimize_variational_circuit_with_proxy_x_squared(self):
# Given
client = MockedClient(self.ipaddress, self.port, "return_x_squared")
params = [4]
optimizer = MockOptimizer()
# When
opt_results = optimize_variational_circuit_with_proxy(params,
optimizer, client)
# Then
self.assertGreater(opt_results['opt_value'], 0)
self.assertEqual(len(opt_results['opt_params']), 1)
self.assertEqual(opt_results['history'], [{'optimization-evaluation-ids': ['MOCKED-ID']}])
def test_optimize_variational_circuit_with_proxy_errors(self):
client = MockedClient(self.ipaddress, self.port)
params = [0]
optimizer = MockOptimizer()
# self.assertRaises(ValueError, lambda: optimize_variational_circuit_with_proxy(
# "Not initial params", optimizer, client))
self.assertRaises(AttributeError, lambda: optimize_variational_circuit_with_proxy(
params, "Not an optimizer object", "Not a client"))
self.assertRaises(AttributeError, lambda: optimize_variational_circuit_with_proxy(
params, optimizer, "Not a client"))
@classmethod
def tearDownClass(self):
subprocess.call(["rm", 'client_mock_evaluation_result.json',
'current_optimization_params.json'])
|
from lollipop.compat import iteritems, string_types
__all__ = [
'SCHEMA',
'ValidationError',
'ValidationErrorBuilder',
'merge_errors',
]
#: Name of an error key for cases when you have both errors for the object and
#: for it's fields::
#:
#: {'field1': 'Field error', '_schema': 'Whole object error'}
SCHEMA = '_schema'
MISSING_ERROR_MESSAGE = \
'Error message "{error_key}" in class {class_name} does not exist'
class ValidationError(Exception):
"""Exception to report validation errors.
Examples of valid error messages: ::
raise ValidationError('Error')
raise ValidationError(['Error 1', 'Error 2'])
raise ValidationError({
'field1': 'Error 1',
'field2': {'subfield1': ['Error 2', 'Error 3']}
})
:param messages: Validation error messages. String, list of strings or dict
where keys are nested fields and values are error messages.
"""
def __init__(self, messages):
super(ValidationError, self).__init__('Invalid data: %r' % messages)
# TODO: normalize messages
self.messages = messages
class ErrorMessagesMixin(object):
def __init__(self, error_messages=None, *args, **kwargs):
super(ErrorMessagesMixin, self).__init__(*args, **kwargs)
self._error_messages = {}
for cls in reversed(self.__class__.__mro__):
self._error_messages.update(getattr(cls, 'default_error_messages', {}))
self._error_messages.update(error_messages or {})
def _fail(self, error_key, **kwargs):
if error_key not in self._error_messages:
msg = MISSING_ERROR_MESSAGE.format(
class_name=self.__class__.__name__,
error_key=error_key
)
raise ValueError(msg)
msg = self._error_messages[error_key]
        if isinstance(msg, string_types):
msg = msg.format(**kwargs)
raise ValidationError(msg)
def merge_errors(errors1, errors2):
"""Deeply merges two error messages. Error messages can be
string, list of strings or dict of error messages (recursively).
Format is the same as accepted by :exc:`ValidationError`.
Returns new error messages.
"""
if errors1 is None:
return errors2
elif errors2 is None:
return errors1
if isinstance(errors1, list):
if not errors1:
return errors2
if isinstance(errors2, list):
return errors1 + errors2
elif isinstance(errors2, dict):
return dict(
errors2,
**{SCHEMA: merge_errors(errors1, errors2.get(SCHEMA))}
)
else:
return errors1 + [errors2]
elif isinstance(errors1, dict):
if isinstance(errors2, list):
return dict(
errors1,
**{SCHEMA: merge_errors(errors1.get(SCHEMA), errors2)}
)
elif isinstance(errors2, dict):
errors = dict(errors1)
for k, v in iteritems(errors2):
if k in errors:
errors[k] = merge_errors(errors[k], v)
else:
errors[k] = v
return errors
else:
return dict(
errors1,
**{SCHEMA: merge_errors(errors1.get(SCHEMA), errors2)}
)
else:
if isinstance(errors2, list):
return [errors1] + errors2 if errors2 else errors1
elif isinstance(errors2, dict):
return dict(
errors2,
**{SCHEMA: merge_errors(errors1, errors2.get(SCHEMA))}
)
else:
return [errors1, errors2]
class ValidationErrorBuilder(object):
"""Helper class to report multiple errors.
Example: ::
def validate_all(data):
builder = ValidationErrorBuilder()
if data['foo']['bar'] >= data['baz']['bam']:
builder.add_error('foo.bar', 'Should be less than bam')
if data['foo']['quux'] >= data['baz']['bam']:
                builder.add_error('foo.quux', 'Should be less than bam')
...
builder.raise_errors()
"""
def __init__(self):
self.errors = None
def _make_error(self, path, error):
parts = path.split('.', 1) if isinstance(path, string_types) else [path]
if len(parts) == 1:
return {path: error}
else:
return {parts[0]: self._make_error(parts[1], error)}
def add_error(self, path, error):
"""Add error message for given field path.
Example: ::
builder = ValidationErrorBuilder()
builder.add_error('foo.bar.baz', 'Some error')
print builder.errors
# => {'foo': {'bar': {'baz': 'Some error'}}}
:param str path: '.'-separated list of field names
:param str error: Error message
"""
self.errors = merge_errors(self.errors, self._make_error(path, error))
def add_errors(self, errors):
"""Add errors in dict format.
Example: ::
builder = ValidationErrorBuilder()
builder.add_errors({'foo': {'bar': 'Error 1'}})
builder.add_errors({'foo': {'baz': 'Error 2'}, 'bam': 'Error 3'})
print builder.errors
# => {'foo': {'bar': 'Error 1', 'baz': 'Error 2'}, 'bam': 'Error 3'}
:param str, list or dict errors: Errors to merge
"""
self.errors = merge_errors(self.errors, errors)
def raise_errors(self):
"""Raise :exc:`ValidationError` if errors are not empty;
do nothing otherwise.
"""
if self.errors:
raise ValidationError(self.errors)
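# A small, hedged demonstration of the helpers above (all values are made up);
# run this module directly to see how errors are merged and reported.
if __name__ == '__main__':
    builder = ValidationErrorBuilder()
    builder.add_error('foo.bar', 'Should be less than bam')
    builder.add_errors({'foo': {'baz': 'Error 2'}, 'bam': 'Error 3'})
    print(builder.errors)
    # => {'foo': {'bar': 'Should be less than bam', 'baz': 'Error 2'}, 'bam': 'Error 3'}
    print(merge_errors(['Error 1'], {'field': 'Error 2'}))
    # => {'field': 'Error 2', '_schema': ['Error 1']}
    try:
        builder.raise_errors()
    except ValidationError as e:
        print(e.messages)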
|
"""Common utility functions shared across classes"""
def convert_layers_to_string(layers: list) -> str:
"""Given Card or View layers, convert the grid layers to a string"""
string_conversion = ""
for layer in layers:
string_conversion += "\n" + "".join(layer)
return string_conversion
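# A tiny illustrative check (the layer values are hypothetical, not taken from
# the original card/view data).
if __name__ == "__main__":
    # Two 2-cell grid layers render as "\nAB\nCD".
    print(repr(convert_layers_to_string([["A", "B"], ["C", "D"]])))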
|
from locintel.core.datamodel.geo import Geometry, GeoCoordinate
from locintel.graphs.datamodel.jurbey import Path
from tests.base_fixture import coordinates, graph
graph = graph
expected_paths_edges = [
[(4, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 0)],
[(6, 7), (7, 8), (8, 9), (9, 10), (10, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5)],
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 6), (6, 15)],
[(2, 3), (3, 4), (4, 6), (6, 7), (7, 8), (8, 2), (2, 16)],
[(2, 3), (3, 4), (4, 6), (6, 7), (7, 8), (8, 11), (11, 13)],
[(14, 12), (12, 9), (9, 10), (10, 0), (0, 1), (1, 2), (2, 3)],
[(9, 10), (10, 0), (0, 1), (1, 2), (2, 3), (3, 17), (17, 18)],
]
expected_paths = []
for path_edges in expected_paths_edges:
path = Path(
geometry=Geometry(
[
GeoCoordinate(lat=coordinates[node_1].lat, lng=coordinates[node_1].lng)
for node_1, node_2 in path_edges
]
)
)
# Adds the last coordinate for last node
path.geometry.coords = path.geometry.coords + [
GeoCoordinate(
lat=coordinates[path_edges[-1][1]].lat,
lng=coordinates[path_edges[-1][1]].lng,
)
]
expected_paths.append(path)
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
spyderlib.plugins
=================
Here, 'plugins' are widgets designed specifically for Spyder.
These plugins inherit the following classes
(SpyderPluginMixin & SpyderPluginWidget)
"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
import inspect
import os
# Third party imports
from spyderlib.qt import PYQT5
from spyderlib.qt.QtCore import Qt, Signal, QObject, QEvent, QPoint
from spyderlib.qt.QtGui import (QDockWidget, QWidget, QShortcut, QCursor,
QKeySequence, QMainWindow, QApplication,
QTabBar)
# Local imports
from spyderlib.utils.qthelpers import create_action, toggle_actions
from spyderlib.config.base import _
from spyderlib.config.gui import get_font, set_font
from spyderlib.config.main import CONF
from spyderlib.config.user import NoDefault
from spyderlib.plugins.configdialog import SpyderConfigPage
from spyderlib.py3compat import configparser, is_text_string
from spyderlib.utils import icon_manager as ima
class PluginConfigPage(SpyderConfigPage):
"""Plugin configuration dialog box page widget"""
def __init__(self, plugin, parent):
self.plugin = plugin
self.get_option = plugin.get_option
self.set_option = plugin.set_option
self.get_font = plugin.get_plugin_font
self.set_font = plugin.set_plugin_font
self.apply_settings = plugin.apply_plugin_settings
SpyderConfigPage.__init__(self, parent)
def get_name(self):
return self.plugin.get_plugin_title()
def get_icon(self):
return self.plugin.get_plugin_icon()
class TabFilter(QObject):
"""
Filter event attached to each QTabBar that holds 2 or more dockwidgets in
    charge of handling tab rearrangement.
This filter also holds the methods needed for the detection of a drag and
the movement of tabs.
"""
def __init__(self, dock_tabbar, main):
QObject.__init__(self)
self.dock_tabbar = dock_tabbar
self.main = main
self.moving = False
self.from_index = None
self.to_index = None
# Helper methods
def _get_plugin(self, index):
"""Get plugin reference based on tab index."""
for plugin in self.main.widgetlist:
if plugin.get_plugin_title() == self.dock_tabbar.tabText(index):
return plugin
def _get_plugins(self):
"""
Get a list of all plugin references in the QTabBar to which this
event filter is attached.
"""
plugins = []
for index in range(self.dock_tabbar.count()):
plugin = self._get_plugin(index)
plugins.append(plugin)
return plugins
def _fix_cursor(self, from_index, to_index):
"""Fix mouse cursor position to adjust for different tab sizes."""
# The direction is +1 (moving to the right) or -1 (moving to the left)
direction = abs(to_index - from_index)/(to_index - from_index)
tab_width = self.dock_tabbar.tabRect(to_index).width()
tab_x_min = self.dock_tabbar.tabRect(to_index).x()
tab_x_max = tab_x_min + tab_width
previous_width = self.dock_tabbar.tabRect(to_index - direction).width()
delta = previous_width - tab_width
if delta > 0:
delta = delta * direction
else:
delta = 0
cursor = QCursor()
pos = self.dock_tabbar.mapFromGlobal(cursor.pos())
x, y = pos.x(), pos.y()
if x < tab_x_min or x > tab_x_max:
new_pos = self.dock_tabbar.mapToGlobal(QPoint(x + delta, y))
cursor.setPos(new_pos)
def eventFilter(self, obj, event):
"""Filter mouse press events.
Events that are captured and not propagated return True. Events that
are not captured and are propagated return False.
"""
event_type = event.type()
if event_type == QEvent.MouseButtonPress:
self.tab_pressed(event)
return False
if event_type == QEvent.MouseMove:
self.tab_moved(event)
return True
if event_type == QEvent.MouseButtonRelease:
self.tab_released(event)
return True
return False
def tab_pressed(self, event):
"""Method called when a tab from a QTabBar has been pressed."""
self.from_index = self.dock_tabbar.tabAt(event.pos())
self.dock_tabbar.setCurrentIndex(self.from_index)
if event.button() == Qt.RightButton:
if self.from_index == -1:
self.show_nontab_menu(event)
else:
self.show_tab_menu(event)
def tab_moved(self, event):
"""Method called when a tab from a QTabBar has been moved."""
# If the left button isn't pressed anymore then return
if not event.buttons() & Qt.LeftButton:
self.to_index = None
return
self.to_index = self.dock_tabbar.tabAt(event.pos())
if not self.moving and self.from_index != -1 and self.to_index != -1:
QApplication.setOverrideCursor(Qt.ClosedHandCursor)
self.moving = True
if self.to_index == -1:
self.to_index = self.from_index
from_index, to_index = self.from_index, self.to_index
if from_index != to_index and from_index != -1 and to_index != -1:
self.move_tab(from_index, to_index)
self._fix_cursor(from_index, to_index)
self.from_index = to_index
def tab_released(self, event):
"""Method called when a tab from a QTabBar has been released."""
QApplication.restoreOverrideCursor()
self.moving = False
def move_tab(self, from_index, to_index):
"""Move a tab from a given index to a given index position."""
plugins = self._get_plugins()
from_plugin = self._get_plugin(from_index)
to_plugin = self._get_plugin(to_index)
from_idx = plugins.index(from_plugin)
to_idx = plugins.index(to_plugin)
plugins[from_idx], plugins[to_idx] = plugins[to_idx], plugins[from_idx]
for i in range(len(plugins)-1):
self.main.tabify_plugins(plugins[i], plugins[i+1])
from_plugin.dockwidget.raise_()
def show_tab_menu(self, event):
"""Show the context menu assigned to tabs."""
self.show_nontab_menu(event)
def show_nontab_menu(self, event):
"""Show the context menu assigned to nontabs section."""
menu = self.main.createPopupMenu()
menu.exec_(self.dock_tabbar.mapToGlobal(event.pos()))
class SpyderDockWidget(QDockWidget):
"""Subclass to override needed methods"""
plugin_closed = Signal()
def __init__(self, title, parent):
super(SpyderDockWidget, self).__init__(title, parent)
# Needed for the installation of the event filter
self.title = title
self.main = parent
self.dock_tabbar = None
# To track dockwidget changes the filter is installed when dockwidget
# visibility changes. This installs the filter on startup and also
# on dockwidgets that are undocked and then docked to a new location.
self.visibilityChanged.connect(self.install_tab_event_filter)
def closeEvent(self, event):
"""
Reimplement Qt method to send a signal on close so that "Panes" main
window menu can be updated correctly
"""
self.plugin_closed.emit()
def install_tab_event_filter(self, value):
"""
Install an event filter to capture mouse events in the tabs of a
QTabBar holding tabified dockwidgets.
"""
dock_tabbar = None
tabbars = self.main.findChildren(QTabBar)
for tabbar in tabbars:
for tab in range(tabbar.count()):
title = tabbar.tabText(tab)
if title == self.title:
dock_tabbar = tabbar
break
if dock_tabbar is not None:
self.dock_tabbar = dock_tabbar
# Install filter only once per QTabBar
if getattr(self.dock_tabbar, 'filter', None) is None:
self.dock_tabbar.filter = TabFilter(self.dock_tabbar,
self.main)
self.dock_tabbar.installEventFilter(self.dock_tabbar.filter)
class SpyderPluginMixin(object):
"""
Useful methods to bind widgets to the main window
See SpyderPluginWidget class for required widget interface
Signals:
* sig_option_changed
Example:
plugin.sig_option_changed.emit('show_all', checked)
* show_message
* update_plugin_title
"""
CONF_SECTION = None
CONFIGWIDGET_CLASS = None
IMG_PATH = 'images'
ALLOWED_AREAS = Qt.AllDockWidgetAreas
LOCATION = Qt.LeftDockWidgetArea
FEATURES = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
DISABLE_ACTIONS_WHEN_HIDDEN = True
# Signals
sig_option_changed = None
show_message = None
update_plugin_title = None
def __init__(self, main=None, **kwds):
"""Bind widget to a QMainWindow instance"""
super(SpyderPluginMixin, self).__init__(**kwds)
assert self.CONF_SECTION is not None
self.PLUGIN_PATH = os.path.dirname(inspect.getfile(self.__class__))
self.main = main
self.default_margins = None
self.plugin_actions = None
self.dockwidget = None
self.mainwindow = None
self.ismaximized = False
self.isvisible = False
# NOTE: Don't use the default option of CONF.get to assign a
# None shortcut to plugins that don't have one. That will mess
# the creation of our Keyboard Shortcuts prefs page
try:
self.shortcut = CONF.get('shortcuts', '_/switch to %s' % \
self.CONF_SECTION)
except configparser.NoOptionError:
self.shortcut = None
# We decided to create our own toggle action instead of using
# the one that comes with dockwidget because it's not possible
# to raise and focus the plugin with it.
self.toggle_view_action = None
def initialize_plugin(self):
"""Initialize plugin: connect signals, setup actions, ..."""
self.plugin_actions = self.get_plugin_actions()
if self.show_message is not None:
self.show_message.connect(self.__show_message)
if self.update_plugin_title is not None:
self.update_plugin_title.connect(self.__update_plugin_title)
if self.sig_option_changed is not None:
self.sig_option_changed.connect(self.set_option)
self.setWindowTitle(self.get_plugin_title())
self.create_toggle_view_action()
def on_first_registration(self):
"""Action to be performed on first plugin registration"""
# Was written to handle the very first plugin position in Spyder's
# main window layout, but this could also be used for other things
# (see for example the IPython console plugin for which this method
# had to be written to handle the fact that this plugin was
# introduced between v2.1 and v2.2)
raise NotImplementedError
def initialize_plugin_in_mainwindow_layout(self):
"""If this is the first time the plugin is shown, perform actions to
initialize plugin position in Spyder's window layout"""
if self.get_option('first_time', True):
try:
self.on_first_registration()
except NotImplementedError:
return
self.set_option('first_time', False)
def update_margins(self):
layout = self.layout()
if self.default_margins is None:
self.default_margins = layout.getContentsMargins()
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
layout.setContentsMargins(*[margin]*4)
else:
layout.setContentsMargins(*self.default_margins)
def __update_plugin_title(self):
"""Update plugin title, i.e. dockwidget or mainwindow title"""
if self.dockwidget is not None:
win = self.dockwidget
elif self.mainwindow is not None:
win = self.mainwindow
else:
return
win.setWindowTitle(self.get_plugin_title())
def create_dockwidget(self):
"""Add to parent QMainWindow as a dock widget"""
        # It is not clear yet why the following does not work...
# (see Issue #880)
## # Using Qt.Window window flags solves Issue #880 (detached dockwidgets
## # are not painted after restarting Spyder and restoring their hexstate)
## # but it does not work with PyQt <=v4.7 (dockwidgets can't be docked)
## # or non-Windows platforms (lot of warnings are printed out)
## # (so in those cases, we use the default window flags: Qt.Widget):
## flags = Qt.Widget if is_old_pyqt or os.name != 'nt' else Qt.Window
dock = SpyderDockWidget(self.get_plugin_title(), self.main)#, flags)
dock.setObjectName(self.__class__.__name__+"_dw")
dock.setAllowedAreas(self.ALLOWED_AREAS)
dock.setFeatures(self.FEATURES)
dock.setWidget(self)
self.update_margins()
dock.visibilityChanged.connect(self.visibility_changed)
dock.plugin_closed.connect(self.plugin_closed)
self.dockwidget = dock
if self.shortcut is not None:
sc = QShortcut(QKeySequence(self.shortcut), self.main,
self.switch_to_plugin)
self.register_shortcut(sc, "_", "Switch to %s" % self.CONF_SECTION)
return (dock, self.LOCATION)
def create_mainwindow(self):
"""
Create a QMainWindow instance containing this plugin
Note: this method is currently not used
"""
self.mainwindow = mainwindow = QMainWindow()
mainwindow.setAttribute(Qt.WA_DeleteOnClose)
icon = self.get_plugin_icon()
if is_text_string(icon):
icon = self.get_icon(icon)
mainwindow.setWindowIcon(icon)
mainwindow.setWindowTitle(self.get_plugin_title())
mainwindow.setCentralWidget(self)
self.refresh_plugin()
return mainwindow
def create_configwidget(self, parent):
"""Create configuration dialog box page widget"""
if self.CONFIGWIDGET_CLASS is not None:
configwidget = self.CONFIGWIDGET_CLASS(self, parent)
configwidget.initialize()
return configwidget
def apply_plugin_settings(self, options):
"""Apply configuration file's plugin settings"""
raise NotImplementedError
def register_shortcut(self, qaction_or_qshortcut, context, name,
default=NoDefault):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.main.register_shortcut(qaction_or_qshortcut,
context, name, default)
def register_widget_shortcuts(self, context, widget):
"""
Register widget shortcuts
widget interface must have a method called 'get_shortcut_data'
"""
for qshortcut, name, default in widget.get_shortcut_data():
self.register_shortcut(qshortcut, context, name, default)
def switch_to_plugin(self):
"""Switch to plugin
This method is called when pressing plugin's shortcut key"""
if not self.ismaximized:
self.dockwidget.show()
if not self.toggle_view_action.isChecked():
self.toggle_view_action.setChecked(True)
self.visibility_changed(True)
def visibility_changed(self, enable):
"""DockWidget visibility has changed"""
if enable:
self.dockwidget.raise_()
widget = self.get_focus_widget()
if widget is not None:
widget.setFocus()
visible = self.dockwidget.isVisible() or self.ismaximized
if self.DISABLE_ACTIONS_WHEN_HIDDEN:
toggle_actions(self.plugin_actions, visible)
self.isvisible = enable and visible
if self.isvisible:
self.refresh_plugin() # To give focus to the plugin's widget
def plugin_closed(self):
"""DockWidget was closed"""
self.toggle_view_action.setChecked(False)
def set_option(self, option, value):
"""
Set a plugin option in configuration file
Use a SIGNAL to call it, e.g.:
plugin.sig_option_changed.emit('show_all', checked)
"""
CONF.set(self.CONF_SECTION, str(option), value)
def get_option(self, option, default=NoDefault):
"""Get a plugin option from configuration file"""
return CONF.get(self.CONF_SECTION, option, default)
def get_plugin_font(self, option=None):
"""Return plugin font option"""
return get_font(self.CONF_SECTION, option)
def set_plugin_font(self, font, option=None):
"""Set plugin font option"""
set_font(font, self.CONF_SECTION, option)
def __show_message(self, message, timeout=0):
"""Show message in main window's status bar"""
self.main.statusBar().showMessage(message, timeout)
def starting_long_process(self, message):
"""
Showing message in main window's status bar
and changing mouse cursor to Qt.WaitCursor
"""
self.__show_message(message)
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents()
def ending_long_process(self, message=""):
"""
Clearing main window's status bar
and restoring mouse cursor
"""
QApplication.restoreOverrideCursor()
self.__show_message(message, timeout=2000)
QApplication.processEvents()
def set_default_color_scheme(self, name='Spyder'):
"""Set default color scheme (only once)"""
color_scheme_name = self.get_option('color_scheme_name', None)
if color_scheme_name is None:
names = CONF.get("color_schemes", "names")
if name not in names:
name = names[0]
self.set_option('color_scheme_name', name)
def create_toggle_view_action(self):
"""Associate a toggle view action with each plugin"""
title = self.get_plugin_title()
if self.CONF_SECTION == 'editor':
title = _('Editor')
if self.shortcut is not None:
action = create_action(self, title,
toggled=lambda checked: self.toggle_view(checked),
shortcut=QKeySequence(self.shortcut))
action.setShortcutContext(Qt.WidgetWithChildrenShortcut)
else:
action = create_action(self, title, toggled=lambda checked:
self.toggle_view(checked))
self.toggle_view_action = action
def toggle_view(self, checked):
"""Toggle view"""
if checked:
self.dockwidget.show()
self.dockwidget.raise_()
else:
self.dockwidget.hide()
class SpyderPluginWidget(QWidget, SpyderPluginMixin):
"""
Spyder base widget class
Spyder's widgets either inherit this class or reimplement its interface
"""
sig_option_changed = Signal(str, object)
show_message = Signal(str, int)
update_plugin_title = Signal()
if PYQT5:
def __init__(self, parent, **kwds):
super(SpyderPluginWidget, self).__init__(parent, **kwds)
else:
def __init__(self, parent):
QWidget.__init__(self, parent)
SpyderPluginMixin.__init__(self, parent)
def get_plugin_title(self):
"""
Return plugin title
Note: after some thinking, it appears that using a method
is more flexible here than using a class attribute
"""
raise NotImplementedError
def get_plugin_icon(self):
"""
Return plugin icon (QIcon instance)
Note: this is required for plugins creating a main window
(see SpyderPluginMixin.create_mainwindow)
and for configuration dialog widgets creation
"""
return ima.icon('outline_explorer')
def get_focus_widget(self):
"""
Return the widget to give focus to when
this plugin's dockwidget is raised on top-level
"""
pass
def closing_plugin(self, cancelable=False):
"""
Perform actions before parent main window is closed
        Return True if the plugin may be closed immediately, False otherwise
Note: returned value is ignored if *cancelable* is False
"""
return True
def refresh_plugin(self):
"""Refresh widget"""
raise NotImplementedError
def get_plugin_actions(self):
"""
Return a list of actions related to plugin
Note: these actions will be enabled when plugin's dockwidget is visible
and they will be disabled when it's hidden
"""
raise NotImplementedError
def register_plugin(self):
"""Register plugin in Spyder's main window"""
raise NotImplementedError
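class ExamplePlugin(SpyderPluginWidget):
    """
    Minimal, illustrative plugin skeleton (not part of spyderlib itself): the
    class name, the CONF_SECTION value and the dockwidget registration below
    are assumptions meant only to show which parts of the SpyderPluginWidget
    interface a concrete plugin has to fill in.
    """
    CONF_SECTION = 'example_plugin'  # hypothetical config section name

    def get_plugin_title(self):
        return _("Example")

    def get_plugin_actions(self):
        # No menu/toolbar actions for this stub; any actions returned here
        # would be enabled/disabled with the dockwidget's visibility.
        return []

    def refresh_plugin(self):
        pass

    def register_plugin(self):
        # Assumes the main window exposes add_dockwidget(); real plugins also
        # connect their signals to the main window here.
        self.main.add_dockwidget(self)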
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('correctiv_justizgelder', '0004_auto_20150429_0959'),
]
operations = [
migrations.AlterField(
model_name='fine',
name='reference_id',
field=models.CharField(max_length=255, db_index=True),
preserve_default=True,
),
]
|
#!/usr/bin/env python
import sys
def main(number):
    """Sum the even Fibonacci numbers strictly below *number*."""
    a_1 = 1
    a_2 = 2
    # Count the initial even term (2) only if it is below the bound.
    result = 2 if number > 2 else 0
    a = a_1 + a_2
    while a < number:
        # Test the current term before advancing, so a term >= number is
        # never added (the original added the freshly computed term without
        # re-checking the bound).
        if a % 2 == 0:
            result += a
        a_1 = a_2
        a_2 = a
        a = a_1 + a_2
    return result
if __name__ == '__main__':
print(main(int(sys.argv[1])))
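# Hedged usage note: with the corrected loop above, running for example
#   python <this_script>.py 4000000
# sums the even Fibonacci numbers below 4,000,000 and prints 4613732 (the
# familiar Project Euler #2 result); the script name is a placeholder.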
|
#
# Takes the structs required for the UDP data and turns them into JSON objects
# to pass through our websocket to the webpage for viewing.
#
# Author: Kristian Nilssen
import json
def MotionData(packet):
players_car = packet.m_header.m_playerCarIndex
motion_data_json = {
'header': {
'packetFormat': packet.m_header.m_packetFormat,
'packetVersion': packet.m_header.m_packetVersion,
'packetId': packet.m_header.m_packetId,
'sessionUID': packet.m_header.m_sessionUID,
'sessionTime': packet.m_header.m_sessionTime,
'frameIdentifier': packet.m_header.m_frameIdentifier,
'playerCarIndex': players_car
},
'carMotionData': {
'worldPositionX': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldPositionX),
'worldPositionY': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldPositionY),
'worldPositionZ': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldPositionZ),
'worldVelocityX': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldVelocityX),
'worldVelocityY': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldVelocityY),
'worldVelocityZ': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldVelocityZ),
'worldForwardDirX': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldForwardDirX),
'worldForwardDirY': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldForwardDirY),
'worldForwardDirZ': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldForwardDirZ),
'worldRightDirX': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldRightDirX),
'worldRightDirY': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldRightDirY),
'worldRightDirZ': "{:.2f}".format(packet.m_carMotionData[players_car].m_worldRightDirZ),
'gForceLateral': "{:.2f}".format(packet.m_carMotionData[players_car].m_gForceLateral),
'gForceLongitudinal': "{:.2f}".format(packet.m_carMotionData[players_car].m_gForceLongitudinal),
'gForceVertical': "{:.2f}".format(packet.m_carMotionData[players_car].m_gForceVertical),
'yaw': "{:.2f}".format(packet.m_carMotionData[players_car].m_yaw),
'pitch': "{:.2f}".format(packet.m_carMotionData[players_car].m_pitch),
'roll': "{:.2f}".format(packet.m_carMotionData[players_car].m_roll)
},
'suspensionPosition':{
'RL': "{:.2f}".format(packet.m_suspensionPosition[0]),
'RR': "{:.2f}".format(packet.m_suspensionPosition[1]),
'FL': "{:.2f}".format(packet.m_suspensionPosition[2]),
'FR': "{:.2f}".format(packet.m_suspensionPosition[3])
},
'suspensionVelocity':{
'RL': "{:.2f}".format(packet.m_suspensionVelocity[0]),
'RR': "{:.2f}".format(packet.m_suspensionVelocity[1]),
'FL': "{:.2f}".format(packet.m_suspensionVelocity[2]),
'FR': "{:.2f}".format(packet.m_suspensionVelocity[3])
},
'suspensionAcceleration':{
'RL': "{:.2f}".format(packet.m_suspensionAcceleration[0]),
'RR': "{:.2f}".format(packet.m_suspensionAcceleration[1]),
'FL': "{:.2f}".format(packet.m_suspensionAcceleration[2]),
'FR': "{:.2f}".format(packet.m_suspensionAcceleration[3])
},
'wheelSpeed':{
'RL': "{:.2f}".format(packet.m_wheelSpeed[0]),
'RR': "{:.2f}".format(packet.m_wheelSpeed[1]),
'FL': "{:.2f}".format(packet.m_wheelSpeed[2]),
'FR': "{:.2f}".format(packet.m_wheelSpeed[3])
},
'wheelSlip':{
'RL': "{:.2f}".format(packet.m_wheelSlip[0]),
'RR': "{:.2f}".format(packet.m_wheelSlip[1]),
'FL': "{:.2f}".format(packet.m_wheelSlip[2]),
'FR': "{:.2f}".format(packet.m_wheelSlip[3])
},
'localVelocityX': "{:.2f}".format(packet.m_localVelocityX),
        'localVelocityY': "{:.2f}".format(packet.m_localVelocityY),
'localVelocityZ': "{:.2f}".format(packet.m_localVelocityZ),
'angularVelocityX': "{:.2f}".format(packet.m_angularVelocityX),
'angularVelocityY': "{:.2f}".format(packet.m_angularVelocityY),
'angularVelocityZ': "{:.2f}".format(packet.m_angularVelocityZ),
'angularAccelerationX': "{:.2f}".format(packet.m_angularAccelerationX),
'angularAccelerationY': "{:.2f}".format(packet.m_angularAccelerationY),
'angularAccelerationZ': "{:.2f}".format(packet.m_angularAccelerationZ),
'frontWheelsAngle': "{:.2f}".format(packet.m_frontWheelsAngle)
}
return motion_data_json
def SessionData(packet):
weather_types = ['clear', 'light cloud', 'overcast', 'light rain', 'heavy rain', 'storm']
session_types = ['unknown', 'P1', 'P2', 'P3', 'Short P', 'Q1', 'Q2', 'Q3', 'Short Q', 'OSQ', 'R', 'R2', 'Time Trial']
track_types = ['Melbourne', 'Paul Ricard', 'Shanghai', 'Sakhir (Bahrain)', 'Catalunya', 'Monaco', 'Montreal', 'Silverstone', 'Hockenheim', 'Hungaroring',
'Spa', 'Monza', 'Singapore', 'Suzuka', 'Abu Dhabi', 'Texas', 'Brazil', 'Austria', 'Sochi', 'Mexico', 'Baku (Azerbaijan)', 'Sakhir Short',
'Silverstone Short', 'Texas Short', 'Suzuka Short']
era_types = ['modern', 'classic']
SLI_pro_support_type = ['inactive', 'active']
zone_flag_types = ['none', 'green', 'blue', 'yellow', 'red']
safety_car_types = ['no safety car', 'full safety car', 'virtual safety car']
network_game_types = ['offline', 'online']
session_data_json = {
'header': {
'packetFormat': packet.m_header.m_packetFormat,
'packetVersion': packet.m_header.m_packetVersion,
'packetId': packet.m_header.m_packetId,
'sessionUID': packet.m_header.m_sessionUID,
'sessionTime': packet.m_header.m_sessionTime,
'frameIdentifier': packet.m_header.m_frameIdentifier,
'playerCarIndex': packet.m_header.m_playerCarIndex
},
'weather': packet.m_weather, # Weather - 0 = clear, 1 = light cloud, 2 = overcast, 3 = light rain, 4 = heavy rain, 5 = storm
'trackTemperature': packet.m_trackTemperature,
'airTemperature': packet.m_airTemperature,
'totalLaps': packet.m_totalLaps,
'trackLength': packet.m_trackLength,
'sessionType': packet.m_sessionType, # 0 = unknown, 1 = P1, 2 = P2, 3 = P3, 4 = Short P, 5 = Q1, 6 = Q2, 7 = Q3, 8 = Short Q, 9 = OSQ, 10 = R, 11 = R2, 12 = Time Trial
'trackId': packet.m_trackId, # -1 for unknown, 0-21 for tracks, see appendix
'era': packet.m_era, # Era, 0 = modern, 1 = classic
'sessionTimeLeft': packet.m_sessionTimeLeft,
'sessionDuration': packet.m_sessionDuration,
'pitSpeedLimit': packet.m_pitSpeedLimit,
'gamePaused': packet.m_gamePaused,
'isSpectating': packet.m_isSpectating,
'spectatorCarIndex': packet.m_spectatorCarIndex,
'sliProNativeSupport': packet.m_sliProNativeSupport, # SLI Pro support, 0 = inactive, 1 = active
'numMarshalZones': packet.m_numMarshalZones,
'marshalZones': 'N/A',
'safetyCarStatus': safety_car_types[packet.m_safetyCarStatus], # 0 = no safety car, 1 = full safety car, 2 = virtual safety car
'networkGame': network_game_types[packet.m_networkGame] # 0 = offline, 1 = online
}
return session_data_json
def LapData(packet):
# pit_status_types = ['none', 'pitting', 'in pit area']
# sector_types = ['sector1', 'sector2', 'sector3']
# current_lap_invalid_types = ['valid', 'invalid']
# driver_status_types = ['in garage', 'flying lap', 'in lap', 'out lap', 'on track']
# result_status_types = ['invalid', 'inactive', 'active', 'finished', 'disqualified', 'not classified', 'retired']
players_car = packet.m_header.m_playerCarIndex
lap_data_json = {
'header': {
'packetFormat': packet.m_header.m_packetFormat,
'packetVersion': packet.m_header.m_packetVersion,
'packetId': packet.m_header.m_packetId,
'sessionUID': packet.m_header.m_sessionUID,
'sessionTime': packet.m_header.m_sessionTime,
'frameIdentifier': packet.m_header.m_frameIdentifier,
'playerCarIndex': players_car
},
'lapData': {
'lastLapTime': packet.m_lapData[players_car].m_lastLapTime,
'currentLapTime': packet.m_lapData[players_car].m_currentLapTime,
'bestLapTime': packet.m_lapData[players_car].m_bestLapTime,
'sector1Time': packet.m_lapData[players_car].m_sector1Time,
'sector2Time': packet.m_lapData[players_car].m_sector2Time,
'lapDistance': packet.m_lapData[players_car].m_lapDistance,
'totalDistance': packet.m_lapData[players_car].m_totalDistance,
'safetyCarDelta': packet.m_lapData[players_car].m_safetyCarDelta,
'carPosition': packet.m_lapData[players_car].m_carPosition,
'currentLapNum': packet.m_lapData[players_car].m_currentLapNum,
'pitStatus': packet.m_lapData[players_car].m_pitStatus, # 0 = none, 1 = pitting, 2 = in pit area
'sector': packet.m_lapData[players_car].m_sector, # 0 = sector1, 1 = sector2, 2 = sector3
'currentLapInvalid': packet.m_lapData[players_car].m_currentLapInvalid, # Current lap invalid - 0 = valid, 1 = invalid
'penalties': packet.m_lapData[players_car].m_penalties,
'gridPosition': packet.m_lapData[players_car].m_gridPosition,
'driverStatus': packet.m_lapData[players_car].m_driverStatus, # Status of driver - 0 = in garage, 1 = flying lap, 2 = in lap, 3 = out lap, 4 = on track
'resultStatus': packet.m_lapData[players_car].m_resultStatus, # Result status - 0 = invalid, 1 = inactive, 2 = active, 3 = finished, 4 = disqualified, 5 = not classified, 6 = retired
},
}
return lap_data_json
def EventData(packet):
event_data_json = {
'header': {
'packetFormat': packet.m_header.m_packetFormat,
'packetVersion': packet.m_header.m_packetVersion,
'packetId': packet.m_header.m_packetId,
'sessionUID': packet.m_header.m_sessionUID,
'sessionTime': packet.m_header.m_sessionTime,
'frameIdentifier': packet.m_header.m_frameIdentifier,
'playerCarIndex': packet.m_header.m_playerCarIndex
},
'eventStringCode': ''.join(packet.m_eventStringCode)
}
return event_data_json
def ParticipantData(packet):
# ai_controlled_types = ['Human', 'AI']
# driver_id_types = ['Carlos Sainz', '', 'Daniel Ricciardo', 'Fernando Alonso', '', '', 'Kimi Raikkonen', 'Lewis Hamilton', 'Marcus Ericsson', 'Max Verstappen',
# 'Nico Hulkenburg', 'Kevin Magnussen', 'Romain Grosjean', 'Sebastian Vettel', 'Sergio Perez', 'Valtteri Bottas', '', 'Esteban Ocon', 'Stoffel Vandoorne',
# 'Lance Stroll', 'Arron Barnes', 'Martin Giles', 'Alex Murray', 'Lucas Roth', 'Igor Correria', 'Sophie Levasseur', 'Jonas Schiffer', 'Alain Forest', 'Jay Letourneau',
# 'Esto Saari', 'Yasar Atiyeh', 'Callisto Calabresi', 'Naota Izum', 'Howard Clarke', 'Wilheim Kaufmann', 'Marie Laursen', 'Flavio Nieves', 'Peter Belousov',
# 'Klimek Michalski', 'Santiago Moreno', 'Benjamin Coppens', 'Noah Visser', 'Gert Waldmuller', 'Julian Quesada', 'Daniel Jones', '', '', '', '', '', '', '', '', '',
# '', '', '', '', 'Charles Leclerc', 'Pierre Gasly', 'Brendon Hartley', 'Sergey Siroktin', '', '', '', '', '', '', '', 'Ruben Meijer', 'Rashid Nair', 'Jack Tremblay']
# team_id_types = ['Mercedes', 'Ferrari', 'Red Bull', 'Williams', 'Force India', 'Renault', 'Toro Rosso', 'Haas', 'McLaren', 'Sauber', 'McLaren 1988', 'McLaren 1991',
# 'Williams 1992', 'Ferrari 1995', 'Williams 1996', 'McLaren 1998', 'Ferrari 2002', 'Ferrari 2004', 'Renault 2006', 'Ferrari 2007', 'McLaren 2008', 'Red Bull 2008',
# 'Ferrari 1976', '', '', '', '', '', '', '', '', '', '', '', 'McLaren 1976', 'Lotus 1972', 'Ferrari 1979', 'McLaren 1982', 'Williams 2003', 'Brawn 2009', 'Lotus 1978']
# nationality_types = ['American', 'Argentinean', 'Australian', 'Austrian', 'Azerbaijani', 'Bahraini', 'Belgian', 'Bolivian', 'Brazilian', 'British', 'Bulgarian', 'Camaroonian', 'Canadian',
# 'Chilean', 'Chinese', 'Columbian', 'Costa Rican', 'Croatian', 'Cypriot', 'Czech', 'Danish', 'Dutch', 'Ecuadorian', 'English', 'Emirian', 'Estonian', 'Finnish', 'French',
# 'German', 'Ghanian', 'Greek', 'Guatamalan', 'Honduran', 'Hong Konger', 'Hungarian', 'Icelander', 'Indian', 'Indonesian', 'Irish', 'Israeli', 'Italian', 'Jamacian',
# 'Japanese', 'Jordanian', 'Kuwati', 'Latvian', 'Lebanese', 'Lithuanian', 'Luxembourger', 'Malasaysian', 'Maltese', 'Mexican', 'Monegasque', 'New Zelander',
# 'Nicuraguan', 'North Korean', 'Northern Irish', 'Norwegian', 'Omani', 'Pakistani', 'Panamanian', 'Paraguayan', 'Peruvian', 'Polish', 'Portuguese', 'Qatari', 'Romanian',
# 'Russian', 'Salvadoran', 'Saudi', 'Scottish', 'Serbian', 'Singaporean', 'Slovakian', 'Slovenien', 'South Korea', 'South African', 'Spanish', 'Swedish', 'Swiss',
# 'Taiwanese', 'Thai', 'Turkish', 'Uruguayan', 'Ukrainian', 'Venezuelan', 'Welsh']
players_car = packet.m_header.m_playerCarIndex
participant_data_json = {
'header': {
'packetFormat': packet.m_header.m_packetFormat,
'packetVersion': packet.m_header.m_packetVersion,
'packetId': packet.m_header.m_packetId,
'sessionUID': packet.m_header.m_sessionUID,
'sessionTime': packet.m_header.m_sessionTime,
'frameIdentifier': packet.m_header.m_frameIdentifier,
'playerCarIndex': players_car
},
'numCars': packet.m_numCars,
'participants':{
'aiControlled': packet.m_participants[players_car].m_aiControlled, # Whether the vehicle is AI (1) or Human (0) controlled
'driverId': packet.m_participants[players_car].m_driverId, # Driver id - see appendix
'teamId': packet.m_participants[players_car].m_teamId, # Team id - see appendix
'raceNumber': packet.m_participants[players_car].m_raceNumber,
'nationality': packet.m_participants[players_car].m_nationality, # Nationality of the driver - see appendix
'name': packet.m_participants[players_car].m_name
},
}
return participant_data_json
def CarSetupData(packet):
players_car = packet.m_header.m_playerCarIndex
setup_data_json = {
'header': {
'packetFormat': packet.m_header.m_packetFormat,
'packetVersion': packet.m_header.m_packetVersion,
'packetId': packet.m_header.m_packetId,
'sessionUID': packet.m_header.m_sessionUID,
'sessionTime': packet.m_header.m_sessionTime,
'frameIdentifier': packet.m_header.m_frameIdentifier,
'playerCarIndex': players_car
},
'carSetups': {
'frontWing': packet.m_carSetups[players_car].m_frontWing,
'rearWing': packet.m_carSetups[players_car].m_rearWing,
'onThrottle': packet.m_carSetups[players_car].m_onThrottle,
'offThrottle': packet.m_carSetups[players_car].m_offThrottle,
'frontCamber': packet.m_carSetups[players_car].m_frontCamber,
'rearCamber': packet.m_carSetups[players_car].m_rearCamber,
'frontToe': packet.m_carSetups[players_car].m_frontToe,
'rearToe': packet.m_carSetups[players_car].m_rearToe,
'frontSuspension': packet.m_carSetups[players_car].m_frontSuspension,
'rearSuspension': packet.m_carSetups[players_car].m_rearSuspension,
'frontAntiRollBar': packet.m_carSetups[players_car].m_frontAntiRollBar,
'rearAntiRollBar': packet.m_carSetups[players_car].m_rearAntiRollBar,
'frontSuspensionHeight': packet.m_carSetups[players_car].m_frontSuspensionHeight,
'rearSuspensionHeight': packet.m_carSetups[players_car].m_rearSuspensionHeight,
'brakePressure': packet.m_carSetups[players_car].m_brakePressure,
'brakeBias': packet.m_carSetups[players_car].m_brakeBias,
'frontTyrePressure': packet.m_carSetups[players_car].m_frontTyrePressure,
'rearTyrePressure': packet.m_carSetups[players_car].m_rearTyrePressure,
'ballast': packet.m_carSetups[players_car].m_ballast,
'fuelLoad': packet.m_carSetups[players_car].m_fuelLoad
},
}
return setup_data_json
def CarTelemetryData(packet):
# drs_types = ['off', 'on']
# button_status_types = {'0x0001': 'Cross or A', '0x0002': 'Triangle or Y', '0x0004': 'Circle or B', '0x0008': 'Square or X', '0x0010': 'D-pad Left', '0x0020': 'D-pad Right',
# '0x0040': 'D-pad Up', '0x0080': 'D-pad Down', '0x0100': 'Options or Menu', '0x0200': 'L1 or LB', '0x0400': 'R2 or RB', '0x0800': 'L2 or LT',
# '0x1000': 'R2 or RT', '0x2000': 'Left Stick Click', '0x4000': 'Right Stick Click'}
players_car = packet.m_header.m_playerCarIndex
telemetry_data_json = {
'header': {
'packetFormat': packet.m_header.m_packetFormat,
'packetVersion': packet.m_header.m_packetVersion,
'packetId': packet.m_header.m_packetId,
'sessionUID': packet.m_header.m_sessionUID,
'sessionTime': packet.m_header.m_sessionTime,
'frameIdentifier': packet.m_header.m_frameIdentifier,
'playerCarIndex': players_car
},
'carTelemetryData':{
'speed': packet.m_carTelemetryData[players_car].m_speed,
'throttle': packet.m_carTelemetryData[players_car].m_throttle,
'steer': packet.m_carTelemetryData[players_car].m_steer,
'brake': packet.m_carTelemetryData[players_car].m_brake,
'clutch': packet.m_carTelemetryData[players_car].m_clutch,
'gear': packet.m_carTelemetryData[players_car].m_gear,
'engineRPM': packet.m_carTelemetryData[players_car].m_engineRPM,
'drs': packet.m_carTelemetryData[players_car].m_drs,
'revLightsPercent': packet.m_carTelemetryData[players_car].m_revLightsPercent,
'brakesTemperature':{
'RL': packet.m_carTelemetryData[players_car].m_brakesTemperature[0],
'RR': packet.m_carTelemetryData[players_car].m_brakesTemperature[1],
'FL': packet.m_carTelemetryData[players_car].m_brakesTemperature[2],
'FR': packet.m_carTelemetryData[players_car].m_brakesTemperature[3]
},
'tyresSurfaceTemperature':{
'RL': packet.m_carTelemetryData[players_car].m_tyresSurfaceTemperature[0],
'RR': packet.m_carTelemetryData[players_car].m_tyresSurfaceTemperature[1],
'FL': packet.m_carTelemetryData[players_car].m_tyresSurfaceTemperature[2],
'FR': packet.m_carTelemetryData[players_car].m_tyresSurfaceTemperature[3]
},
'tyresInnerTemperature':{
'RL': packet.m_carTelemetryData[players_car].m_tyresInnerTemperature[0],
'RR': packet.m_carTelemetryData[players_car].m_tyresInnerTemperature[1],
'FL': packet.m_carTelemetryData[players_car].m_tyresInnerTemperature[2],
'FR': packet.m_carTelemetryData[players_car].m_tyresInnerTemperature[3]
},
'engineTemperature': packet.m_carTelemetryData[players_car].m_engineTemperature,
'tyresPressure':{
'RL': packet.m_carTelemetryData[players_car].m_tyresPressure[0],
'RR': packet.m_carTelemetryData[players_car].m_tyresPressure[1],
'FL': packet.m_carTelemetryData[players_car].m_tyresPressure[2],
'FR': packet.m_carTelemetryData[players_car].m_tyresPressure[3]
}
},
'buttonStatus': 'N/A'
# 'buttonStatus': button_status_types[packet.m_buttonStatus]
}
return telemetry_data_json
def PacketCarStatusData(packet):
# traction_control_types = ['off', '', 'high']
# anti_lock_brakes_type = ['off', 'on']
# fuel_mix_types = ['lean', 'standard', 'rich', 'max']
# pit_limiter_types = ['off', 'on']
# drs_allowed_types = ['not allowed', 'allowed']
# tyre_compound_types = ['hyper soft', 'ultra soft', 'super soft', 'soft', 'medium', 'hard', 'super hard', 'inter', 'wet']
# vehicle_fia_flags_types = ['none', 'green', 'blue', 'yellow', 'red']
# ers_deploy_mode_types = ['none', 'low', 'medium', 'high', 'overtake', 'hotlap']
players_car = packet.m_header.m_playerCarIndex
status_data_json = {
'header': {
'packetFormat': packet.m_header.m_packetFormat,
'packetVersion': packet.m_header.m_packetVersion,
'packetId': packet.m_header.m_packetId,
'sessionUID': packet.m_header.m_sessionUID,
'sessionTime': packet.m_header.m_sessionTime,
'frameIdentifier': packet.m_header.m_frameIdentifier,
'playerCarIndex': players_car
},
'carStatusData':{
'tractionControl': packet.m_carStatusData[players_car].m_tractionControl, # 0 (off) - 2 (high)
'antiLockBrakes': packet.m_carStatusData[players_car].m_antiLockBrakes, # 0 (off) - 1 (on)
'fuelMix': packet.m_carStatusData[players_car].m_fuelMix, # Fuel mix - 0 = lean, 1 = standard, 2 = rich, 3 = max
'frontBrakeBias': packet.m_carStatusData[players_car].m_frontBrakeBias,
'pitLimiterStatus': packet.m_carStatusData[players_car].m_pitLimiterStatus, # Pit limiter status - 0 = off, 1 = on
'fuelInTank': packet.m_carStatusData[players_car].m_fuelInTank,
'fuelCapacity': packet.m_carStatusData[players_car].m_fuelCapacity,
'maxRPM': packet.m_carStatusData[players_car].m_maxRPM,
'idleRPM': packet.m_carStatusData[players_car].m_idleRPM,
'maxGears': packet.m_carStatusData[players_car].m_maxGears,
'drsAllowed': packet.m_carStatusData[players_car].m_drsAllowed, # 0 = not allowed, 1 = allowed, -1 = unknown
# 'tyresWear': packet.m_carStatusData[players_car].m_tyresWear,
'tyresWear':{
'RL': packet.m_carStatusData[players_car].m_tyresWear[0],
'RR': packet.m_carStatusData[players_car].m_tyresWear[1],
'FL': packet.m_carStatusData[players_car].m_tyresWear[2],
'FR': packet.m_carStatusData[players_car].m_tyresWear[3]
},
'tyreCompound': packet.m_carStatusData[players_car].m_tyreCompound, # Modern - 0 = hyper soft, 1 = ultra soft, 2 = super soft, 3 = soft, 4 = medium, 5 = hard, 6 = super hard, 7 = inter, 8 = wet, Classic - 0-6 = dry, 7-8 = wet
'tyresDamage':{
'RL': packet.m_carStatusData[players_car].m_tyresDamage[0],
'RR': packet.m_carStatusData[players_car].m_tyresDamage[1],
'FL': packet.m_carStatusData[players_car].m_tyresDamage[2],
'FR': packet.m_carStatusData[players_car].m_tyresDamage[3]
},
'frontLeftWingDamage': packet.m_carStatusData[players_car].m_frontLeftWingDamage,
'frontRightWingDamage': packet.m_carStatusData[players_car].m_frontRightWingDamage,
'rearWingDamage': packet.m_carStatusData[players_car].m_rearWingDamage,
'engineDamage': packet.m_carStatusData[players_car].m_engineDamage,
'gearBoxDamage': packet.m_carStatusData[players_car].m_gearBoxDamage,
'exhaustDamage': packet.m_carStatusData[players_car].m_exhaustDamage,
'vehicleFiaFlags': packet.m_carStatusData[players_car].m_vehicleFiaFlags, # -1 = invalid/unknown, 0 = none, 1 = green, 2 = blue, 3 = yellow, 4 = red
'ersStoreEnergy': packet.m_carStatusData[players_car].m_ersStoreEnergy,
'ersDeployMode': packet.m_carStatusData[players_car].m_ersDeployMode, # ERS deployment mode, 0 = none, 1 = low, 2 = medium, 3 = high, 4 = overtake, 5 = hotlap
'ersHarvestedThisLapMGUK': packet.m_carStatusData[players_car].m_ersHarvestedThisLapMGUK,
'ersHarvestedThisLapMGUH': packet.m_carStatusData[players_car].m_ersHarvestedThisLapMGUH,
'ersDeployedThisLap': packet.m_carStatusData[players_car].m_ersDeployedThisLap
},
}
return status_data_json
def structs(packet_name, packet):
if packet_name == 'MotionData':
return MotionData(packet)
elif packet_name == 'SessionData':
return SessionData(packet)
elif packet_name == 'LapData':
return LapData(packet)
elif packet_name == 'EventData':
return EventData(packet)
elif packet_name == 'ParticipantData':
return ParticipantData(packet)
elif packet_name == 'CarSetupData':
return CarSetupData(packet)
elif packet_name == 'CarTelemetryData':
return CarTelemetryData(packet)
elif packet_name == 'PacketCarStatusData':
return PacketCarStatusData(packet)
else:
print "ERROR, PACKET IS NOT WHAT IS EXPECTED!"
return None
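# Hedged usage sketch: the websocket layer is expected to call structs() with
# the decoded UDP packet and its packet-type name, then serialise the result,
# roughly as below (names are assumptions, packet decoding happens elsewhere):
#
#     payload = structs('CarTelemetryData', packet)
#     if payload is not None:
#         message = json.dumps(payload)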
|
#!/usr/bin/env python3
import json
import os
from io import BytesIO
from flask import Flask, request, json as json_flask
from vosk import Model, KaldiRecognizer
MODEL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'model')
if not os.path.exists(MODEL_PATH):
print('Model folder not found ({}), bye.'.format(MODEL_PATH))
exit(1)
kaldi_model = Model(MODEL_PATH)
def stt(fp, buffer_size=8192) -> str:
kaldi = KaldiRecognizer(kaldi_model, 16000)
buf = bytearray(buffer_size)
im_ok = False
    while True:
        n = fp.readinto(buf)  # the last chunk may be shorter than the buffer
        if not n:
            break
        kaldi.AcceptWaveform(bytes(buf[:n]))
        im_ok = True
return json.loads(kaldi.FinalResult())['text'] if im_ok else ''
app = Flask(__name__, static_url_path='')
@app.route('/stt', methods=['GET', 'POST'])
def say():
if request.method == 'POST':
target = None
if request.headers.get('Transfer-Encoding') == 'chunked':
target = request.stream
elif request.data:
target = BytesIO(request.data)
if target is None:
code, text = 1, 'No data'
else:
try:
code, text = 0, stt(target)
except Exception as e:
code, text = 3, 'Internal error'
print('{}: {}'.format(text, e))
else:
code, text = 2, 'What do you want? I accept only POST!'
return json_flask.jsonify(text=text, code=code)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8086, threaded=False)
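# Hedged usage note (an assumption about the expected input, matching the
# 16000 Hz KaldiRecognizer above): POST 16 kHz, 16-bit mono PCM audio to the
# endpoint, e.g.
#   curl -X POST --data-binary @speech.wav http://localhost:8086/stt
# and the server answers with JSON such as {"code": 0, "text": "..."}.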
|
basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}
for b in basket:
print(b)
|
#!/usr/bin/env python
ONLINE_RETAIL_XLSX = 'OnlineRetail.xlsx'
ONLINE_RETAIL_CSV = 'OnlineRetail.csv'
ONLINE_RETAIL_JSON = 'OnlineRetail.json'
ONLINE_RETAIL_CUSTOMERS = 'OnlineRetailCustomers.csv'
def download_spreadsheet():
print('Starting download_spreadsheet() ...')
# support python 2 and 3
try:
# python 3
import urllib.request as urlrequest
except ImportError:
import urllib as urlrequest
source_url = "http://archive.ics.uci.edu/ml/machine-learning-databases/00352/Online%20Retail.xlsx"
urlrequest.urlretrieve(source_url, ONLINE_RETAIL_XLSX)
print('Finished download_spreadsheet() ...')
def create_csv():
print('Starting create_csv() ...')
import pandas as pd
import datetime
df = pd.read_excel(ONLINE_RETAIL_XLSX, sheetname='Online Retail')
# remove nan customer IDs
df = df[pd.notnull(df['CustomerID'])]
df['CustomerID'] = df['CustomerID'].astype(int)
# remove negative quantities - this also removes non-numeric InvoiceNo's
df = df.ix[df['Quantity'] > 0]
# Add a line item number for each item in an invoice
df['LineNo'] = df.groupby(['InvoiceNo']).cumcount()+1
# the dataset starts at approx 6am and finishes at approx 10pm
    # we want the data to span 24 hours so that it is interesting for UK and American
# demos. We may also want to do a similar thing for Asia Pacific, perhaps
# we should have three regions and an 8 hour difference between each region?
df_UK_Day = df.copy()
df_US_Day = df.copy()
# add a suffix to the invoice number so that UK and US records have a unique id
df_UK_Day['InvoiceNo'] = (df_UK_Day['InvoiceNo'].astype('str') + '1').astype(int)
df_US_Day['InvoiceNo'] = (df_US_Day['InvoiceNo'].astype('str') + '2').astype(int)
# Let's approximate the overall time difference between US and UK as 12 hours
df_US_Day['InvoiceDate'] = df_US_Day['InvoiceDate'] + datetime.timedelta(hours=12)
df = pd.concat([df_UK_Day, df_US_Day])
# Sort dataframe
df['InvoiceTime'] = pd.DatetimeIndex(df['InvoiceDate']).time
df.sort_values(by=['InvoiceTime', 'InvoiceNo'], inplace=True)
# finally save
df.to_csv(ONLINE_RETAIL_CSV, index=False, encoding='utf-8', header=False)
    df.to_json(ONLINE_RETAIL_JSON, orient='records', lines=True, date_format='epoch', date_unit='ms')
print('Finished create_csv() ...')
def create_customers():
print('Starting create_customers() ...')
import pandas as pd
import datetime
from faker import Faker
import numpy as np
# Load data
df_data_1 = pd.read_csv(ONLINE_RETAIL_CSV, header=None, usecols=[6])
# Get unique customer IDs
df_data_1 = df_data_1[6].unique()
# initiate faker
fake = Faker('en_GB')
# create lists of names and addresses
names = [ fake.name() for i in range(0,len(df_data_1)) ]
addresses = [ fake.address().replace("\n", " ") for i in range(0, len(df_data_1)) ]
# build df
df = pd.DataFrame({'CustomerID': df_data_1, 'Name': names, 'Address': addresses})
df['validFrom'] = datetime.datetime.now()
# df['validTo'] = np.NaN
# Sort dataframe
df.sort_values(by=['CustomerID'], inplace=True)
# finally save
df.to_csv(ONLINE_RETAIL_CUSTOMERS, index=False, encoding='utf-8', header=False)
print('Finished create_customers() ...')
def compress_files():
print('Starting compress_files() ...')
import gzip
import shutil
for filename in [ONLINE_RETAIL_XLSX, ONLINE_RETAIL_CSV, ONLINE_RETAIL_JSON, ONLINE_RETAIL_CUSTOMERS]:
with open(filename, 'rb') as f_in, gzip.open(filename + '.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
print('Finished compress_files() ...')
def remove_file(filename):
import os
try:
os.remove(filename)
except OSError:
pass
if __name__ == "__main__":
for filename in [ONLINE_RETAIL_XLSX, ONLINE_RETAIL_CSV, ONLINE_RETAIL_JSON, ONLINE_RETAIL_CUSTOMERS]:
remove_file(filename)
for filename in [ONLINE_RETAIL_XLSX + '.gz', ONLINE_RETAIL_CSV + '.gz', ONLINE_RETAIL_JSON + '.gz', ONLINE_RETAIL_CUSTOMERS + '.gz']:
remove_file(filename)
download_spreadsheet()
create_csv()
create_customers()
compress_files()
|
import unittest
import utils
import datetime
import random
from _base import BaseTestCase
from app.models import User, Session
from app import create_app
class UserModelTestCase(BaseTestCase):
def test_to_json(self):
now = datetime.datetime.now()
session = utils.generate_session(
words = 100,
chars = 200,
accuracy = 100.0,
created_date = now,
user_id = 1
)
user = utils.generate_user(
id = 1,
social_id = 2,
email = 'example@email.com',
username = 'example',
last_seen = now
)
self.assertEqual({
'id': 1,
'username': 'example',
'email': 'example@email.com',
'last_seen': now,
'sessions': 1,
'words_score': 100,
'chars_score': 200,
'accuracy_score': 100.0
}, user.to_json())
class SessionModelTestCase(BaseTestCase):
def test_to_json(self):
# no need to add this to the database
now = datetime.datetime.now()
session = Session(
id = 1,
words = 100,
chars = 200,
accuracy = 100.0,
created_date = now,
user_id = 1
)
self.assertEqual({
'id': 1,
'words': 100,
'chars': 200,
'accuracy': 100.0,
'created_date': now,
'user_id': 1
}, session.to_json())
def test_creation_date(self):
session = utils.generate_session(user_id=1)
self.assertTrue((
datetime.datetime.now() - session.created_date).total_seconds() < 3)
def test_repr(self):
session = utils.generate_session(user_id=10)
self.assertEqual('<Session {!r}>'.format(session.words), repr(session))
def test_within_day_interval(self):
user = utils.generate_user()
sessions = utils.generate_sessions_within(user.id,
lambda : datetime.timedelta(minutes=random.randint(0, 360)))
query_result = Session.query.today(user.id)
self.assertEqual(sorted(sessions), sorted(query_result))
self.assertEqual([], Session.query.today(-1))
def test_within_week_interval(self):
user = utils.generate_user()
sessions = utils.generate_sessions_within(user.id,
lambda : datetime.timedelta(days=random.randint(0, 7)))
query_result = Session.query.last_week(user.id)
self.assertEqual(sorted(sessions), sorted(query_result))
self.assertEqual([], Session.query.last_week(-1))
def test_within_month_interval(self):
user = utils.generate_user()
sessions = utils.generate_sessions_within(user.id,
lambda : datetime.timedelta(days=random.randint(0, 30)))
query_result = Session.query.last_month(user.id)
self.assertEqual(sorted(sessions), sorted(query_result))
self.assertEqual([], Session.query.last_month(-1))
|
# coding: utf-8
#------------------------------------------------------------
# A program that calculates the amount to be paid for a product,
# taking into account its normal price and the payment option:
#------------------------------------------------------------
# • cash/cheque up front: 10% discount
# • card, paid in full: 5% discount
# • up to 2 card instalments: normal price
# • 3 or more card instalments: 20% interest
#------------------------------------------------------------
# Payment Generator - Exercise #044
#------------------------------------------------------------
valor = float(input('Purchase amount: '))  # float so amounts with cents work
print('\033[31m_._.\033[m' * 22)  # decorative line (not important)
print('''[1] cash/cheque up front
[2] card, paid in full
[3] 2 card instalments
[4] 3 or more card instalments''')
print('\033[31m_._.' * 22)  # decorative line (not important)
opcao = int(input('Which option? :\033[m '))
if opcao == 1:
    prc = valor * 10 / 100
    print(f'\nYou got a 10% discount. The amount is now R${valor - prc:.2f}.')
elif opcao == 2:
    prc = valor * 5 / 100
    print(f'\nYou got a 5% discount. The amount is now R${valor - prc:.2f}.')
elif opcao == 3:
    prc = valor / 2
    print(f'\nYour purchase will be split into 2 instalments of R${prc:.2f}, interest free.')
elif opcao == 4:
    prc = valor + (valor * 20 / 100)
    vezes = int(input('\033[31mHow many instalments? :\033[m '))
    parcela = prc / vezes
    print(f'\nYour purchase will be split into {vezes} instalments of R${parcela:.2f}')
    print(f'With this option there is 20% interest, so the final amount will be R${prc:.2f}')
else:
    print('>>> Invalid option <<<')
print('\033[31m_._.\033[m' * 22)  # decorative line (not important)
|
from flask import Flask
from dash import Dash
def create_app(config_object='{}.settings'.format(__package__)):
server = Flask(__package__)
# load default settings
server.config.from_object(config_object)
# load additional settings that will override the defaults in settings.py. eg
# $ export SLAPDASH_SETTINGS=/some/path/prod_settings.py
server.config.from_envvar('SLAPDASH_SETTINGS', silent=True)
return server
def create_dash(server):
app = Dash(server=server)
app.title = server.config['TITLE']
app.config.routes_pathname_prefix = server.config['ROUTES_PATHNAME_PREFIX']
# Suppress callback validation as we will be initialising callbacks that target
# element IDs that won't yet occur in the layout.
app.config.supress_callback_exceptions = True
return app
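# A minimal wiring sketch (assumptions: the package's settings module defines
# TITLE and ROUTES_PATHNAME_PREFIX, and dash_html_components provides the
# layout components; the names below are illustrative only):
#
#     import dash_html_components as html
#
#     flask_server = create_app()
#     dash_app = create_dash(flask_server)
#     dash_app.layout = html.Div('Hello from the Dash factory sketch')
#     flask_server.run(debug=True)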
|
import allel
from collections import Counter, OrderedDict
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
from operator import itemgetter
import os
import sys
import re
from Admixture.utils import *
def write_founders_bcf(reference_file_bcf, sample_file, output_path, verb=True):
"""
    Copy contents of sample map into founders map, create the bcf.gz files and index them.
"""
# founders map - same as input map except renamed
founder_map_cmd = "cp %s %s/founders.map" % (sample_file, output_path)
run_shell_cmd(founder_map_cmd, verb=verb)
founders_bcf_cmd = "cat {0}/founders.map | cut -f 1 | bcftools view --output-type b --output-file {0}/founders.bcf.gz --samples-file - --threads 12 --force-samples {1}".format(output_path, reference_file_bcf)
run_shell_cmd(founders_bcf_cmd, verb=verb)
index_founders_cmd = "bcftools index -f %s/founders.bcf.gz" % (output_path)
run_shell_cmd(index_founders_cmd, verb=verb)
# create vcf files for the founders for downstream conversion into npy files.
founders_vcf_cmd = "bcftools view %s/founders.bcf.gz -o %s/founders.vcf -O v"% (output_path,output_path)
run_shell_cmd(founders_vcf_cmd, verb=verb)
def simulate(reference_file_bcf, sample_file, idx_to_pop_map, genetic_map_file,
generations, num_out, sim_output_path, chm, use_phase_shift):
# path to RFMix/simulate binary
rfmix_sim_path = "./Admixture/simulate"
# NOTE: If running from a different directory than XGMIX/, this needs to
# be updated.
# assume everyone in the sample map is founder
print("Creating founders.bcf.gz for {}".format(sample_file))
write_founders_bcf(reference_file_bcf, sample_file, sim_output_path)
for gen in generations:
print("-"*80)
print("Simulation for generation {} from {}".format(gen,sample_file))
# generation simulation output path
gen_path = join_paths(sim_output_path, "gen_"+str(gen))
out_basename = gen_path+'/admix'
# Simulating the individuals via rfmix simulation
print("simulating ...")
rfmix_sim_cmd = rfmix_sim_path + " -f %s/founders.bcf.gz -m %s/founders.map -g %s -o %s --growth-rate=1.5 --maximum-size=2000 --n-output=%d -c %s -G %d -p %f --random-seed=%d %s"
rfmix_sim = rfmix_sim_cmd % (sim_output_path, sim_output_path, genetic_map_file, out_basename, num_out, chm, gen, 0.0, 123, "--dephase" if use_phase_shift else "")
try:
run_shell_cmd(rfmix_sim, verb=True)
except:
print("something went wrong using rfmix/simulate ...", end=" ")
print("trying -c chr"+chm+" insted of -c "+chm+" ...")
rfmix_sim = rfmix_sim_cmd % (sim_output_path, sim_output_path, genetic_map_file, out_basename, num_out, "chr"+chm, gen, 0.0, 123, "--dephase" if use_phase_shift else "")
run_shell_cmd(rfmix_sim, verb=True)
        # reading .vcf output of simulation and converting to npy matrices
        print('reading .vcf output of simulation and converting to npy matrices ...')
vcf_data = allel.read_vcf(out_basename+".query.vcf")
chm_len, nout, _ = vcf_data["calldata/GT"].shape
mat_vcf_2d = vcf_data["calldata/GT"].reshape(chm_len,nout*2).T
np.save(gen_path+"/mat_vcf_2d.npy", mat_vcf_2d)
        # reading .map output of simulation and converting to npy matrices
        print('reading .map output of simulation and converting to npy matrices ...')
map_path = out_basename + ".result"
matrix = sample_map_to_matrix(map_path)
        # Finally, map them to the original labels (which can be further mapped to coordinates) and save
matrix = np.vectorize(idx_to_pop_map.get)(matrix)
np.save(gen_path+"/mat_map.npy",matrix)
print("-"*80)
print("Finishing up ...")
|
""" Contains upgrade tasks that are executed when the application is being
upgraded on the server. See :class:`onegov.core.upgrade.upgrade_task`.
"""
from libres.db.models import Allocation, Reservation
from onegov.core.upgrade import upgrade_task
from onegov.reservation import LibresIntegration
from onegov.reservation import Resource
from sqlalchemy import Column, Text
def run_upgrades(context):
""" onegov.reservation is a bit special because it defines its tables
    through its own declarative base. This is due to libres requiring its own
base.
As a consequence, not all applications loaded in the current process have
all the tables for all the modules (which is usually the case for all
onegov modules using the default onegov.core.orm.Base class).
This means we can only run the upgrades if Libres is integrated with
the current app.
"""
return isinstance(context.app, LibresIntegration)
@upgrade_task('Add form definition field')
def add_form_definition_field(context):
if run_upgrades(context):
context.operations.add_column(
'resources', Column('definition', Text, nullable=True)
)
@upgrade_task('Add resource group field')
def add_resource_group_field(context):
if run_upgrades(context):
context.operations.add_column(
'resources', Column('group', Text, nullable=True)
)
@upgrade_task('Add reservations/allocations type field')
def add_reservations_allocations_type_field(context):
if run_upgrades(context):
context.operations.add_column(
'reservations', Column('type', Text, nullable=True)
)
context.operations.add_column(
'allocations', Column('type', Text, nullable=True)
)
@upgrade_task('Make reservations/allocations payable')
def make_reservations_allocations_payable(context):
if run_upgrades(context):
for reservation in context.session.query(Reservation):
reservation.type = 'custom'
for allocation in context.session.query(Allocation):
allocation.type = 'custom'
@upgrade_task('Set defaults on existing resources')
def set_defaults_on_existing_reservation_resource_objects(context):
if run_upgrades(context):
for resource in context.session.query(Resource):
resource.payment_method = 'manual'
resource.pricing_method = 'free'
resource.price_per_hour = 0
resource.price_per_item = 0
resource.currency = 'CHF'
@upgrade_task('Add access_token to existing resources')
def add_access_token_to_existing_resources(context):
if run_upgrades(context):
for resource in context.session.query(Resource):
resource.renew_access_token()
@upgrade_task('Add default view to existing resource types')
def add_default_view_to_existing_resource_types(context):
if run_upgrades(context):
for resource in context.session.query(Resource):
if resource.type == 'daypass':
resource.default_view = 'month'
else:
resource.default_view = 'agendaWeek'
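# Hedged illustration (not part of the original module): any new upgrade task would
# follow the same pattern - guard with run_upgrades() and use the upgrade context.
# The task name and column below are purely hypothetical.
# @upgrade_task('Add example field')
# def add_example_field(context):
#     if run_upgrades(context):
#         context.operations.add_column(
#             'resources', Column('example', Text, nullable=True)
#         )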
|
def checkIfPossibleRec(x, isPossible, n):
if x > n:
return
if isPossible[x]:
return
isPossible[x] = True
checkIfPossibleRec(x + num1, isPossible, n)
checkIfPossibleRec(x + num2, isPossible, n)
num1, num2, res = 2020, 2021, []
for i in range(int(input())):
n = int(input())
isPossible = [False] * (n + 1)
checkIfPossibleRec(0, isPossible, n)
res.append("Yes" if isPossible[n] else "No")
for i in res:
print(i)
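# Hedged alternative sketch (not part of the original solution): the same
# reachability check without recursion, useful when n / 2020 levels of recursion
# would exceed Python's default recursion limit.
def check_if_possible_iterative(n, a=2020, b=2021):
    """Return True if n can be written as x*a + y*b with non-negative x, y."""
    possible = [False] * (n + 1)
    possible[0] = True
    for value in range(1, n + 1):
        possible[value] = (value >= a and possible[value - a]) or \
                          (value >= b and possible[value - b])
    return possible[n]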
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.datacatalog_v1.proto import datacatalog_pb2_grpc
class DataCatalogGrpcTransport(object):
"""gRPC transport class providing stubs for
google.cloud.datacatalog.v1 DataCatalog API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
def __init__(
self, channel=None, credentials=None, address="datacatalog.googleapis.com:443"
):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
"The `channel` and `credentials` arguments are mutually " "exclusive."
)
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
options={
"grpc.max_send_message_length": -1,
"grpc.max_receive_message_length": -1,
}.items(),
)
self._channel = channel
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
"data_catalog_stub": datacatalog_pb2_grpc.DataCatalogStub(channel)
}
@classmethod
def create_channel(
cls, address="datacatalog.googleapis.com:443", credentials=None, **kwargs
):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
kwargs (dict): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs
)
@property
def channel(self):
"""The gRPC channel used by the transport.
Returns:
grpc.Channel: A gRPC channel object.
"""
return self._channel
@property
def search_catalog(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.search_catalog`.
        Searches Data Catalog for multiple resources like entries and tags that match a query.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].SearchCatalog
@property
def create_entry_group(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.create_entry_group`.
        Creates an entry group.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].CreateEntryGroup
@property
def get_entry_group(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.get_entry_group`.
Gets an EntryGroup.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].GetEntryGroup
@property
def update_entry_group(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.update_entry_group`.
        Updates an entry group.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].UpdateEntryGroup
@property
def delete_entry_group(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.delete_entry_group`.
        Deletes an entry group.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].DeleteEntryGroup
@property
def list_entry_groups(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.list_entry_groups`.
Lists entry groups.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].ListEntryGroups
@property
def create_entry(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.create_entry`.
        Creates an entry.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].CreateEntry
@property
def update_entry(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.update_entry`.
        Updates an existing entry.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].UpdateEntry
@property
def delete_entry(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.delete_entry`.
        Deletes an existing entry.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].DeleteEntry
@property
def get_entry(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.get_entry`.
Gets an entry.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].GetEntry
@property
def lookup_entry(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.lookup_entry`.
Get an entry by target resource name. This method allows clients to use
the resource name from the source Google Cloud Platform service to get the
Data Catalog Entry.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].LookupEntry
@property
def list_entries(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.list_entries`.
Lists entries.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].ListEntries
@property
def create_tag_template(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.create_tag_template`.
        Creates a tag template.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].CreateTagTemplate
@property
def get_tag_template(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.get_tag_template`.
Gets a tag template.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].GetTagTemplate
@property
def update_tag_template(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.update_tag_template`.
        Updates a tag template.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].UpdateTagTemplate
@property
def delete_tag_template(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.delete_tag_template`.
        Deletes a tag template and all tags that use the template.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].DeleteTagTemplate
@property
def create_tag_template_field(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.create_tag_template_field`.
        Creates a field in a tag template.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].CreateTagTemplateField
@property
def update_tag_template_field(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.update_tag_template_field`.
        Updates a field in a tag template.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].UpdateTagTemplateField
@property
def rename_tag_template_field(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.rename_tag_template_field`.
        Renames a field in a tag template.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].RenameTagTemplateField
@property
def delete_tag_template_field(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.delete_tag_template_field`.
        Deletes a field in a tag template and all of its tag uses.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].DeleteTagTemplateField
@property
def create_tag(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.create_tag`.
        Creates a tag on an entry.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].CreateTag
@property
def update_tag(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.update_tag`.
Updates an existing tag.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].UpdateTag
@property
def delete_tag(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.delete_tag`.
Deletes a tag.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].DeleteTag
@property
def list_tags(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.list_tags`.
        Lists the tags on an entry.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].ListTags
@property
def set_iam_policy(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.set_iam_policy`.
        Sets the access control policy for a resource.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].SetIamPolicy
@property
def get_iam_policy(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.get_iam_policy`.
        Gets the access control policy for a resource.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].GetIamPolicy
@property
def test_iam_permissions(self):
"""Return the gRPC stub for :meth:`DataCatalogClient.test_iam_permissions`.
        Returns the permissions that a caller has on a resource.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs["data_catalog_stub"].TestIamPermissions
|
# Importing library
import qrcode
# Data to be encoded
data = 'nah bro you clownin'
# Encoding data using make() function
img = qrcode.make(data)
# Saving as an image file
img.save('MyQRCode1.png')
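# Hedged variant (not in the original snippet): the qrcode.QRCode class exposes more
# control than qrcode.make(); error correction level, box size and border are shown.
qr = qrcode.QRCode(
    version=None,                                        # let the library pick the smallest fitting size
    error_correction=qrcode.constants.ERROR_CORRECT_M,   # ~15% error recovery
    box_size=10,
    border=4,
)
qr.add_data(data)
qr.make(fit=True)
qr.make_image(fill_color="black", back_color="white").save('MyQRCode2.png')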
|
def for_E():
"""printing capital 'E' using for loop"""
for row in range(5):
for col in range(4):
if row==0 or col==0 or row==2 or row==4:
print("*",end=" ")
else:
print(" ",end=" ")
print()
def while_E():
"""printing capital 'E' using while loop"""
i=0
while i<5:
j=0
while j<4:
if j==0 or i==0 or i==2 or i==4:
print("*",end=" ")
else:
print(" ",end=" ")
j+=1
i+=1
print()
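# Minimal usage sketch (not in the original file): both helpers print the same 'E'.
if __name__ == "__main__":
    for_E()
    while_E()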
|
#!/usr/bin/python3
import subprocess
import shlex
import time
import sys
import os
import socket
pull_command = 'pull-images'
STDOUT = subprocess.STDOUT
FNULL = open(os.devnull, 'w')
def pullImage(image):
pull = 'docker pull '
pull_result = subprocess.call(shlex.split(pull + image))
if(pull_result > 0):
print('Error on pull image ' + image)
exit(1)
def tagImage(image, tag):
new_tagged = ''
if ':' in image:
new_tagged = image.split(':')[0] + ':' + tag
else:
new_tagged = image + ':' + tag
tag_command = 'docker tag ' + image + ' ' + new_tagged
tag_result = subprocess.call(shlex.split(tag_command))
if(tag_result > 0):
print('Error on tag image ' + new_tagged)
exit(1)
return new_tagged
def pushImage(image):
push = 'docker push '
push_result = subprocess.call(shlex.split(push + image))
if(push_result > 0):
print('Error on push image ' + image)
exit(1)
def getBindingVolumes():
command = 'docker inspect --format "{{ range .HostConfig.Binds }}{{.}}|{{end}}" ' + \
os.environ['HOSTNAME']
    bindingVolumes = subprocess.check_output(
        shlex.split(command)).decode().rstrip('\n')
return bindingVolumes
def getContainerImage():
command = 'docker inspect --format "{{ index .Config.Image }}" ' + \
os.environ['HOSTNAME']
    image = subprocess.check_output(shlex.split(command)).decode().rstrip('\n')
return image
def getContainerId():
return os.environ['HOSTNAME']
def getVersionFromHostContainer():
command = 'docker inspect --format "{{ index .Config.Labels.version }}" ' + \
os.environ['HOSTNAME']
    version = subprocess.check_output(shlex.split(command)).decode().rstrip('\n')
return version
def getRepoTag(imageTag):
command = 'docker inspect %s --format "{{index .RepoTags}}"' % (imageTag)
repoTag = ''
try:
        repoTag = subprocess.check_output(shlex.split(command)).decode()
except (TypeError, subprocess.CalledProcessError) as e:
repoTag = 'imageNotExists'
return repoTag
def getLocalImages():
command = 'docker images --format "{{.Repository}}:{{.Tag}}"'
local_images = []
try:
        local_images = subprocess.check_output(
            shlex.split(command)).decode().splitlines()
except (TypeError, subprocess.CalledProcessError) as e:
print('Error getting docker images: {}'.format(e))
return local_images
def deleteVolume(name):
print('')
print(' Deleting some volumes....')
command = 'docker volume rm ' + name
subprocess.call(shlex.split(command))
def deleteImages(images):
if (images):
command = 'docker rmi -f ' + images
subprocess.call(shlex.split(command), stderr=STDOUT, stdout=FNULL)
def deleteDanglingImages():
subcommand = 'docker images -f "dangling=true" -q'
    result = subprocess.check_output(shlex.split(subcommand)).decode().split('\n')
if (''.join(result)):
command = 'docker rmi -f ' + ' '.join(result)
print('Deleting dangling images')
try:
subprocess.check_output(shlex.split(command))
except subprocess.CalledProcessError:
print(' Unable to delete dangling images.')
def killContainer(container, signal):
if (signal is None or signal == ''):
command = 'docker kill %s ' % (container)
else:
command = 'docker kill --signal=%s %s ' % (signal, container)
p = subprocess.check_output(shlex.split(command))
return p
def executePlatformCommand(image, command, dockerArgs, commandArgs):
if (command == pull_command):
print('')
print(' Updating ElasTest components....')
print('')
command_line = ('docker run %s --rm -v /var/run/docker.sock:/var/run/docker.sock %s %s %s') % (
dockerArgs, image, command, commandArgs)
subprocess.call(shlex.split(command_line), stderr=STDOUT, stdout=FNULL)
def existsLocalImage(image):
if(':' not in image):
        image = image + ':latest'
return True if image in getLocalImages() else False
def containerExists(containerId):
exists = False
command = [
'docker ps -a --format "{{.Names}}" | grep -E "^' + containerId + '$"']
try:
        containerName = subprocess.check_output(
            command, shell=True).decode().split("\n")[0]
if(not containerName is None and containerName != ''):
exists = True
except TypeError:
exists = False
except subprocess.CalledProcessError:
exists = False
return exists
def containerExistsAndIsNotExited(containerId):
if(not containerExists(containerId)):
return False
notExited = True
command = 'docker container inspect -f "{{.State.ExitCode}}" ' + containerId
try:
        exitCode = subprocess.check_output(shlex.split(command)).decode().split("\n")[0]
if(exitCode != '0'):
notExited = False
except TypeError:
notExited = False
except subprocess.CalledProcessError:
notExited = False
return notExited
def writeContainerLogsToFile(containerId, completeFilePath):
if(not containerExists(containerId)):
return False
writed = True
command = 'docker logs ' + containerId + ' >& ' + completeFilePath
try:
exitCode = subprocess.call(command, shell=True)
if(exitCode != 0):
writed = False
except TypeError:
writed = False
except subprocess.CalledProcessError:
writed = False
return writed
def getWinHostMachineIp():
return getIpFromTraceRoute('docker.for.win.localhost')
def getMacHostMachineIp():
return getIpFromTraceRoute('docker.for.mac.localhost')
def getHostMachineIp():
return getIpFromTraceRoute('host.docker.internal')
def getIpFromTraceRoute(dns):
ip = None
try:
ip = socket.gethostbyname(dns)
except socket.error:
ip = None
return ip
def getHostOS():
if(not getWinHostMachineIp() is None):
return 'Windows'
elif(not getMacHostMachineIp() is None):
return 'Mac'
else:
return 'Other'
def hostOSIsWindows():
return getHostOS() == 'Windows'
def hostOSIsMac():
return getHostOS() == 'Mac'
def hostOSIsOther():
return getHostOS() == 'Other'
def getContainerIp(containerName, network):
command = "docker inspect --format=\"{{.NetworkSettings.Networks." + network + ".IPAddress}}\" "+ containerName
try:
        ip = subprocess.check_output(shlex.split(command), stderr=subprocess.PIPE).decode()
# remove /n
ip = ip.rstrip()
return ip
except subprocess.CalledProcessError:
raise Exception('Could not get the ip')
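# Hedged usage sketch (not part of the original script): these helpers assume the
# process runs inside a container with the Docker socket mounted, since several of
# them inspect os.environ['HOSTNAME'] as the container id. The 'backup' tag is a
# hypothetical example.
def _example_usage():
    image = getContainerImage()
    if not existsLocalImage(image):
        pullImage(image)
    tagged = tagImage(image, 'backup')
    print('Tagged as:', tagged, '- host OS detected as:', getHostOS())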
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-29 16:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0019_delete_filter'),
('home', '0002_create_homepage'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='heading',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='hero_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AddField(
model_name='homepage',
name='sub_heading',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]
|
# -*- coding:utf-8 -*-
# !/usr/bin/env python
import argparse
import json
import matplotlib.pyplot as plt
import skimage.io as io
# from pycocotools.coco import COCO
import cv2
from labelme import utils
import numpy as np
import glob
import PIL.Image
import PIL.ImageDraw
import os
class MyEncoder(json.JSONEncoder):
def default(self,obj):
if isinstance(obj,np.integer):
return int(obj)
elif isinstance(obj,np.floating):
return float(obj)
elif isinstance(obj,np.ndarray):
return obj.tolist()
else:
return super(MyEncoder,self).default(obj)
class labelme2coco(object):
def __init__(self,labelme_json=[],save_json_path='./train_data/coco.json'):
'''
        :param labelme_json: list of paths to all labelme JSON files
        :param save_json_path: path where the resulting COCO JSON is saved
'''
self.labelme_json=labelme_json
self.save_json_path=save_json_path
self.images=[]
self.categories=[]
self.annotations=[]
# self.data_coco = {}
self.label=[]
self.annID=1
self.height=0
self.width=0
self.save_json()
def data_transfer(self):
for num,json_file in enumerate(self.labelme_json):
with open(json_file,'r') as fp:
                data = json.load(fp)  # load the labelme JSON file
self.images.append(self.image(data,num))
for shapes in data['shapes']:
label=shapes['label']
if label not in self.label:
self.categories.append(self.categorie(label))
self.label.append(label)
                    points = shapes['points']  # these points come from a rectangle annotation, so there are only two corners; four points are needed
#points.append([points[0][0],points[1][1]])
#points.append([points[1][0],points[0][1]])
self.annotations.append(self.annotation(points,label,num))
self.annID+=1
def image(self,data,num):
image={}
        img = utils.img_b64_to_arr(data['imageData'])  # decode the original image data embedded in the JSON
        # img = io.imread(data['imagePath'])  # alternatively, open the image via its file path
        # img = cv2.imread(data['imagePath'], 0)
height,width=img.shape[:2]
img=None
image['height']=height
image['width']=width
image['id']=num+1
image['file_name'] = data['imagePath'].split('/')[-1]
#image['file_name']=data['imagePath'][3:14]
self.height=height
self.width=width
return image
def categorie(self,label):
categorie={}
categorie['supercategory']='MetaClass'
        categorie['id'] = len(self.label) + 1  # id 0 is reserved for the background by default
categorie['name']=label
return categorie
def annotation(self,points,label,num):
annotation={}
annotation['segmentation']=[list(np.asarray(points).flatten())]
annotation['iscrowd']=0
annotation['image_id']=num+1
        # annotation['bbox'] = str(self.getbbox(points))  # saving it as a list raised an error when writing the JSON (reason unknown)
        # list(map(int, a[1:-1].split(','))) with a = annotation['bbox']  # use this to convert the string back into a list
annotation['bbox']=list(map(float,self.getbbox(points)))
annotation['area']=annotation['bbox'][2]*annotation['bbox'][3]
# annotation['category_id'] = self.getcatid(label)
        annotation['category_id'] = self.getcatid(label)  # note: the original code defaulted this to 1
annotation['id']=self.annID
return annotation
def getcatid(self,label):
for categorie in self.categories:
if label==categorie['name']:
return categorie['id']
return 1
def getbbox(self,points):
# img = np.zeros([self.height,self.width],np.uint8)
        # cv2.polylines(img, [np.asarray(points)], True, 1, lineType=cv2.LINE_AA)  # draw the boundary lines
        # cv2.fillPoly(img, [np.asarray(points)], 1)  # fill the polygon with pixel value 1
polygons=points
mask=self.polygons_to_mask([self.height,self.width],polygons)
return self.mask2box(mask)
def mask2box(self,mask):
        '''Recover the bounding box from a mask.
        mask: an [h, w] image made up of 0s and 1s.
        1 marks the object; the bounding box follows from the min/max row and column
        indices of the 1-valued pixels (top-left and bottom-right corners).
        '''
# np.where(mask==1)
index=np.argwhere(mask==1)
rows=index[:,0]
clos=index[:,1]
        # top-left corner (row, column)
left_top_r=np.min(rows) # y
left_top_c=np.min(clos) # x
        # bottom-right corner (row, column)
right_bottom_r=np.max(rows)
right_bottom_c=np.max(clos)
# return [(left_top_r,left_top_c),(right_bottom_r,right_bottom_c)]
# return [(left_top_c, left_top_r), (right_bottom_c, right_bottom_r)]
# return [left_top_c, left_top_r, right_bottom_c, right_bottom_r] # [x1,y1,x2,y2]
        return [left_top_c, left_top_r, right_bottom_c - left_top_c,
                right_bottom_r - left_top_r]  # [x1, y1, w, h], matching the COCO bbox format
def polygons_to_mask(self,img_shape,polygons):
mask=np.zeros(img_shape,dtype=np.uint8)
mask=PIL.Image.fromarray(mask)
xy=list(map(tuple,polygons))
PIL.ImageDraw.Draw(mask).polygon(xy=xy,outline=1,fill=1)
mask=np.array(mask,dtype=bool)
return mask
def data2coco(self):
data_coco={}
data_coco['images']=self.images
data_coco['categories']=self.categories
data_coco['annotations']=self.annotations
return data_coco
def save_json(self):
self.data_transfer()
self.data_coco=self.data2coco()
        # write the COCO JSON file
        json.dump(self.data_coco, open(self.save_json_path, 'w'), indent=4, cls=MyEncoder)  # indent=4 for readable output
labelme_json=glob.glob('./raw_data/*.json')
# labelme_json=['./Annotations/*.json']
labelme2coco(labelme_json,'./train_data/coco.json')
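# Hedged note (not in the original script): the output directory is assumed to
# exist; a more defensive invocation would create it first, e.g.
# os.makedirs('./train_data', exist_ok=True)
# labelme2coco(glob.glob('./raw_data/*.json'), './train_data/coco.json')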
|
import sys
import os
import cv2
import numpy as np
# import imutils
from four_point_transform import four_point_transform
fullpath=os.path.dirname(os.path.realpath(__file__))
timestamp = sys.argv[1]
file_ext = sys.argv[2]
def func(image):
bordersize=100
image_padded=cv2.copyMakeBorder(image, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=[255,255,255])
cpy = image_padded.copy()
gray = cv2.cvtColor(cpy,cv2.COLOR_BGR2GRAY)
#cv2.imshow("gray", imutils.resize(gray, height = 650))
#cv2.waitKey(0)
noiserem = cv2.GaussianBlur(gray, (5,5), 0)
edges = cv2.Canny(noiserem,150,200,apertureSize = 3)
#cv2.imshow("edges", imutils.resize(edges, height = 650))
#cv2.waitKey(0)
lines = cv2.HoughLines(edges,2,np.pi/180,600)
for x in range(0, len(lines)):
#for x1,y1,x2,y2 in lines[x]:
for rho,theta in lines[x]:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 10000*(-b))
y1 = int(y0 + 10000*(a))
x2 = int(x0 - 10000*(-b))
y2 = int(y0 - 10000*(a))
cv2.line(cpy,(x1,y1),(x2,y2),(0,0,255),5)
#cv2.imshow("This",imutils.resize(cpy, height = 650))
#cv2.waitKey(0)
x_tl = -10000
y_tl = -10000
x_tr = 10000
y_tr = -10000
x_bl = -10000
y_bl = 10000
x_br = 10000
y_br = 10000
points = {}
#cv2.circle(cpy, (20, 1669), 50, (0,255,0), 10)
for x in range(len(lines)):
for y in range(x+1, len(lines)):
#print(lines)
r1,theta1 = lines[x][0]
r2,theta2 = lines[y][0]
#print(abs(theta1-theta2))
if((theta1 > 10.0*np.pi/180.0 and theta1< 80.0*np.pi/180.0) or (theta1 > 100.0*np.pi/180.0 and theta1< 170.0*np.pi/180.0)):
continue
if((theta2 > 10.0*np.pi/180.0 and theta2< 80.0*np.pi/180.0) or (theta2 > 100.0*np.pi/180.0 and theta2< 170.0*np.pi/180.0)):
continue
if(abs(theta1 - theta2)<5.0*np.pi/180.0):
continue
if(abs(theta1 - theta2)>175.0*np.pi/180.0):
continue
#print([theta1*180.0/np.pi ,theta2*180.0/np.pi])
s1 = np.sin(theta1)
c1 = np.cos(theta1)
s2 = np.sin(theta2)
c2 = np.cos(theta2)
d = np.sin(theta2 - theta1)
            x1 = int(round((r1*s2 - r2*s1)/d))
            y1 = int(round((r2*c1 - r1*c2)/d))
#points.append([x,y])
cv2.circle(cpy,(x1,y1), 50, (255,0,0), 10)
if( (0 - x1)*(0 - x1) + (0 - y1)*(0 - y1) < (0 - x_tl)*(0 - x_tl) + (0 - y_tl)*(0 - y_tl) ):
x_tl = x1
y_tl = y1
if( (0 - x1)*(0 - x1) + (cpy.shape[0] - y1)*(cpy.shape[0] - y1) < (0 - x_bl)*(0 - x_bl) + (cpy.shape[0] - y_bl)*(cpy.shape[0] - y_bl) ):
x_bl = x1
y_bl = y1
if( (cpy.shape[1] - x1)*(cpy.shape[1] - x1) + (0 - y1)*(0 - y1) < (cpy.shape[1] - x_tr)*(cpy.shape[1] - x_tr) + (0 - y_tr)*(0 - y_tr) ):
x_tr = x1
y_tr = y1
if( (cpy.shape[1] - x1)*(cpy.shape[1] - x1) + (cpy.shape[0] - y1)*(cpy.shape[0] - y1) < (cpy.shape[1] - x_br)*(cpy.shape[1] - x_br) + (cpy.shape[0] - y_br)*(cpy.shape[0] - y_br) ):
x_br = x1
y_br = y1
cv2.circle(cpy,(x_tl,y_tl), 200, (255,255,0), 10)
cv2.circle(cpy,(x_tr,y_tr), 150, (255,255,0), 10)
cv2.circle(cpy,(x_bl,y_bl), 100, (255,255,0), 10)
cv2.circle(cpy,(x_br,y_br), 50, (255,255,0), 10)
cv2.line(image_padded ,(x_tl,y_tl), (x_tr, y_tr), (255,0,0), 10)
cv2.line(image_padded ,(x_tr,y_tr), (x_br, y_br), (255,0,0), 10)
cv2.line(image_padded ,(x_br,y_br), (x_bl, y_bl), (255,0,0), 10)
cv2.line(image_padded ,(x_bl,y_bl), (x_tl, y_tl), (255,0,0), 10)
#cv2.imshow("This",imutils.resize(cpy, height = 650))
#cv2.waitKey(0)
points = np.array([ [x_tl, y_tl], [x_bl, y_bl], [x_br,y_br], [x_tr, y_tr]])
return image_padded,points
def pre_process(image):
cpy = image.copy()
#cv2.imshow("original",imutils.resize(image,height = 650))
cpy,points = func(image)
#cv2.imshow("ouput", imutils.resize(cpy,height = 650))
warped = four_point_transform(cpy, points)
#cv2.imshow("warped", imutils.resize(warped, height = 650))
#cv2.waitKey(0)
return warped
img1=cv2.imread(fullpath+'/uploads/'+timestamp+'/input.'+file_ext)
warped = pre_process(img1)
img1=cv2.resize(warped,(589,821))
cv2.imwrite(fullpath+'/uploads/'+timestamp+'/preprocessed-resized.'+file_ext,img1)
ret,binary = cv2.threshold(img1,205,255,cv2.THRESH_BINARY)
cv2.imwrite(fullpath+'/uploads/'+timestamp+'/binary_for_frontend.'+file_ext, binary)
img=img1
cv_size = lambda img: tuple(img.shape[1::-1])
s=cv_size(img)
m=s[1]/821
ret,img = cv2.threshold(img,120,255,cv2.THRESH_BINARY)
cv2.rectangle(img,(int(8*m),int(8*m)),(int(579*m),int(136*m)),(0,255,0),5)
cv2.rectangle(img,(int(8*m),int(136*m)),(int(579*m),int(204*m)),(0,255,0),5)
cv2.rectangle(img,(int(8*m),int(204*m)),(int(579*m),int(501*m)),(0,255,0),5)
cv2.rectangle(img,(int(8*m),int(501*m)),(int(579*m),int(813*m)),(0,255,0),5)
cv2.imwrite(fullpath+'/uploads/'+timestamp+'/ROIsegmented.'+file_ext,img)
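# Hedged usage note (not part of the original script): it is driven by two CLI
# arguments, an upload-folder timestamp and the file extension, with the input
# expected at uploads/<timestamp>/input.<ext>, e.g.
#   python <this_script>.py 1589912345 jpg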
|
"""
get_symbols.py
Date Created: March 13th, 2020
Author: georgefahmy
Description: Generate the list of stock symbols used for searching the stocks.
"""
# Base Python #
import ftplib
import logging
import os
import re
logger = logging.getLogger(__name__)
LOGGER_FORMAT = "[%(asctime)s] %(levelname)s: %(message)s"
DATE_FORMAT = "%m/%d/%Y %H:%M:%S"
def main():
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(LOGGER_FORMAT, datefmt=DATE_FORMAT))
logger.addHandler(handler)
# Connect to ftp.nasdaqtrader.com
logger.info("Generating ticker list...")
ftp = ftplib.FTP("ftp.nasdaqtrader.com", "anonymous", "anonymous@debian.org")
# Download files nasdaqlisted.txt and otherlisted.txt from ftp.nasdaqtrader.com
for file in ["nasdaqlisted.txt", "otherlisted.txt"]:
ftp.cwd("/SymbolDirectory")
localfile = open(file, "wb")
ftp.retrbinary("RETR " + file, localfile.write)
localfile.close()
ftp.quit()
# Grep for common stock in nasdaqlisted.txt and otherlisted.txt
for file in ["nasdaqlisted.txt", "otherlisted.txt"]:
localfile = open(file, "r")
for line in localfile:
ticker = line.split("|")[0]
name = line.split("|")[1]
# Append tickers to file tickers.txt
open("tickers.txt", "a+").write(ticker + "|" + name + "\n")
logger.info("Done!")
if __name__ == "__main__":
main()
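# Hedged refinement sketch (not part of the original script): the NASDAQ symbol
# files start with a header row and end with a "File Creation Time" footer, both of
# which the loop above copies into tickers.txt. A stricter parser could skip them
# (the header field names below are assumptions about the file format):
def _clean_lines(path):
    """Yield (ticker, name) pairs, skipping header and footer rows."""
    with open(path, "r") as handle:
        for line in handle:
            fields = line.rstrip("\n").split("|")
            if len(fields) < 2 or fields[0] in ("Symbol", "ACT Symbol"):
                continue
            if line.startswith("File Creation Time"):
                continue
            yield fields[0], fields[1]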
|
from datetime import datetime
from typing import Union
from .const import PAGE, PAGE_SIZE
from .request_handler import RequestHandler
class BaseArrAPI(RequestHandler):
"""Base functions in all Arr API's"""
def __init__(self, host_url, api_key, ver_uri="/"):
self.ver_uri = ver_uri
super().__init__(host_url, api_key)
# CALENDAR
# GET /calendar/
def get_calendar(self, start_date=None, end_date=None, unmonitored=True):
"""Gets upcoming releases by monitored, if start/end are not
supplied, today and tomorrow will be returned
Args:
start_date (:obj:`datetime`, optional): ISO8601 start datetime. Defaults to None.
end_date (:obj:`datetime`, optional): ISO8601 end datetime. Defaults to None.
unmonitored (bool, optional): Include unmonitored movies. Defaults to True.
Returns:
JSON: Array
"""
path = "calendar"
params = {}
if start_date:
params["start"] = datetime.strptime(start_date, "%Y-%m-%d").strftime(
"%Y-%m-%d"
)
if end_date:
params["end"] = datetime.strptime(end_date, "%Y-%m-%d").strftime("%Y-%m-%d")
params["unmonitored"] = unmonitored
return self.request_get(path, self.ver_uri, params=params)
# SYSTEM
# GET /system/status
def get_system_status(self):
"""Returns system status
Returns:
JSON: Array
"""
path = "system/status"
return self.request_get(path, self.ver_uri)
# GET /health
def get_health(self):
"""Query radarr for health information
Returns:
JSON: Array
"""
path = "health"
return self.request_get(path, self.ver_uri)
# GET /metadata
def get_metadata(self):
"""Get all metadata consumer settings
Returns:
JSON: Array
"""
path = "metadata"
return self.request_get(path, self.ver_uri)
# GET /update
def get_update(self):
"""Will return a list of recent updated to Radarr
Returns:
JSON: Array
"""
path = "update"
return self.request_get(path, self.ver_uri)
# GET /rootfolder
def get_root_folder(self):
"""Get list of root folders, free space and any unmappedFolders
Returns:
JSON: Array
"""
path = "rootfolder"
return self.request_get(path, self.ver_uri)
# DELETE /rootfolder
def del_root_folder(self, id_): # sourcery skip: class-extract-method
"""Delete root folder with specified id
Args:
            id_ (int): Root folder id from database
Returns:
JSON: Array
"""
params = {"id": id_}
path = "rootfolder"
return self.request_del(path, self.ver_uri, params=params)
# GET /diskspace
def get_disk_space(self):
"""Query disk usage information
System > Status
Returns:
JSON: Array
"""
path = "diskspace"
return self.request_get(path, self.ver_uri)
# GET /system/backup
def get_backup(self):
"""Returns the list of available backups
Returns:
JSON: Array
"""
path = "system/backup"
return self.request_get(path, self.ver_uri)
# LOGS
# GET /log
def get_log(
self,
page=PAGE,
page_size=PAGE_SIZE,
sort_key="time",
sort_dir="desc",
filter_key=None,
filter_value="All",
):
"""Gets logs
Args:
            page (int, optional): Specify page to return. Defaults to 1.
page_size (int, optional): Number of items per page. Defaults to 10.
sort_key (str, optional): Field to sort by. Defaults to "time".
sort_dir (str, optional): Direction to sort. Defaults to "desc".
filter_key (str, optional): Key to filter by. Defaults to None.
filter_value (str, optional): Value of the filter. Defaults to "All".
Returns:
JSON: Array
"""
params = {
"page": page,
"pageSize": page_size,
"sortKey": sort_key,
"sortDir": sort_dir,
"filterKey": filter_key,
"filterValue": filter_value,
}
return self.request_get("log", self.ver_uri, params=params)
# GET /history
# TODO: check the ID on this method may need to move to specific APIs
def get_history(
self, sort_key="date", page=PAGE, page_size=PAGE_SIZE, sort_dir="desc", id_=None
):
"""Gets history (grabs/failures/completed)
Args:
sort_key (str, optional): series.title or date. Defaults to "date".
page (int, optional): Page number to return. Defaults to 1.
page_size (int, optional): Number of items per page. Defaults to 10.
sort_dir (str, optional): Direction to sort the items. Defaults to "desc".
id_ (int, optional): Filter to a specific episode ID. Defaults to None.
Returns:
JSON: Array
"""
path = "history"
params = {
"sortKey": sort_key,
"page": page,
"pageSize": page_size,
"sortDir": sort_dir,
}
if id_:
params["episodeId"] = id_
return self.request_get(path, self.ver_uri, params=params)
# BLOCKLIST
# GET /blocklist
def get_blocklist(
self,
page=PAGE,
page_size=PAGE_SIZE,
sort_direction="descending",
sort_key="date",
):
"""Returns blocked releases.
Args:
page (int, optional): Page to be returned. Defaults to 1.
page_size (int, optional): Number of results per page. Defaults to 10.
sort_direction (str, optional): Direction to sort items. Defaults to "descending".
sort_key (str, optional): Field to sort by. Defaults to "date".
Returns:
JSON: Array
"""
params = {
"page": page,
"pageSize": page_size,
"sortDirection": sort_direction,
"sortKey": sort_key,
}
path = "blocklist"
return self.request_get(path, self.ver_uri, params=params)
# DELETE /blocklist
def del_blocklist(self, id_):
"""Removes a specific release (the id provided) from the blocklist
Args:
id_ (int): blocklist id from database
Returns:
JSON: Array
"""
params = {"id": id_}
path = "blocklist"
return self.request_del(path, self.ver_uri, params=params)
# DELETE /blocklist/bulk
def del_blocklist_bulk(self, data):
"""Delete blocked releases in bulk
Args:
data (dict): blocklists that should be deleted
Returns:
JSON: 200 Ok, 401 Unauthorized
"""
path = "blocklist/bulk"
return self.request_del(path, self.ver_uri, data=data)
# PROFILES
# GET /qualityprofile/{id}
def get_quality_profile(self, id_=None):
"""Gets all quality profiles or specific one with id_
Args:
id_ (int): quality profile id from database
Returns:
JSON: Array
"""
path = f"qualityprofile/{id_}" if id_ else "qualityprofile"
return self.request_get(path, self.ver_uri)
# PUT /qualityprofile/{id}
def upd_quality_profile(self, id_, data):
"""Update the quality profile data.
Note:
To be used in conjunction with get_quality_profile()
Args:
id_ (int): Profile ID to Update
data (dict): All parameters to update
Returns:
JSON: Array
"""
path = f"qualityprofile/{id_}"
return self.request_put(path, self.ver_uri, data=data)
# DELETE /qualityprofile
def del_quality_profile(self, id_):
"""Removes a specific quality profile from the blocklist
Args:
id_ (int): quality profile id from database
Returns:
JSON: Array
"""
params = {"id": id_}
path = "qualityprofile"
return self.request_del(path, self.ver_uri, params=params)
# GET /qualitydefinition/{id}
def get_quality_definition(self, id_=None):
"""Gets all quality definitions or specific one by ID
Args:
id_ (int, optional): Import list database id. Defaults to None.
Returns:
JSON: Array
"""
path = f"qualitydefinition/{id_}" if id_ else "qualitydefinition"
return self.request_get(path, self.ver_uri)
# PUT /qualitydefinition/{id}
def upd_quality_definition(self, id_, data):
"""Update the quality definitions.
Note:
To be used in conjunction with get_quality_definition()
Args:
id_ (int): ID of definition to update
data (dict): All parameters to update
Returns:
JSON: Array
"""
path = f"qualitydefinition/{id_}"
return self.request_put(path, self.ver_uri, data=data)
# INDEXER
# GET /indexer/{id}
def get_indexer(self, id_=None):
"""Get all indexers or specific by id_
Args:
            id_ (int, optional): Database id of the indexer to return. Defaults to None.
Returns:
JSON: Array
"""
path = f"indexer/{id_}" if id_ else "indexer"
return self.request_get(path, self.ver_uri)
# PUT /indexer/{id}
def upd_indexer(self, id_, data):
"""Edit a Indexer by database id
Note:
To be used in conjunction with get_indexer()
Args:
id_ (int): Indexer database id
data (dict): data to be updated within Indexer
Returns:
JSON: Array
"""
path = f"indexer/{id_}"
return self.request_put(path, self.ver_uri, data=data)
# DELETE /indexer
def del_indexer(self, id_):
"""Removes a specific indexer from the blocklist
Args:
id_ (int): indexer id from database
Returns:
JSON: Array
"""
params = {"id": id_}
path = "indexer"
return self.request_del(path, self.ver_uri, params=params)
# QUEUE
# DELETE /queue/{id}
def del_queue(self, id_, remove_from_client=True, blacklist=True):
"""Remove an item from the queue and optionally blacklist it
Args:
id_ (int): id of the item to be removed
remove_from_client (bool, optional): Remove the item from the client. Defaults to True.
blacklist (bool, optional): Add the item to the blacklist. Defaults to True.
Returns:
JSON: 200 Ok, 401 Unauthorized
"""
params = {"removeFromClient": remove_from_client, "blacklist": blacklist}
path = f"queue/{id_}"
return self.request_del(path, self.ver_uri, params=params)
# GET /system/task/{id}
def get_task(self, id_=None):
"""Return a list of tasks, or specify a task ID to return single task
Args:
id_ (int): ID for task
Returns:
JSON: Array
"""
path = f"system/task/{id_}" if id_ else "system/task"
return self.request_get(path, self.ver_uri)
# GET /remotepathmapping
def get_remote_path_mapping(self, id_: Union[int, None] = None):
"""Get remote path mappings for downloads Directory
Args:
id_ (Union[int, None], optional): ID for specific record. Defaults to None.
Returns:
JSON: Array
"""
_path = "" if isinstance(id_, str) or id_ is None else f"/{id_}"
return self.request_get(f"remotepathmapping{_path}", self.ver_uri)
# CONFIG
# GET /config/ui
def get_config_ui(self):
"""Query Radarr for UI settings
Returns:
JSON: Array
"""
path = "config/ui"
return self.request_get(path, self.ver_uri)
# PUT /config/ui
def upd_config_ui(self, data):
"""Edit one or many UI settings and save to to the database
Args:
data (dict): data to be Updated
Returns:
JSON: 200 Ok, 401 Unauthorized
"""
path = "config/ui"
return self.request_put(path, self.ver_uri, data=data)
# GET /config/host
def get_config_host(self):
"""Get General/Host settings.
Returns:
JSON: Array
"""
path = "config/host"
return self.request_get(path, self.ver_uri)
# PUT /config/host
def upd_config_host(self, data):
"""Edit General/Host settings.
Args:
data (dict): data to be updated
Returns:
JSON: 200 Ok, 401 Unauthorized
"""
path = "config/host"
return self.request_put(path, self.ver_uri, data=data)
# GET /config/naming
def get_config_naming(self):
"""Get Settings for file and folder naming.
Returns:
JSON: Array
"""
path = "config/naming"
        return self.request_get(path, self.ver_uri)
# PUT /config/naming
def upd_config_naming(self, data):
"""Edit Settings for file and folder naming.
Args:
data (dict): data to be updated
Returns:
JSON: 200 Ok, 401 Unauthorized
"""
path = "config/naming"
return self.request_put(path, self.ver_uri, data=data)
# GET /config/mediamanagement
def get_media_management(self):
"""Get media management configuration
Returns:
JSON: Array
"""
path = "config/mediamanagement"
return self.request_get(path, self.ver_uri)
# NOTIFICATIONS
# GET /notification
def get_notification(self, id_: Union[int, None] = None):
"""Get a list of all notification services, or single by ID
Args:
id_ (int | None, optional): Notification ID. Defaults to None.
Returns:
JSON: Array
"""
_path = "" if isinstance(id_, str) or id_ is None else f"/{id_}"
return self.request_get(f"notification{_path}", self.ver_uri)
# GET /notification/schema
def get_notification_schema(self):
"""Get possible notification connections
Returns:
JSON: Array
"""
path = "notification/schema"
return self.request_get(path, self.ver_uri)
# PUT /notification/{id}
def upd_notification(self, id_, data):
"""Edit notification by database id
Args:
id_ (int): Database id of notification
data (dict): data that requires updating
Returns:
JSON: 200 Ok, 401 Unauthorized
"""
path = f"notification/{id_}"
return self.request_put(path, self.ver_uri, data=data)
# DELETE /notification/{id}
def del_notification(self, id_):
"""Delete a notification by its database id
Args:
id_ (int): Database id of notification
Returns:
JSON: 201 Ok, 401 Unauthorized
"""
path = f"notification/{id_}"
return self.request_del(path, self.ver_uri)
# TAGS
# GET /tag/{id}
def get_tag(self, id_=None):
"""Returns all tags or specific tag by database id
Args:
id_ (int, optional): Database id for tag. Defaults to None.
Returns:
JSON: Array
"""
path = f"tag/{id_}" if id_ else "tag"
return self.request_get(path, self.ver_uri)
# GET /tag/detail/{id}
def get_tag_detail(self, id_=None):
"""Returns all tags or specific tag by database id with detailed information
Args:
id_ (int, optional): Database id for tag. Defaults to None.
Returns:
JSON: Array
"""
path = f"tag/detail/{id_}" if id_ else "tag/detail"
return self.request_get(path, self.ver_uri)
# POST /tag
def create_tag(self, label):
"""Adds a new tag
Args:
label (str): Tag name / label
Returns:
JSON: Array
"""
data = {"label": label}
path = "tag"
return self.request_post(path, self.ver_uri, data=data)
# PUT /tag/{id}
def upd_tag(self, id_, label):
"""Update an existing tag
Note:
You should perform a get_tag() and submit the full body with changes
Args:
id_ (int): Database id of tag
label (str): tag name / label
Returns:
JSON: Array
"""
data = {"id": id_, "label": label}
path = f"tag/{id_}"
return self.request_put(path, self.ver_uri, data=data)
# DELETE /tag/{id}
def del_tag(self, id_):
"""Delete the tag with the given ID
Args:
id_ (int): Database id of tag
Returns:
JSON: {}
"""
path = f"tag/{id_}"
return self.request_del(path, self.ver_uri)
# DOWNLOAD CLIENT
# GET /downloadclient/{id}
def get_download_client(self, id_=None):
"""Get a list of all the download clients or a single client by its database id
Args:
id_ (int, optional): Download client database id. Defaults to None.
Returns:
JSON: Array
"""
path = f"downloadclient/{id_}" if id_ else "downloadclient"
return self.request_get(path, self.ver_uri)
# GET /downloadclient/schema
def get_download_client_schema(self, implementation_: Union[str, None] = None):
"""Gets the schemas for the different download Clients
Args:
implementation_ (Union[str, None], optional): Client implementation name. Defaults to None.
Returns:
JSON: Array
"""
schemas: dict = self.request_get("downloadclient/schema", self.ver_uri)
if implementation_:
return [
schema
for schema in schemas
if schema["implementation"] == implementation_
]
return schemas
# POST /downloadclient/
def add_download_client(self, data):
"""Add a download client based on the schema information supplied
Note:
Recommended to be used in conjunction with get_download_client_schema()
Args:
data (dict): dictionary with download client schema and settings
Returns:
JSON: Array
"""
return self.request_post("downloadclient", self.ver_uri, data=data)
# PUT /downloadclient/{id}
def upd_download_client(self, id_, data):
"""Edit a downloadclient by database id
Args:
id_ (int): Download client database id
data (dict): data to be updated within download client
Returns:
JSON: 200 Ok
"""
path = f"downloadclient/{id_}"
return self.request_put(path, self.ver_uri, data=data)
# DELETE /downloadclient/{id}
def del_download_client(self, id_):
"""Delete a download client by database id
Args:
id_ (int): download client database id
Returns:
JSON: 200 Ok
"""
path = f"downloadclient/{id_}"
return self.request_del(path, self.ver_uri)
# IMPORT LIST
# GET /importlist
def get_import_list(self, id_=None):
"""Query for all lists or a single list by its database id
Args:
id_ (int, optional): Import list database id. Defaults to None.
Returns:
JSON: Array
"""
path = f"importlist/{id_}" if id_ else "importlist"
return self.request_get(path, self.ver_uri)
# POST /importlist/
def add_import_list(self):
"""This is not implemented yet
Raises:
NotImplementedError: Error
"""
raise NotImplementedError()
# PUT /importlist/{id}
def upd_import_list(self, id_, data):
"""Edit an importlist
Args:
id_ (int): Import list database id
data (dict): data to be updated within the import list
Returns:
JSON: 200 Ok, 401 Unauthorized
"""
path = f"importlist/{id_}"
return self.request_put(path, self.ver_uri, data=data)
# DELETE /importlist/{id}
def del_import_list(self, id_: int):
"""Delete an import list
Args:
id_ (int): Import list database id
Returns:
JSON: 200 ok, 401 Unauthorized
"""
return self.request_del(f"importlist/{id_}", self.ver_uri)
# GET /config/downloadclient
def get_config_download_client(self):
"""Gets download client page configuration
Returns:
JSON: Array
"""
return self.request_get("config/downloadclient", self.ver_uri)
# POST /notifications
def add_notifications(self):
"""This is not implemented yet
Raises:
NotImplementedError: Error
"""
raise NotImplementedError()
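# Hedged usage sketch (not part of the original module): BaseArrAPI is intended to
# be subclassed by app-specific wrappers. The class name, "/v3" prefix, host and
# API key below are illustrative assumptions.
class ExampleArrAPI(BaseArrAPI):
    def __init__(self, host_url, api_key):
        super().__init__(host_url, api_key, ver_uri="/v3")

# api = ExampleArrAPI("http://localhost:7878", "your-api-key")
# print(api.get_system_status())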
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import pythonizame.apps.website.models
class Migration(migrations.Migration):
dependencies = [
('website', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='siteconfiguration',
name='favicon',
field=models.ImageField(upload_to=pythonizame.apps.website.models.website_logo, blank=True, null=True),
),
migrations.AlterField(
model_name='siteconfiguration',
name='logo',
field=models.ImageField(upload_to=pythonizame.apps.website.models.website_logo, blank=True, null=True),
),
]
|
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package gpvdm_tab
# A table widget
#
import os
#qt
from PyQt5.QtWidgets import QTextEdit, QAction, QMenu
from PyQt5.QtCore import QSize, Qt , QPersistentModelIndex
from PyQt5.QtWidgets import QWidget,QPushButton,QToolBar, QVBoxLayout, QTableWidget,QAbstractItemView, QTableWidgetItem, QComboBox, QApplication
from PyQt5.QtGui import QCursor
from PyQt5.QtCore import pyqtSignal
from QComboBoxLang import QComboBoxLang
from icon_lib import icon_get
from gpvdm_select import gpvdm_select
from energy_to_charge import energy_to_charge
from gtkswitch import gtkswitch
from leftright import leftright
from str2bool import str2bool
class gpvdm_tab(QTableWidget):
changed = pyqtSignal()
user_remove_rows = pyqtSignal()
def __init__(self,toolbar=None):
QTableWidget.__init__(self)
self.toolbar=toolbar
self.paste_callback=None
self.setSelectionBehavior(QAbstractItemView.SelectItems)
        self.setSelectionMode(QAbstractItemView.SingleSelection)
if self.toolbar!=None:
self.toolbar.setIconSize(QSize(32, 32))
self.tb_add = QAction(icon_get("list-add"), _("Add"), self)
self.toolbar.addAction(self.tb_add)
self.tb_remove = QAction(icon_get("list-remove"), _("Delete row"), self)
self.toolbar.addAction(self.tb_remove)
self.tb_remove.triggered.connect(self.emit_remove_rows)
self.tb_down= QAction(icon_get("go-down"), _("Move down"), self)
self.toolbar.addAction(self.tb_down)
self.tb_up= QAction(icon_get("go-up"), _("Move up"), self)
self.toolbar.addAction(self.tb_up)
self.menu = QMenu(self)
self.menu_copy = QAction(_("Copy"), self)
self.menu_copy.triggered.connect(self.callback_menu_copy)
self.menu.addAction(self.menu_copy)
self.menu_paste = QAction(_("Paste"), self)
self.menu.addAction(self.menu_paste)
self.menu_paste.triggered.connect(self.callback_menu_paste)
self.menu_delete = QAction(icon_get("list-remove"),_("Delete row"), self)
self.menu.addAction(self.menu_delete)
self.menu_delete.triggered.connect(self.emit_remove_rows)
def callback_menu_copy(self):
if self.rowCount()==0:
return
rows=self.selectionModel().selectedRows()
ret=""
for a in rows:
a=a.row()
for i in range(0,self.columnCount()):
ret=ret+str(self.get_value(a,i))+";"
ret=ret[:-1]
ret=ret+"\n"
cb = QApplication.clipboard()
cb.clear(mode=cb.Clipboard )
cb.setText(ret, mode=cb.Clipboard)
def callback_menu_paste(self):
self.blockSignals(True)
cb = QApplication.clipboard()
text=cb.text()
lines=text.rstrip().split("\n")  # one clipboard line per row; columns stay ";"-separated (see callback_menu_copy)
item=self.selectedIndexes()[0]
y=item.row()
#x_start=item.column()
for l in lines:
if (y==self.rowCount()):
self.insertRow(y)
x=0
for s in l.split(";"):
self.set_value(y,x,s)
x=x+1
y=y+1
self.blockSignals(False)
def contextMenuEvent(self, event):
self.menu.popup(QCursor.pos())
def set_value(self,y,x,value):
if type(self.cellWidget(y, x))==QComboBox:
self.cellWidget(y, x).blockSignals(True)
self.cellWidget(y, x).setCurrentIndex(self.cellWidget(y, x).findText(value))
self.cellWidget(y, x).blockSignals(False)
elif type(self.cellWidget(y, x))==QComboBoxLang:
self.cellWidget(y, x).blockSignals(True)
self.cellWidget(y, x).setValue_using_english(value)
self.cellWidget(y, x).blockSignals(False)
elif type(self.cellWidget(y,x))==gpvdm_select:
self.cellWidget(y, x).blockSignals(True)
self.cellWidget(y, x).setText(value)
self.cellWidget(y, x).blockSignals(False)
elif type(self.cellWidget(y,x))==energy_to_charge:
self.cellWidget(y, x).blockSignals(True)
self.cellWidget(y, x).updateValue(value)
self.cellWidget(y, x).blockSignals(False)
elif type(self.cellWidget(y,x))==gtkswitch:
self.cellWidget(y, x).blockSignals(True)
self.cellWidget(y, x).set_value(str2bool(value))
self.cellWidget(y, x).blockSignals(False)
else:
item = QTableWidgetItem(str(value))
self.setItem(y,x,item)
def move_down(self):
ret=-1
if self.rowCount()==0:
return -1
self.blockSignals(True)
a=self.selectionModel().selectedRows()
if len(a)>0:
a=a[0].row()
b=a+1
if b>self.rowCount()-1:
self.blockSignals(False); return -1  # unblock signals before the early return
ret=a
av=[]
for i in range(0,self.columnCount()):
av.append(str(self.get_value(a,i)))
bv=[]
for i in range(0,self.columnCount()):
bv.append(str(self.get_value(b,i)))
for i in range(0,self.columnCount()):
self.set_value(b,i,str(av[i]))
self.set_value(a,i,str(bv[i]))
self.selectRow(b)
self.blockSignals(False)
return ret
else:
return -1
def get_value(self,y,x):
if type(self.cellWidget(y, x))==QComboBox:
return self.cellWidget(y, x).currentText()
elif type(self.cellWidget(y, x))==QComboBoxLang:
return self.cellWidget(y, x).currentText_english()
elif type(self.cellWidget(y,x))==gpvdm_select:
return self.cellWidget(y, x).text()
elif type(self.cellWidget(y,x))==energy_to_charge:
return self.cellWidget(y, x).text()
elif type(self.cellWidget(y,x))==leftright:
return self.cellWidget(y, x).get_value()
elif type(self.cellWidget(y,x))==gtkswitch:
return self.cellWidget(y, x).get_value()
else:
return self.item(y, x).text()
def add(self,data):
self.blockSignals(True)
index = self.selectionModel().selectedRows()
if len(index)>0:
pos=index[0].row()+1
else:
pos = self.rowCount()
if self.columnCount()==len(data):
self.insertRow(pos)
for i in range(0,len(data)):
self.setItem(pos,i,QTableWidgetItem(data[i]))
if len(data)>self.columnCount():
rows=int(len(data)/self.columnCount())
for ii in range(0,rows):
self.insertRow(pos)
for i in range(0,self.columnCount()):
self.setItem(pos,i,QTableWidgetItem(data[ii*self.columnCount()+i]))
pos=pos+1
self.blockSignals(False)
def insert_row(self):
self.blockSignals(True)
index = self.selectionModel().selectedRows()
if len(index)>0:
pos=index[0].row()+1
else:
pos = self.rowCount()
self.insertRow(pos)
self.blockSignals(False)
return pos
def move_up(self):
ret=-1
if self.rowCount()==0:
return ret
self.blockSignals(True)
a=self.selectionModel().selectedRows()
if len(a)==1:
a=a[0].row()
b=a-1
if b<0:
self.blockSignals(False); return -1  # unblock signals before the early return
#b=tab.rowCount()-1
ret=a
av=[]
for i in range(0,self.columnCount()):
av.append(str(self.get_value(a,i)))
bv=[]
for i in range(0,self.columnCount()):
bv.append(str(self.get_value(b,i)))
for i in range(0,self.columnCount()):
self.set_value(b,i,str(av[i]))
self.set_value(a,i,str(bv[i]))
self.selectRow(b)
self.blockSignals(False)
return ret
else:
return ret
def get_selected(self):
a=self.selectionModel().selectedRows()
if len(a)<=0:
return False
ret=[]
for ii in range(0,len(a)):
y=a[ii].row()
for i in range(0,self.columnCount()):
ret.append(str(self.get_value(y,i)))
return ret
def emit_remove_rows(self):
self.user_remove_rows.emit()
def remove(self):
self.blockSignals(True)
rows = []
for index in self.selectedIndexes():
row=index.row()
if row not in rows:
rows.append(row)
for row in sorted(rows, reverse=True):
self.removeRow(row)
self.blockSignals(False)
self.changed.emit()
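# Minimal usage sketch (added for illustration; assumes it is run from within the gpvdm
# source tree so icon_get() can find its icons). The lambda below is only a stand-in for
# gpvdm's gettext _() hook, which the real GUI installs at startup.
if __name__ == "__main__":
	import sys
	import builtins
	if not hasattr(builtins, "_"):
		builtins._ = lambda s: s	# fallback translation function for this demo only
	app = QApplication(sys.argv)
	demo_toolbar = QToolBar()
	demo_tab = gpvdm_tab(toolbar=demo_toolbar)
	demo_tab.setColumnCount(2)
	demo_tab.setHorizontalHeaderLabels(["parameter", "value"])
	demo_tab.add(["thickness", "100e-9"])	# appends one row after the current selection
	print(demo_tab.get_value(0, 0))		# -> "thickness"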
|
"""Sigmoid function"""
import numpy as np
def sigmoid(matrix):
"""Applies sigmoid function to NumPy matrix"""
return 1 / (1 + np.exp(-matrix))
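# Minimal usage sketch (added for illustration): sigmoid() operates elementwise, so it
# accepts scalars, vectors and matrices alike.
if __name__ == "__main__":
    x = np.array([[-2.0, 0.0, 2.0]])
    print(sigmoid(x))  # approximately [[0.1192 0.5 0.8808]]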
|