pkgname = "perl-test-deep"
pkgver = "1.130"
pkgrel = 0
build_style = "perl_module"
hostmakedepends = ["gmake", "perl"]
makedepends = ["perl"]
depends = ["perl"]
pkgdesc = "Extremely flexible deep comparison"
maintainer = "q66 <q66@chimera-linux.org>"
license = "Artistic-1.0-Perl OR GPL-1.0-or-later"
url = "https://metacpan.org/release/Test-Deep"
source = f"$(CPAN_SITE)/Test/Test-Deep-{pkgver}.tar.gz"
sha256 = "4064f494f5f62587d0ae501ca439105821ee5846c687dc6503233f55300a7c56"
|
# Flow
# 0. Data preprocessing -> run prepro.sh: extract the required data from data, pickle it, and build word2id / id2vec
# 1. Extract contexts and questions and convert them to ids
# 2. Take a batch from the dataloader (just a shuffled list of ids), build the batch accordingly, and convert it to torch tensors
# 3. Feed it to the model, which outputs p1, p2 (start and end positions)
# 4. For prediction, take the argmax of p1 and p2 and compare them with the gold positions
import warnings
warnings.filterwarnings('ignore')
import sys
sys.path.append("../")
from tqdm import tqdm
import nltk
import pickle
import json
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from torch.autograd import Variable
import time
from model.seq2seq import Seq2Seq
from model.seq2seq2 import Seq2Seq2
from func.utils import Word2Id,BatchMaker,make_vec,make_vec_c,to_var,logger,get_args,data_loader,loss_calc,predict_calc,predict_sentence
from func import constants
# training per epoch
def model_handler(args,data,train=True):
start=time.time()
sentences=data["sentences"]
questions=data["questions"]
id2word=data["id2word"]
data_size=len(questions)
batch_size=args.test_batch_size
model.eval()
# batches are specified by a shuffled index array
batchmaker=BatchMaker(data_size,batch_size,train)
batches=batchmaker()
predict_rate=0
loss_sum=0
# list to store the generated sentences
predicts=[]
for i_batch,batch in tqdm(enumerate(batches)):
# take each example out of the batch, process it, and feed it to the model
input_words=make_vec([sentences[i] for i in batch])
output_words=make_vec([questions[i] for i in batch])#(batch,seq_len)
# pass the data to the model and predict
predict=model(input_words,output_words,train)#(batch,seq_len,vocab_size)
predict=predict_sentence(predict,output_words[:,1:],id2word)#(batch,seq_len)
predicts.extend(predict)
sentences=[" ".join([id2word[id] for id in sentence]) for sentence in sentences]# convert ids back to words
questions=[" ".join([id2word[id] for id in sentence[1:-1]]) for sentence in questions]# convert ids back to words
with open("data/predict_sentences.json","w")as f:
data={"sentences":sentences,
"questions":questions,
"predicts":predicts}
json.dump(data,f)
##start main
args=get_args()
test_data=data_loader(args,"data/test_data.json",first=True) if args.use_train_data==False else \
data_loader(args,"data/train_data.json",first=True)
model=Seq2Seq(args) if args.model_version==1 else \
Seq2Seq2(args)
if args.model_name!="":
param = torch.load("model_data/{}".format(args.model_name))
model.load_state_dict(param)
# if start_epoch is 0, start from scratch; if specified, load the previously trained model
elif args.start_epoch>=1:
param = torch.load("model_data/epoch_{}_model.pth".format(args.start_epoch-1))
model.load_state_dict(param)
else:
args.start_epoch=0
# since PyTorch 0.4; based on OpenNMT
device=torch.device("cuda:{}".format(args.cuda_number) if torch.cuda.is_available() else "cpu")
model.to(device)
model_handler(args,test_data,train=False)
|
# -*- coding: future_fstrings -*-
import struct
import sys
from libptmalloc.frontend import printutils as pu
from libptmalloc.ptmalloc import heap_structure as hs
class malloc_state(hs.heap_structure):
"python representation of a struct malloc_state"
# XXX - we can probably get the version directly from the ptm argument?
def __init__(self, ptm, addr=None, mem=None, debugger=None, version=None):
"""
Parse malloc_state's data and initialize the malloc_state object
:param ptm: ptmalloc object
:param addr: address for a malloc_state where to read the structure's content from the debugger
:param mem: alternatively to "addr", provides the memory bytes of that malloc_state's content
:param debugger: the pydbg object
:param version: the glibc version
"""
super(malloc_state, self).__init__(ptm, debugger=debugger)
self.size = 0 # sizeof(struct malloc_state)
# malloc_state structure's fields, in this order for easy lookup
# Note: commented ones have been added at some point in glibc
# so are not present in older glibc versions
self.mutex = 0
self.flags = 0
# self.have_fastchunks = 0 # added in 2.27
self.fastbinsY = 0
self.top = 0
self.last_remainder = 0
self.bins = 0
self.binmap = 0
self.next = 0
self.next_free = 0
# self.attached_threads = 0 # added in 2.23
self.system_mem = 0
self.max_system_mem = 0
# helpers
self.fastbins_offset = 0
self.bins_offset = 0
if addr is None:
if mem is None:
pu.print_error("Please specify a struct malloc_state address")
self.initOK = False
return
self.address = None
else:
self.address = addr
if debugger is not None:
self.dbg = debugger
else:
pu.print_error("Please specify a debugger")
raise Exception('sys.exit()')
if version is None:
pu.print_error("Please specify a glibc version for malloc_state")
raise Exception('sys.exit()')
else:
self.version = version
self.initialize_sizes_and_offsets()
if mem is None:
# a string of raw memory was not provided, let's read it from the debugger
try:
self.mem = self.dbg.read_memory(addr, self.size)
except TypeError:
pu.print_error("Invalid address specified")
self.initOK = False
return
except RuntimeError:
pu.print_error("Could not read address {0:#x}".format(addr))
self.initOK = False
return
else:
if len(mem) < self.size:
pu.print_error("Provided memory size is too small for a malloc_state")
self.initOK = False
return
self.mem = mem[:self.size]
self.unpack_memory()
def initialize_sizes_and_offsets(self):
"""Initialize malloc_state's specific sizes based on the glibc version and architecture
"""
self.size_sz = self.dbg.get_size_sz()
if self.version < 2.15:
# XXX - seems 2.14 has same fields as 2.15 so likely we can support
# older easily...
pu.print_error("Unsupported version for malloc_state")
raise Exception('sys.exit()')
if self.version >= 2.15 and self.version < 2.23:
if self.size_sz == 4:
# sizeof(malloc_state) = 4+4+40+4+4+(254*4)+16+4+4+4+4
self.size = 0x450
elif self.size_sz == 8:
# sizeof(malloc_state) = 4+4+80+8+8+(254*8)+16+8+8+8+8
self.size = 0x888
self.fastbins_offset = 8
self.bins_offset = self.fastbins_offset + 12 * self.size_sz
elif self.version >= 2.23 and self.version <= 2.25:
# attached_threads added in 2.23
if self.size_sz == 4:
self.size = 0x454
elif self.size_sz == 8:
self.size = 0x890
self.fastbins_offset = 8
self.bins_offset = self.fastbins_offset + 12 * self.size_sz
elif self.version >= 2.27:
# have_fastchunks added in 2.27
if self.size_sz == 4:
# hax, empiric: +4 for padding added after fastbinsY[]
self.size = 0x458+4
self.fastbins_offset = 0xC
elif self.size_sz == 8:
self.size = 0x898
self.fastbins_offset = 0x10
self.bins_offset = self.fastbins_offset + 12 * self.size_sz
def unpack_memory(self):
"""Actually parse all the malloc_state's fields from the memory bytes (previously retrieved)
"""
if self.mem is None:
pu.print_error("No memory found")
raise Exception('sys.exit()')
self.mutex = self.unpack_variable("<I", 0)
self.flags = self.unpack_variable("<I", 4)
offset = 8
if self.version >= 2.27:
# have_fastchunks added in 2.27
if self.size_sz == 4:
fmt = "<I"
elif self.size_sz == 8:
fmt = "<Q"
# this is padded on 64-bit despite being int
self.have_fastchunks = self.unpack_variable(fmt, offset)
offset = offset + self.size_sz
if self.size_sz == 4:
fmt = "<10I"
elif self.size_sz == 8:
fmt = "<10Q"
self.fastbinsY = struct.unpack_from(fmt, self.mem, offset)
offset = offset + 10 * self.size_sz
if self.version >= 2.27:
if self.size_sz == 4:
# hax, empiric: +4 for padding added after fastbinsY[]
offset += 4
if self.size_sz == 4:
fmt = "<I"
elif self.size_sz == 8:
fmt = "<Q"
self.top = self.unpack_variable(fmt, offset)
offset += self.size_sz
self.last_remainder = self.unpack_variable(fmt, offset)
offset = offset + self.size_sz
if self.size_sz == 4:
fmt = "<254I"
elif self.size_sz == 8:
fmt = "<254Q"
self.bins = struct.unpack_from(fmt, self.mem, offset)
offset = offset + (254 * self.size_sz)
self.binmap = struct.unpack_from("<IIII", self.mem, offset)
offset = offset + 16
if self.size_sz == 4:
fmt = "<I"
elif self.size_sz == 8:
fmt = "<Q"
self.next = self.unpack_variable(fmt, offset)
offset = offset + self.size_sz
self.next_free = self.unpack_variable(fmt, offset)
offset = offset + self.size_sz
if self.version >= 2.23:
# attached_threads added in 2.23
self.attached_threads = self.unpack_variable(fmt, offset)
offset = offset + self.size_sz
self.system_mem = self.unpack_variable(fmt, offset)
offset = offset + self.size_sz
self.max_system_mem = self.unpack_variable(fmt, offset)
# XXX - this is probably broken as we haven't used it yet
def write(self, inferior=None):
"""Write malloc_state's data into memory using debugger
"""
if self.size_sz == 4:
mem = struct.pack(
"<275I",
self.mutex,
self.flags,
self.fastbinsY,
self.top,
self.last_remainder,
self.bins,
self.binmap,
self.next,
self.system_mem,
self.max_system_mem,
)
elif self.size_sz == 8:
mem = struct.pack(
"<II266QIIIIQQQ",
self.mutex,
self.flags,
self.fastbinsY,
self.top,
self.last_remainder,
self.bins,
self.binmap,
self.next,
self.system_mem,
self.max_system_mem,
)
if self.dbg is not None:
self.dbg.write_memory(self.address, mem)
elif inferior is not None:
inferior.write_memory(self.address, mem)
def __str__(self):
"""Pretty printer for the malloc_state
"""
return self.to_string()
def to_string(self, verbose=0, use_cache=False):
"""Pretty printer for the malloc_state supporting different level of verbosity
:param verbose: 0 for non-verbose. 1 for more verbose. 2 for even more verbose.
:param use_cache: True if we want to use the cached information from the cache object.
False if we want to fetch the data again
"""
title = "struct malloc_state @ 0x%x {" % self.address
txt = pu.color_title(title)
txt += "\n{:16} = ".format("mutex")
txt += pu.color_value("{:#x}".format(self.mutex))
txt += "\n{:16} = ".format("flags")
txt += pu.color_value("{:#x}".format(self.flags))
if self.version >= 2.27:
txt += "\n{:16} = ".format("have_fastchunks")
txt += pu.color_value("{:#x}".format(self.have_fastchunks))
txt += self.fastbins_to_string(verbose=verbose, use_cache=use_cache)
txt += "\n{:16} = ".format("top")
txt += pu.color_value("{:#x}".format(self.top))
txt += "\n{:16} = ".format("last_remainder")
txt += pu.color_value("{:#x}".format(self.last_remainder))
txt += self.bins_to_string(verbose=verbose, use_cache=use_cache)
if verbose > 0:
for i in range(len(self.binmap)):
txt += "\n{:16} = ".format("binmap[%d]" % i)
txt += pu.color_value("{:#x}".format(self.binmap[i]))
else:
txt += "\n{:16} = ".format("binmap")
txt += pu.color_value("{}".format("{...}"))
txt += "\n{:16} = ".format("next")
txt += pu.color_value("{:#x}".format(self.next))
txt += "\n{:16} = ".format("next_free")
txt += pu.color_value("{:#x}".format(self.next_free))
if self.version >= 2.23:
txt += "\n{:16} = ".format("attached_threads")
txt += pu.color_value("{:#x}".format(self.attached_threads))
txt += "\n{:16} = ".format("system_mem")
txt += pu.color_value("{:#x}".format(self.system_mem))
txt += "\n{:16} = ".format("max_system_mem")
txt += pu.color_value("{:#x}".format(self.max_system_mem))
return txt
def fastbins_to_string(self, show_status=False, verbose=2, use_cache=False):
"""Pretty printer for the malloc_state.fastbinsY[] array supporting different level of verbosity
:param verbose: 0 for non-verbose. 1 for more verbose. 2 for even more verbose.
:param use_cache: True if we want to use the cached information from the cache object.
False if we want to fetch the data again
"""
self.ptm.cache.update_fast_bins(show_status=show_status, use_cache=use_cache)
txt = ""
if verbose == 0:
txt += "\n{:16} = ".format("fastbinsY")
txt += pu.color_value("{}".format("{...}"))
return txt
elif verbose == 1:
show_empty = False
elif verbose >= 2:
show_empty = True
else:
raise Exception("Wrong verbosity passed to fastbins_to_string()")
for i in range(len(self.fastbinsY)):
count = len(self.ptm.cache.fast_bins[i])
if show_empty or count > 0:
txt += "\n{:16} = ".format("fastbinsY[%d]" % i)
txt += pu.color_value("{:#x}".format(self.fastbinsY[i]))
txt += " (sz {:#x})".format(self.ptm.fast_bin_size(i))
msg = "entry"
if count > 1:
msg = "entries"
if count == 0:
txt += " [EMPTY]"
else:
txt += " [{:#d} {}]".format(count, msg)
return txt
def bins_to_string(self, show_status=False, verbose=2, use_cache=False):
"""Pretty printer for the malloc_state.bins[] array supporting different level of verbosity
:param verbose: 0 for non-verbose. 1 for more verbose. 2 for even more verbose.
:param use_cache: True if we want to use the cached information from the cache object.
False if we want to fetch the data again
"""
self.ptm.cache.update_bins(show_status=show_status, use_cache=use_cache)
mstate = self.ptm.cache.mstate
txt = ""
if verbose == 0:
txt += "\n{:16} = ".format("bins")
txt += pu.color_value("{}".format("{...}"))
return txt
elif verbose == 1:
show_empty = False
elif verbose >= 2:
show_empty = True
else:
raise Exception("Wrong verbosity passed to bins_to_string()")
for i in range(len(self.ptm.cache.bins)):
count = len(self.ptm.cache.bins[i])
if show_empty or count > 0:
txt += "\n{:16} = ".format("bins[%d]" % i)
txt += pu.color_value("{:#x}, {:#x}".format(mstate.bins[i*2], mstate.bins[i*2+1]))
size = self.ptm.bin_size(i)
if i == self.ptm.bin_index_unsorted:
txt += " (unsorted)"
elif i <= self.ptm.bin_index_small_max:
txt += " (small sz 0x%x)" % size
elif i <= self.ptm.bin_index_large_max:
txt += " (large sz 0x%x)" % size
elif i == self.ptm.bin_index_uncategorized:
# size == None
txt += " (large uncategorized)"
msg = "entry"
if count > 1:
msg = "entries"
if count == 0:
txt += " [EMPTY]"
else:
txt += " [{:#d} {}]".format(count, msg)
return txt
|
import youtube_dl
import textwrap
import twitter
import json
import re
import os
import urllib.parse
import urllib.request
pathregex = re.compile("\\w{1,15}\\/(status|statuses)\\/\\d{2,20}")
generate_embed_user_agents = [
"Slackbot-LinkExpanding 1.0 (+https://api.slack.com/robots)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:38.0) Gecko/20100101 Firefox/38.0",
"Mozilla/5.0 (compatible; Discordbot/2.0; +https://discordapp.com)",
"TelegramBot (like TwitterBot)",
"Mozilla/5.0 (compatible; January/1.0; +https://gitlab.insrt.uk/revolt/january)",
"test"
]
# Read config from config.json
f = open("config.json")
config = json.load(f)
f.close()
# Empty dummy link cache
link_cache = {}
# If method is set to API or Hybrid, attempt to auth with the Twitter API
if config['config']['method'] in ('api', 'hybrid'):
auth = twitter.oauth.OAuth(config['api']['access_token'], config['api']['access_secret'], config['api']['api_key'], config['api']['api_secret'])
twitter_api = twitter.Twitter(auth=auth)
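# For reference, a sketch of the config.json layout this module expects, reconstructed only
# from the keys read in this file; every value below is a placeholder rather than a real
# credential, color or endpoint.
EXAMPLE_CONFIG = {
    "config": {
        "method": "hybrid",  # one of 'api', 'youtube-dl' or 'hybrid'
        "color": "#1DA1F2",
        "appname": "TwitFix",
        "repo": "https://github.com/robinuniverse/twitfix",
        "url": "https://example.invalid"
    },
    "api": {
        "api_key": "...",
        "api_secret": "...",
        "access_token": "...",
        "access_secret": "..."
    }
}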
def lambda_handler(event, context):
"""
Main Lambda event handler.
"""
path = event['rawPath']
user_agent = event['headers']['user-agent']
# Lambda includes a prefix / in the path, so strip it out
if path[0] == "/":
path = path[1:]
match = pathregex.search(path)
if match is not None:
twitter_url = path
if match.start() == 0:
twitter_url = "https://twitter.com/" + path
if user_agent in generate_embed_user_agents:
res = embed_video(twitter_url)
return res
else:
print(" ➤ [ R ] Redirect to " + twitter_url)
return redirect(twitter_url, 301)
else:
print('Invalid URL')
def render_template(name, **kwargs):
"""
Render a template with the given name and arguments.
"""
f = open(name)
template_contents = f.read()
for key, value in kwargs.items():
template_contents = template_contents.replace("{{ " + key + " }}", value)
f.close()
return {
'statusCode': 200,
'headers': {
'Content-Type': 'text/html'
},
'body': template_contents
}
def redirect(url, code):
return {
'statusCode': code,
'headers': {
'Location': url,
'Content-Type': 'text/html'
},
'body': '<html><body>Redirecting to <a href="' + url + '">' + url + '</a></body></html>'
}
def embed_video(video_link):
vnf = get_vnf_from_link_cache(video_link)
if vnf is None:
vnf = link_to_vnf(video_link)
add_vnf_to_link_cache(video_link, vnf)
return embed(video_link, vnf)
def video_info(url, tweet="", desc="", thumb="", uploader=""): # Return a dict of video info with default values
vnf = {
"tweet" :tweet,
"url" :url,
"description" :desc,
"thumbnail" :thumb,
"uploader" :uploader
}
return vnf
def link_to_vnf_from_api(video_link):
print(" ➤ [ + ] Attempting to download tweet info from Twitter API")
twid = int(re.sub(r'\?.*$','',video_link.rsplit("/", 1)[-1])) # gets the tweet ID as an int from the passed url
tweet = twitter_api.statuses.show(_id=twid, tweet_mode="extended")
# Check to see if tweet has a video, if not, make the url passed to the VNF the first t.co link in the tweet
if 'extended_entities' in tweet:
if 'video_info' in tweet['extended_entities']['media'][0]:
if tweet['extended_entities']['media'][0]['video_info']['variants'][-1]['content_type'] == "video/mp4":
url = tweet['extended_entities']['media'][0]['video_info']['variants'][-1]['url']
thumb = tweet['extended_entities']['media'][0]['media_url']
else:
url = tweet['extended_entities']['media'][0]['video_info']['variants'][-2]['url']
thumb = tweet['extended_entities']['media'][0]['media_url']
else:
url = re.findall(r'(https?://[^\s]+)', tweet['full_text'])[0]
thumb = "Non video link with url"
print(" ➤ [ NV ] Non video tweet, but has a link: " + url)
else:
url = re.findall(r'(https?://[^\s]+)', tweet['full_text'])[0]
thumb = "Non video link with url"
print(" ➤ [ NV ] Non video tweet, but has a link: " + url)
if len(tweet['full_text']) > 200:
text = textwrap.shorten(tweet['full_text'], width=200, placeholder="...")
else:
text = tweet['full_text']
vnf = video_info(url, video_link, text, thumb, tweet['user']['name'])
return vnf
def link_to_vnf_from_youtubedl(video_link):
print(" ➤ [ X ] Attempting to download tweet info via YoutubeDL: " + video_link)
with youtube_dl.YoutubeDL({'outtmpl': '%(id)s.%(ext)s'}) as ydl:
result = ydl.extract_info(video_link, download=False)
vnf = video_info(result['url'], video_link, result['description'].rsplit(' ',1)[0], result['thumbnail'], result['uploader'])
return vnf
def link_to_vnf(video_link): # Return a VideoInfo object or die trying
if config['config']['method'] == 'hybrid':
try:
return link_to_vnf_from_api(video_link)
except Exception as e:
print(" ➤ [ !!! ] API Failed")
print(e)
return link_to_vnf_from_youtubedl(video_link)
elif config['config']['method'] == 'api':
try:
return link_to_vnf_from_api(video_link)
except Exception as e:
print(" ➤ [ X ] API Failed")
print(e)
return None
elif config['config']['method'] == 'youtube-dl':
try:
return link_to_vnf_from_youtubedl(video_link)
except Exception as e:
print(" ➤ [ X ] Youtube-DL Failed")
print(e)
return None
else:
print("Please set the method key in your config file to 'api' 'youtube-dl' or 'hybrid'")
return None
def get_vnf_from_link_cache(video_link):
"""
TODO: Use Elasticache to store the VNFs
"""
return None
def add_vnf_to_link_cache(video_link, vnf):
"""
TODO: Use Elasticache to store the VNFs
"""
link_cache[video_link] = vnf
def embed(video_link, vnf):
print(" ➤ [ E ] Embedding " + vnf['url'])
if vnf['thumbnail'] == "Non video link with url":
print(" ➤ [ NV ] Redirecting Non Video Tweet to Twitter")
return redirect(vnf['url'], 301)
if not vnf['url'].startswith('https://t.co'):
desc = re.sub(r' http.*t\.co\S+', '', vnf['description'])
urlUser = urllib.parse.quote(vnf['uploader'])
urlDesc = urllib.parse.quote(desc)
urlLink = urllib.parse.quote(video_link)
return render_template(
'template.html',
vidlink=vnf['url'],
vidurl=vnf['url'],
desc=desc,
pic=vnf['thumbnail'],
user=vnf['uploader'],
video_link=video_link,
color=config['config']['color'],
appname=config['config']['appname'],
repo=config['config']['repo'],
url=config['config']['url'],
urlDesc=urlDesc,
urlUser=urlUser,
urlLink=urlLink
)
else:
return redirect(vnf['url'], 301)
def o_embed_gen(description, user, video_link):
out = {
"type":"video",
"version":"1.0",
"provider_name":"TwitFix",
"provider_url":"https://github.com/robinuniverse/twitfix",
"title":description,
"author_name":user,
"author_url":video_link
}
return out
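# Local smoke test (not part of the original file): it assumes config.json is present so the
# module imports, and uses a non-embed user agent so only the redirect branch runs (no
# Twitter API or network access needed). The status path below is just a placeholder.
if __name__ == "__main__":
    fake_event = {
        "rawPath": "/jack/status/20",
        "headers": {"user-agent": "Mozilla/5.0 (generic browser)"}
    }
    print(lambda_handler(fake_event, None))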
|
# Custom metrics for the auto-encoder
import tensorflow as tf
# True positive on hard masks
class BinaryTP(tf.keras.metrics.Metric):
def __init__(self, name="binary_TP", **kwargs):
super(BinaryTP, self).__init__(name=name, **kwargs)
self.TP = self.add_weight(name="tp", initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.math.round(y_pred)
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
sample_weight = tf.broadcast_to(sample_weight, values.shape)
values = tf.multiply(values, sample_weight)
self.TP.assign_add(tf.reduce_sum(values))
def result(self):
return self.TP
# False positive on hard masks
class BinaryFP(tf.keras.metrics.Metric):
def __init__(self, name="binary_FP", **kwargs):
super(BinaryFP, self).__init__(name=name, **kwargs)
self.FP = self.add_weight(name="fp", initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.math.round(y_pred)
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, False), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
sample_weight = tf.broadcast_to(sample_weight, values.shape)
values = tf.multiply(values, sample_weight)
self.FP.assign_add(tf.reduce_sum(values))
def result(self):
return self.FP
# True negative on hard masks
class BinaryTN(tf.keras.metrics.Metric):
def __init__(self, name="binary_TN", **kwargs):
super(BinaryTN, self).__init__(name=name, **kwargs)
self.TN = self.add_weight(name="tn", initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.math.round(y_pred)
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, False), tf.equal(y_pred, False))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
sample_weight = tf.broadcast_to(sample_weight, values.shape)
values = tf.multiply(values, sample_weight)
self.TN.assign_add(tf.reduce_sum(values))
def result(self):
return self.TN
# False negative on hard masks
class BinaryFN(tf.keras.metrics.Metric):
def __init__(self, name="binary_FN", **kwargs):
super(BinaryFN, self).__init__(name=name, **kwargs)
self.FN = self.add_weight(name="fn", initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.math.round(y_pred)
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, False))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
sample_weight = tf.broadcast_to(sample_weight, values.shape)
values = tf.multiply(values, sample_weight)
self.FN.assign_add(tf.reduce_sum(values))
def result(self):
return self.FN
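# Quick sanity check of the metrics on dummy data (not part of the original module); they
# plug into model.compile(metrics=[...]) like any other tf.keras metric.
if __name__ == "__main__":
    import numpy as np
    y_true = np.array([[1.0, 0.0, 1.0, 0.0]])
    y_pred = np.array([[0.9, 0.2, 0.4, 0.6]])  # rounds to [1, 0, 0, 1]
    for metric in (BinaryTP(), BinaryFP(), BinaryTN(), BinaryFN()):
        metric.update_state(y_true, y_pred)
        print(metric.name, float(metric.result()))  # expects 1.0 for each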
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""MQTT Fiware Bridge
This microservice is optional for the NuvlaBox.
It takes as arguments:
- --mqtt-host: (mandatory) MQTT broker endpoint to connect to
- --mqtt-topic: (mandatory) MQTT topic to be subscribed
- --output-connector: (optional) From the available connectors, which one to use. If empty, all messages go to STDOUT
- --output-endpoint: (optional) Endpoint for the service where to push the messages to. Needs OUTPUT_CONNECTOR
This component connects to one and only one MQTT topic from one MQTT broker.
For each message received, a schema validation is performed against the FIWARE data models.
If the received data structure is FIWARE compliant, then forward the message to the OUTPUT_ENDPOINT.
There are several connectors available to choose from:
- disk: appends the messages to a text file located inside the container. The default path is /opt/nuvlabox/output.txt.
This path can be overwritten by OUTPUT_ENDPOINT
"""
import socket
import logging
import sys
import argparse
import fastjsonschema
import json
import pkg_resources
import paho.mqtt.client as mqtt
from abc import abstractmethod
socket.setdefaulttimeout(30)
class MqttFiwareBridge(object):
def __init__(self):
self.this_package_name = "mqtt_fiware_bridge"
self.set_logger(self.this_package_name)
self.log = logging.getLogger(self.this_package_name)
self.args = self.arguments(self.this_package_name).parse_args()
self.pkg_fiware_specs = 'fiware/specs'
self.fiware_schema_filename = 'schema.json'
self.fiware_models = self.map_all_fiware_models(self.pkg_fiware_specs)
@staticmethod
def set_logger(logger_name):
""" Configures logging """
# give logger a name: app
root = logging.getLogger(logger_name)
root.setLevel(logging.DEBUG)
# print to console
c_handler = logging.StreamHandler(sys.stdout)
c_handler.setLevel(logging.DEBUG)
# format log messages
formatter = logging.Formatter('%(levelname)s - %(funcName)s - %(message)s')
c_handler.setFormatter(formatter)
# add handlers
root.addHandler(c_handler)
@staticmethod
def arguments(description):
""" Builds a generic argparse
:return: parser
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--mqtt-host', dest='mqtt_host', metavar='MQTT BROKER HOSTNAME', required=True)
parser.add_argument('--mqtt-topic', dest='mqtt_topic', metavar='MQTT TOPIC', required=True)
parser.add_argument('--ignore-fiware-validation', dest='ignore_fiware_validation', action='store_true',
default=False)
parser.add_argument('--output-connector', dest='connector', default=None, metavar='CONNECTOR NAME')
parser.add_argument('--output-endpoint', dest='output_endpoint', default=None, metavar='ENDPOINT')
return parser
def map_all_fiware_models(self, search_at):
""" Generates a list of keypairs, containing the paths to all FIWARE data models
Example: {'Alert': 'fiware/specs/Alert'} is the path where you can find the schema.json for the data model Alert
"""
all_model_paths = {}
listed_under = pkg_resources.resource_listdir(self.this_package_name, search_at)
for item in listed_under:
new_folder = f'{search_at}/{item}'
if item == self.fiware_schema_filename:
all_model_paths[search_at.split('/')[-1]] = search_at
elif pkg_resources.resource_isdir(self.this_package_name, new_folder):
all_model_paths.update(self.map_all_fiware_models(new_folder))
else:
continue
return all_model_paths
def fiware_validate(self, data):
""" Takes the data and the list of fiware_data_type, and double checks against them all to
see if the schema is correct """
try:
msg = json.loads(data)
except json.decoder.JSONDecodeError:
self.log.exception(f"Message {data} is not in JSON form and thus cannot be validated")
return False
# message needs to contain a top-level "type" attribute identifying the data model.
# Otherwise it is straightaway NOT FIWARE compliant
if "type" not in msg:
self.log.warning(f"Message {msg} doesn't have a 'type' attribute, and thus is not FIWARE compliant")
return False
else:
fiware_data_type = msg['type']
if fiware_data_type in self.fiware_models:
schema = f"{self.fiware_models[fiware_data_type]}/{self.fiware_schema_filename}"
schema_json = json.loads(pkg_resources.resource_string(self.this_package_name, schema))
validate = fastjsonschema.compile(schema_json)
try:
validate(msg)
return True
except fastjsonschema.exceptions.JsonSchemaException:
self.log.exception(f'The {fiware_data_type} message is not compliant with FIWARE: {msg}')
return False
else:
self.log.warning(f"The field 'type' ({fiware_data_type}) in the message {msg} is not a valid FIWARE "
f"data type")
return False
@abstractmethod
def do_something(self, message):
# please redefine this function if you are inheriting this class
pass
def on_message(self, client, userdata, message):
new_message = str(message.payload.decode("utf-8"))
self.log.info(f"New message: {new_message}")
if self.args.ignore_fiware_validation:
self.do_something(new_message)
else:
self.log.info("Verifying FIWARE compliance...")
if self.fiware_validate(new_message):
self.log.info("Message is FIWARE compliant!")
self.do_something(new_message)
else:
self.log.warning("Message validation failed...")
def on_log(self, client, userdata, level, buf):
self.log.info(f"MQTT log: {buf}")
def connect(self):
""" Connect to the MQTT broker and starts listening forever """
self.log.info("Starting MQTT FIWARE bridge")
client = mqtt.Client(self.this_package_name)
try:
client.connect(self.args.mqtt_host)
except (socket.gaierror, socket.timeout):
self.log.exception(f"Cannot connect to the provided MQTT host {self.args.mqtt_host}")
client.on_message=self.on_message
# client.on_log=self.on_log
self.log.info(f"Subscribing to topic {self.args.mqtt_topic}")
client.subscribe(self.args.mqtt_topic)
client.loop_forever()
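# A minimal usage sketch (not part of the original module): subclass the bridge and override
# do_something() to implement a connector, then run with the mandatory CLI arguments, e.g.
#   python -m mqtt_fiware_bridge --mqtt-host broker.local --mqtt-topic sensors/#
# (the module path, hostname and topic above are placeholders).
class StdoutBridge(MqttFiwareBridge):
    def do_something(self, message):
        # Simplest possible "connector": log every accepted message to STDOUT.
        self.log.info(f"Forwarding message: {message}")

if __name__ == "__main__":
    StdoutBridge().connect()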
|
"""
Pygts: A Python Toolbox for gravity time series processing
Author: Jianing Gou(goujianing19@mails.ucas.ac.cn)
==================================================================
Pygts is an open-source project dedicated to providing a Python framework for
processing continuous gravity time series data (such as from superconducting gravimeters or PET/gPhone gravimeters).
It provides many tools for working with gravity data.
The goal of the DeepGravity project is to facilitate rapid application development
for gravity data.
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
|
# coding=utf-8
from __future__ import absolute_import
import flask, pigpio
### (Don't forget to remove me)
# This is a basic skeleton for your plugin's __init__.py. You probably want to adjust the class name of your plugin
# as well as the plugin mixins it's subclassing from. This is really just a basic skeleton to get you started,
# defining your plugin as a template plugin, settings and asset plugin. Feel free to add or remove mixins
# as necessary.
#
# Take a look at the documentation on what other plugin mixins are available.
import octoprint.plugin
class TiltcamPlugin(octoprint.plugin.StartupPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.SimpleApiPlugin,
octoprint.plugin.WizardPlugin):
def on_settings_initialized(self):
self.status = ""
self.RANGE_X = [self._settings.get_int(["xRangeMin"]), self._settings.get_int(["xRangeMax"])]
self.RANGE_Y = [self._settings.get_int(["yRangeMin"]), self._settings.get_int(["yRangeMax"])]
refx = self._settings.get_int(["xstart"]) + self.RANGE_X[0]
refy = self._settings.get_int(["ystart"]) + self.RANGE_Y[0]
lastx = self._settings.get_int(["xlast"])
lasty = self._settings.get_int(["ylast"])
if (lastx is not None) and (lasty is not None) and (self._settings.get_boolean(["startingPoint"]) == False):
refx = lastx
refy = lasty
self.refPoint = [refx, refy]
self.pi = pigpio.pi()
try:
self.pi.set_servo_pulsewidth(self._settings.get_int(["xgpio"]), refx)
self.pi.set_servo_pulsewidth(self._settings.get_int(["ygpio"]), refy)
except Exception:
self.status = "Pigpiod is not running!<br/>Please follow these <a href='https://github.com/fabiocaruso/OctoPrint-Tiltcam/blob/master/docs/pigpiod_installation.md'>instructions</a> to set it up."
##~~ WizardPlugin mixin
def is_wizard_required(self):
return (self._settings.get(["xgpio"]) is None) or (self._settings.get(["ygpio"]) is None)
def get_wizard_version(self):
return 1
##~~ SimpleApiPlugin mixin
def get_api_commands(self):
return dict(
move=["x", "y"],
setRefPoint=[],
setLastPoint=[]
)
def on_api_command(self, command, data):
if self.status != "":
return flask.jsonify(result=self.status)
gpioX = self._settings.get_int(["xgpio"])
gpioY = self._settings.get_int(["ygpio"])
if command == "move":
stepX = (self.RANGE_X[1] - self.RANGE_X[0]) * data["x"]
stepY = (self.RANGE_Y[1] - self.RANGE_Y[0]) * data["y"]
abX = self.refPoint[0] + stepX
abY = self.refPoint[1] + stepY
if self.RANGE_X[0] <= abX <= self.RANGE_X[1] and self.RANGE_Y[0] <= abY <= self.RANGE_Y[1]:
self.pi.set_servo_pulsewidth(gpioX, abX)
self.pi.set_servo_pulsewidth(gpioY, abY)
elif command == "setRefPoint":
currentX = self.pi.get_servo_pulsewidth(gpioX)
currentY = self.pi.get_servo_pulsewidth(gpioY)
self.refPoint = [currentX, currentY]
elif command == "setLastPoint":
currentX = self.pi.get_servo_pulsewidth(gpioX)
currentY = self.pi.get_servo_pulsewidth(gpioY)
self._settings.set_int(["xlast"], currentX)
self._settings.set_int(["ylast"], currentY)
self._settings.save()
def on_api_get(self, request):
return flask.jsonify(foo="bar")
##~~ SettingsPlugin mixin
def get_settings_defaults(self):
return dict(
xgpio=None,
ygpio=None,
startingPoint=True,
xstart=600,
ystart=600,
xRangeMin=500,
xRangeMax=2500,
yRangeMin=500,
yRangeMax=2500,
xlast=None,
ylast=None
)
def get_template_vars(self):
return dict(
xgpio=self._settings.get(["xgpio"]),
ygpio=self._settings.get(["ygpio"]),
startingPoint=self._settings.get(["startingPoint"]),
xstart=self._settings.get(["xstart"]),
ystart=self._settings.get(["ystart"]),
xRangeMin=self._settings.get(["xRangeMin"]),
xRangeMax=self._settings.get(["xRangeMax"]),
yRangeMin=self._settings.get(["yRangeMin"]),
yRangeMax=self._settings.get(["yRangeMax"]),
status=self.status
)
##~~ AssetPlugin mixin
def get_assets(self):
# Define your plugin's asset files to automatically include in the
# core UI here.
return dict(
js=["js/TiltCam.js"],
css=["css/TiltCam.css"],
less=["less/TiltCam.less"]
)
##~~ Softwareupdate hook
def get_update_information(self):
# Define the configuration for your plugin to use with the Software Update
# Plugin here. See https://github.com/foosel/OctoPrint/wiki/Plugin:-Software-Update
# for details.
return dict(
TiltCam=dict(
displayName="Tiltcam Plugin",
displayVersion=self._plugin_version,
# version check: github repository
type="github_release",
user="fabiocaruso",
repo="OctoPrint-Tiltcam",
current=self._plugin_version,
# update method: pip
pip="https://github.com/fabiocaruso/OctoPrint-Tiltcam/archive/{target_version}.zip"
)
)
# If you want your plugin to be registered within OctoPrint under a different name than what you defined in setup.py
# ("OctoPrint-PluginSkeleton"), you may define that here. Same goes for the other metadata derived from setup.py that
# can be overwritten via __plugin_xyz__ control properties. See the documentation for that.
__plugin_name__ = "Tiltcam Plugin"
# Starting with OctoPrint 1.4.0 OctoPrint will also support to run under Python 3 in addition to the deprecated
# Python 2. New plugins should make sure to run under both versions for now. Uncomment one of the following
# compatibility flags according to what Python versions your plugin supports!
#__plugin_pythoncompat__ = ">=2.7,<3" # only python 2
#__plugin_pythoncompat__ = ">=3,<4" # only python 3
__plugin_pythoncompat__ = ">=2.7,<4" # python 2 and 3
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = TiltcamPlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information
}
|
import time
import json
from pyroute2.config import kernel
from pyroute2.netlink import genlmsg
from pyroute2.netlink.generic import GenericNetlinkSocket
from pyroute2.netlink.nlsocket import Marshal
QUOTA_NL_C_UNSPEC = 0
QUOTA_NL_C_WARNING = 1
class dquotmsg(genlmsg):
prefix = 'QUOTA_NL_A_'
nla_map = (('QUOTA_NL_A_UNSPEC', 'none'),
('QUOTA_NL_A_QTYPE', 'uint32'),
('QUOTA_NL_A_EXCESS_ID', 'uint64'),
('QUOTA_NL_A_WARNING', 'uint32'),
('QUOTA_NL_A_DEV_MAJOR', 'uint32'),
('QUOTA_NL_A_DEV_MINOR', 'uint32'),
('QUOTA_NL_A_CAUSED_ID', 'uint64'),
('QUOTA_NL_A_PAD', 'uint64'))
class MarshalDQuot(Marshal):
msg_map = {QUOTA_NL_C_UNSPEC: dquotmsg,
QUOTA_NL_C_WARNING: dquotmsg}
class DQuotSocket(GenericNetlinkSocket):
def __init__(self):
GenericNetlinkSocket.__init__(self)
self.marshal = MarshalDQuot()
if kernel[0] <= 2:
self.bind(groups=0xffffff)
else:
self.bind()
for group in self.mcast_groups:
self.add_membership(group)
def bind(self, groups=0, async_cache=False):
GenericNetlinkSocket.bind(self, 'VFS_DQUOT', dquotmsg,
groups, None)
class DQuotNotifications:
def __init__(self, provider):
self.ds = DQuotSocket()
self.provider = provider
self.warnings = [
"All good",
"Inode hardlimit reached",
"Inode grace time expired",
"Inode softlimit reached",
"Block hardlimit reached",
"Block grace time expired",
"Block softlimit reached",
"Usage got below inode hardlimit",
"Usage got below inode softlimit",
"Usage got below block hardlimit",
"Usage got below block softlimit"
]
def run(self):
while True:
for msg in self.ds.get():
self.provider.send(json.dumps({
'uid': msg.get_attr('QUOTA_NL_A_EXCESS_ID'),
'message': self.warnings[msg.get_attr('QUOTA_NL_A_WARNING')] }))
time.sleep(0.1)
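# A minimal usage sketch (not part of the original module): any object exposing a send(str)
# method can act as the provider, e.g. one that just prints the JSON payload. Listening to
# the VFS_DQUOT netlink family typically requires root / CAP_NET_ADMIN.
class StdoutProvider:
    def send(self, payload):
        print(payload)

if __name__ == "__main__":
    DQuotNotifications(StdoutProvider()).run()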
|
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('0.0.0.0', 2222))
sock.listen(1)
while True:
conn, addr = sock.accept()
while True:
data = conn.recv(1024)
if not data or b'close' in data.lower():
break
conn.send(data)
|
from structs import Noun
from helper import hprint, k100
import bisect
import random
import time
class Lesson:
@property
def name(self):
raise NotImplementedError
@property
def teacher(self):
raise NotImplementedError
def __init__(self, *args, **kwargs):
self.classroom = args[0]
self.length = args[1]
self.start = args[2]
self.state = None
self.random = random.randint(0, 4)
# Probability of a late mark being recorded in the register
self.late = 50
def teacher_desc(self):
return "Przy biurku stoi pan profesor %s." % self.teacher.mianownik
def teacher_look(self):
return self.teacher_desc()
def enter(self, state):
if state.time < self.start + self.random:
if state.time >= self.start:
hprint("^Spóźniłeś się odrobinę, ale lekcja jeszcze się nie zaczęła.^ ")
hprint("Siadasz w ławce i czekasz na rozpoczęcie lekcji.^\n")
state.time = self.start + self.random
else:
hprint("Ogłaszasz swoje przybycie: Dzień dobry i przepraszam za spóźnienie.^\n", 'yellow')
hprint("Spóźniłeś się.^ ")
if k100(self.late):
hprint("Dostajesz spóźnienie do dziennika.^\n")
state.player.spoznienia += 1
else:
hprint("Upiekło ci się.^^\n")
def exit(self, state):
lek, brk = mtl(state.time)
if not brk:
return True
hprint("%s mówi: A ty gdzie się wybierasz, %s?^\n" % (self.teacher.mianownik,
state.player.imie.mianownik), 'yellow')
hprint("Wracasz się do ławki.\n")
return False
def quote(self):
return False
def koniec(self):
hprint("Zadzwonił dzwonek na koniec lekcji.")
self.state.time = table[self.start-1] + 45
def śpij(self, *args):
hprint("Nie masz siły, żeby żyć, a co dopiero, by uważać na lekcji. Zawieszasz wzrok na %s i zasypiasz…" %
self.teacher.miejscownik)
time.sleep(2)
if k100(30):
hprint("Nauczyciel zauważył twoją nieprzytomność. Dostaniesz ocenę niedostateczną.\n")
self.state.mod_dp(-random.randint(1, 5))
else:
hprint("Nauczyciel był zajęty przepytywaniem kogoś innego. Upiekło ci się!\n")
hprint("Co nieco odespałeś.\n")
self.state.mod_hp(random.randint(5, 10))
self.koniec()
śpij.name = "śpij"
def słuchaj(self, *args):
hprint("Nie udawaj, chce ci się spać. (Słuchanie niezaimplementowane.)\n")
return self.śpij()
słuchaj.name = "słuchaj"
def actions(self):
return [self.śpij, self.słuchaj]
class Matematyka(Lesson):
name = Noun("matematyka", "matematyki", n="matematyką")
teacher = Noun("Szymon Dobecki", "Szymona Dobeckiego", n="Szymonem Dobeckim",
msc="Szymonie Dobeckim")
def __init__(self, *args, **kwargs):
Lesson.__init__(self, *args, **kwargs)
def teacher_look(self):
return """Widzisz łysygo człowieka o bujnej fryzurze. Blask jego majestatu oświetla twoje wewnętrze \
matematyczne oko, wykluczając je z dziedziny.
Za uchem schowany ma krzywik, a z kieszeni wystaje okładka na której widnieje nieco starty napis *PAWŁOWSKI*."""
def quote(self):
base = ["bo tu jest taka ukryta kolumna",
"ty nie masz tu prawa głosu, ty weź dupę w troki i do roboty",
"ja to bym te wszystkie ferie zlikwidował",
"te uprawnienia laureata ci zostaja na nastepny rok jak nie zdasz?",
"ja cię nie przepuszczę do następnej klasy",
"i tu stosujemy taki myk",
"w tej klasie to ze 30% osób cokolwiek robi",
"jakbyśmy byli u mnie w szkole to bym cię wypieprzył z klasy",
"na ostatniej wejściówce nikt nie zapunktował",
"wyciągnijcie kartki, napiszemy sobie pracę własną",
"to na jutrzejszą lekcję obejrzyjcie sobie 2 wykłady, nie są długie, pół godziny każdy"
]
hprint("\nSzymon Dobecki mówi: (…) %s.\n" % random.choice(base), 'yellow')
return True
class WF(Lesson):
name = Noun("WF", "WF-u", n="WF-em")
teacher = Noun("Grzegorz Henicz", "Grzegorza Henicza", n="Grzegorzem Heniczem")
def __init__(self, *args, **kwargs):
Lesson.__init__(self, *args, **kwargs)
def teacher_look(self):
return """Z całych sił próbujesz dostrzec charakterystyczne cechy najlepszego wuefisty, lecz jego prędkość \
względem twojej jest zbyt wysoka.
(Interakcja z postacią jest niemożliwa.)"""
class Historia(Lesson):
name = Noun("historia", "historii", n="historią")
teacher = Noun("Dariusz Piasek", "Dariusza Piaska", n="Dariuszem Piaskiem")
def __init__(self, *args, **kwargs):
Lesson.__init__(self, *args, **kwargs)
class Informatyka(Lesson):
name = Noun("informatyka", "informatyki", n="informatyką")
teacher = Noun("Ryszard „Prezes” Szubartowski", b="Ryszardem „Prezesem” Szubartowskim",
d="Ryszarda „Prezesa” Szubartowskiego")
def __init__(self, *args, **kwargs):
Lesson.__init__(self, *args, **kwargs)
class TimeTable:
pass
"""timetominutes"""
def ttm(hr, min):
return hr * 60 + min
"""timetominutes"""
def mtt(min):
return "%02d:%02d" % (min // 60, min % 60)
table = [ttm(7, 45), ttm(8, 40), ttm(9, 35), ttm(10, 30), ttm(11, 25), ttm(12, 30), ttm(13, 25),
ttm(14, 25), ttm(15, 15)]
"""minutestolesson
(nrLekcji, Lekcja/Przerwa)"""
def mtl(time):
i = bisect.bisect_left(table, time + 1)
if i == 0:
return 0, False
return i, time - 1 - table[i - 1] < 44
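# Worked example (added for clarity): 08:00 falls 15 minutes into the first lesson,
# which starts at 07:45, so mtl() reports lesson 1 during class time.
# >>> ttm(8, 0)
# 480
# >>> mtt(480)
# '08:00'
# >>> mtl(ttm(8, 0))
# (1, True)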
def curless(timetable, hr, start):
i = start - 1
for lesson in timetable:
i += lesson.length
if i >= hr:
return lesson
class Lament1(TimeTable):
table = [
[Matematyka(9, 3, 1), WF(100, 1, 4),
Historia(9, 1, 5), Informatyka(23, 2, 6)],
[],
[],
[],
[]
]
length = [(1, 7)] + [(0, 0)] * 6
def status(self, state):
lek, brk = mtl(state.time)
day = state.weekday
if self.length[day][1] == 0:
state.lesson = None
return "Dziś masz wolne od szkoły!"
elif lek < self.length[day][0]:
state.lesson = self.table[day][0]
return "Dziś zaczynasz %s o godzinie %s w sali nr %d." % (self.table[day][0].name.narzednik,
mtt(table[self.length[day - 1][0]]),
self.table[day][0].classroom)
elif (lek >= self.length[day][1] and brk) or (lek > self.length[day][1]):
state.lesson = None
return "Skończyłeś już lekcje na dzisiaj."
elif brk:
cur = curless(self.table[day], lek, self.length[day][0])
state.lesson = cur
return "W sali nr %d trwa lekcja %s." % \
(cur.classroom, cur.name.dopelniacz)
elif not brk:
cur = curless(self.table[day], lek + 1, self.length[day][0])
state.lesson = cur
return "Następna lekcja to %s w sali %d." % \
(cur.name.mianownik, cur.classroom)
return ""
|
# Copyright (c) 2019 Alliance for Sustainable Energy, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool, SaveTool, ZoomOutTool, ZoomInTool, BoxZoomTool, ResetTool
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.palettes import Category20_20, Inferno256, Greys256, Set2_8
from datetime import datetime
import receiver
import os
import time
os.environ['TZ'] = 'UTC'
time.tzset()
# https://github.com/zeromq/pyzmq/blob/master/examples/serialization/serialsocket.py
FPS = 5
doc = curdoc()
first_time=True
source_dict_top = {}
def create_figure(title='Power P'):
hover = HoverTool(tooltips=[
('Name', '@name'),
("Time", "@time"),
("Date", "@date"),
(title, "@data")
])
f = figure(plot_width=700,
plot_height=500,
x_axis_type='datetime',
# x_axis_type='auto',
tools=[hover, SaveTool(), ZoomOutTool(), ZoomInTool(), BoxZoomTool(), ResetTool()],
title="Real-Time " + title + " Plot"
)
f.xaxis.axis_label = "Time"
f.yaxis.axis_label = title
f.xaxis.formatter = DatetimeTickFormatter(
seconds="%X",
# seconds=["%H:%M:%S"]
# seconds="%d %B %Y",
# minutes="%d %B %Y",
# hours="%d %b %Y",
# days="%d %b %Y",
# months="%d %b %Y",
# years="%d %b %Y"
)
return f
def update_multi_line():
global first_time
# global source_dict
# print(len(receiver.buffer))
if len(receiver.buffer) > 0:
top_data = receiver.buffer.popleft()
print(top_data.keys())
print('first' in top_data.keys())
if 'first' in top_data.keys(): # reset graph
print(top_data['first'])
first_time = True
doc.clear()
# doc.remove_root()
return
if first_time:
print('Start creation')
figure_container = []
for key_name in top_data.keys():
data = top_data[key_name]
keys = list(data.keys())
print(keys)
# keys.remove('time')
temp_figure = create_figure(key_name)
color_spectrum = Category20_20
if len(keys) < 9:
color_spectrum = Set2_8
if len(keys) > 20:
color_spectrum = Greys256
source_dict_top[key_name] = {}
for name, color in zip(keys, color_spectrum):
source_dict_top[key_name][name] = ColumnDataSource({'time': [], 'date': [], 'data': [], 'name':[]})
# print('name ' + repr(name))
line_style = 'solid'
if 'forecast' in name.lower():
line_style = 'dashed'
temp_figure.line(x='time', y='data', source=source_dict_top[key_name][name], color=color, legend_label=name, name=name,
line_width=3, line_dash=line_style)
temp_figure.legend.location = "top_left"
temp_figure.legend.click_policy = "hide"
figure_container.append(temp_figure)
print('End creation')
first_time = False
doc.add_root(column(figure_container, name='Streaming'))
for key_name in top_data.keys():
data = top_data[key_name]
keys = list(data.keys())
# keys.remove('time')
for index, name in enumerate(keys):
first = data[name]['data']
source = source_dict_top[key_name][name]
if type(first) is list:
print(len(first))
print(len(data[name]['time']))
new_data = {
'time': list(map(datetime.fromtimestamp, data[name]['time'])),
'date': list(map(datetime.fromtimestamp, data[name]['time'])),
'data': first,
'name': [name]*len(first)
}
else:
# print(datetime.fromtimestamp(data[name]['time']*1000))
new_data = {
'time': [datetime.fromtimestamp(data[name]['time'])],
'date': [datetime.fromtimestamp(data[name]['time']).strftime("%Y-%m-%d %H:%M:%S")],
# 'date': [datetime.fromtimestamp(data[name]['time'])],
'data': [first],
'name': [name]
}
source.stream(new_data, rollover=360)
# doc.add_periodic_callback(update_multi_line, 1000/FPS)
doc.add_periodic_callback(update_multi_line, 1000) # Slow down for more plots
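# To serve this document as a live Bokeh app (assuming this file sits next to receiver.py,
# e.g. saved as streaming_plot.py, a placeholder name): bokeh serve --show streaming_plot.py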
|
def first(array, element):
low = 0
high = len(array) - 1
ans = -1
while low <= high:
mid = (low + high) // 2
if array[mid] > element:
high = mid - 1
elif array[mid] < element:
low = mid + 1
else:
ans = mid
high = mid - 1
return ans
def last(array, element):
low = 0
high = len(array) - 1
ans = -1
while low <= high:
mid = (low + high) // 2
if array[mid] > element:
high = mid - 1
elif array[mid] < element:
low = mid + 1
else:
ans = mid
low = mid + 1
return ans
# Count occurrences of element in a sorted array; returns 0 if the element is absent.
def count(array, element):
f = first(array, element)
return 0 if f == -1 else last(array, element) - f + 1
def main():
array = [1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 6, 6, 7, 9, 10, 10, 10, 11, 11, 11]
print(count(array, 4))
if __name__ == "__main__":
main()
|
from rl.old_lib.ActionDistribution import ActionDistribution
class Policy:
"""
Selects an appropriate action.
"""
def get_action_distribution(self, state, action_space) -> ActionDistribution:
"""
Chooses one or several actions in the given action space, given an input state.
:param state: The environment state.
:param action_space: The current action space to select actions from.
:return: A probability distribution over the actions
"""
raise NotImplementedError
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.verification import context
from tests.unit import test
@context.configure("fake_verifier_context", order=314)
class FakeContext(context.VerifierContext):
def cleanup(self):
pass
def setup(self):
pass
class VerifierContextTestCase(test.TestCase):
def test__meta_get(self):
data = {"key1": "value1", "key2": "value2"}
for k, v in data.items():
FakeContext._meta_set(k, v)
for k, v in data.items():
self.assertEqual(v, FakeContext._meta_get(k))
self.assertTrue(FakeContext.is_hidden())
class ContextManagerTestCase(test.TestCase):
@mock.patch("rally.verification.context.VerifierContext")
def test_validate(self, mock_verifier_context):
config = {"ctx1": mock.Mock(), "ctx2": mock.Mock()}
context.ContextManager.validate(config)
self.assertEqual([mock.call(k, allow_hidden=True)
for k, v in config.items()],
mock_verifier_context.get.call_args_list)
self.assertEqual(
[mock.call(v) for k, v in config.items()],
mock_verifier_context.get.return_value.validate.call_args_list)
|
import sqlite3
from sqlite3 import Connection
class SQLite:
_conn: Connection
def __init__(self, fileloc: str):
self._conn = sqlite3.connect(fileloc)
self._ensure_created()
def close(self):
self._conn.close()
def _ensure_created(self):
self._conn.execute(
'CREATE TABLE IF NOT EXISTS `whitelist` (' +
' `id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,' +
' `discordId` VARCHAR(22),' +
' `mcId` VARCHAR(32)' +
');')
self._conn.execute(
'CREATE TABLE IF NOT EXISTS `guilds` (' +
' `id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,' +
' `guildId` VARCHAR(22),' +
' `adminRoleId` VARCHAR(32),' +
' `statusChannelId` VARCHAR(32),' +
' `statusMessageId` VARCHAR(32),' +
' `disabled` BOOLEAN' +
');')
self._conn.commit()
def get_whitelist(self) -> dict:
res = self._conn.execute(
'SELECT `discordId`, `mcId` FROM `whitelist`;')
res_map = {}
for e in res.fetchall():
res_map[e[0]] = e[1]
return res_map
def get_whitelist_by_mc_id(self, mc_id: str) -> (str, str):
res = self._conn.execute(
'SELECT `discordId`, `mcId` FROM `whitelist` WHERE ' +
'`mcId` = ?;', (mc_id,))
row = res.fetchone()
if row is None:
return (None, None)
print(row)
return row
def get_whitelist_by_discord_id(self, discord_id: str) -> (str, str):
res = self._conn.execute(
'SELECT `discordId`, `mcId` FROM `whitelist` WHERE ' +
'`discordId` = ?;', (discord_id,))
row = res.fetchone()
if row is None:
return (None, None)
return row
def set_witelist(self, discord_id: str, mc_id: str) -> str:
dc_id, old_mc_id = self.get_whitelist_by_discord_id(discord_id)
if dc_id is None:
self._conn.execute(
'INSERT INTO `whitelist` (`discordId`, `mcId`) VALUES ' +
'(?, ?);', (discord_id, mc_id))
else:
self._conn.execute(
'UPDATE `whitelist` SET `mcId` = ? WHERE ' +
'`discordId` = ?;', (mc_id, discord_id))
self._conn.commit()
return old_mc_id
def rem_witelist(self, ident: str):
self._conn.execute(
'DELETE FROM `whitelist` WHERE ' +
'`discordId` = ? OR `mcId` = ?;', (ident, ident))
self._conn.commit()
def get_admin_role(self, guild_id: str) -> str:
res = self._conn.execute(
'SELECT `adminRoleId` from `guilds` WHERE ' +
'`guildId` = ?;', (guild_id,))
role = res.fetchone()
return role[0] if role else None
def set_admin_role(self, guild_id: str, role_id: str):
res = self._conn.execute(
'SELECT `guildId` from `guilds` WHERE ' +
'`guildId` = ?;', (guild_id,))
res_guild_id = res.fetchone()
if res_guild_id is None:
self._conn.execute(
'INSERT INTO `guilds` (`guildId`, `adminRoleId`) VALUES ' +
'(?, ?);', (guild_id, role_id))
else:
self._conn.execute(
'UPDATE `guilds` SET `adminRoleId` = ? WHERE ' +
'`guildId` = ?;', (role_id, guild_id))
self._conn.commit()
def get_status_channel(self, guild_id: str) -> str:
res = self._conn.execute(
'SELECT `statusChannelId` from `guilds` WHERE ' +
'`guildId` = ?;', (guild_id,))
chan = res.fetchone()
return chan[0] if chan else None
def set_status_channel(self, guild_id: str, channel_id: str):
res = self._conn.execute(
'SELECT `guildId` from `guilds` WHERE ' +
'`guildId` = ?;', (guild_id,))
res_guild_id = res.fetchone()
if res_guild_id is None:
self._conn.execute(
'INSERT INTO `guilds` (`guildId`, `statusChannelId`) VALUES ' +
'(?, ?);', (guild_id, channel_id))
else:
self._conn.execute(
'UPDATE `guilds` SET `statusChannelId` = ? WHERE ' +
'`guildId` = ?;', (channel_id, guild_id))
self._conn.commit()
def get_status_message(self, guild_id: str) -> str:
res = self._conn.execute(
'SELECT `statusMessageId` from `guilds` WHERE ' +
'`guildId` = ?;', (guild_id,))
msg = res.fetchone()
return msg[0] if msg else None
def set_status_message(self, guild_id: str, message_id: str):
res = self._conn.execute(
'SELECT `guildId` from `guilds` WHERE ' +
'`guildId` = ?;', (guild_id,))
res_guild_id = res.fetchone()
if res_guild_id is None:
self._conn.execute(
'INSERT INTO `guilds` (`guildId`, `statusMessageId`) VALUES ' +
'(?, ?);', (guild_id, message_id))
else:
self._conn.execute(
'UPDATE `guilds` SET `statusMessageId` = ? WHERE ' +
'`guildId` = ?;', (message_id, guild_id))
self._conn.commit()
def get_disabled(self, guild_id: str) -> bool:
res = self._conn.execute(
'SELECT `disabled` from `guilds` WHERE ' +
'`guildId` = ?;', (guild_id,))
state = res.fetchone()
return state[0] if state else False
def set_disabled(self, guild_id: str, disabled: bool):
res = self._conn.execute(
'SELECT `guildId` from `guilds` WHERE ' +
'`guildId` = ?;', (guild_id,))
res_guild_id = res.fetchone()
if res_guild_id is None:
self._conn.execute(
'INSERT INTO `guilds` (`guildId`, `disabled`) VALUES ' +
'(?, ?);', (guild_id, disabled))
else:
self._conn.execute(
'UPDATE `guilds` SET `disabled` = ? WHERE ' +
'`guildId` = ?;', (disabled, guild_id))
self._conn.commit()
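# A minimal usage sketch (not part of the original module), exercising the whitelist helpers
# against an in-memory database; the Discord and Minecraft IDs are illustrative placeholders.
if __name__ == "__main__":
    db = SQLite(":memory:")
    db.set_witelist("123456789012345678", "Notch")
    print(db.get_whitelist())                                   # {'123456789012345678': 'Notch'}
    print(db.get_whitelist_by_discord_id("123456789012345678"))
    db.rem_witelist("Notch")
    db.close()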
|
from unittest import TestCase
import psycopg2
from mygeocoder import Confidence, TigerGeocoder
class TigerGeocoderTests(TestCase):
"""
NOTE: Expects the geocoder database to be up and running.
"""
def setUp(self):
self.geocoder = TigerGeocoder("dbname=geocoder user=eric", raise_shared_mem_exc=True)
def tearDown(self):
self.geocoder.close()
def assertGeocode(self, address, expected_confidence):
result = self.geocoder.geocode(address)
self.assertEquals(expected_confidence, result['confidence'])
if result['confidence'] in [Confidence.EXCELLENT, Confidence.FAIR]:
self.assertEquals("1724 Massachusetts Ave NW, Washington DC 20036", result['address'])
self.assertEquals(-77.0393236499317, result['lon'])
self.assertEquals(38.9081098579959, result['lat'])
elif result['confidence'] == Confidence.POOR:
self.assertTrue(result['address'] is not None)
self.assertTrue(result['lat'] is not None)
self.assertTrue(result['lon'] is not None)
else:
self.assertEquals(None, result['address'])
self.assertEquals(None, result['lat'])
self.assertEquals(None, result['lon'])
def test_excellent_geocode(self):
self.assertGeocode("1724 Massachusetts Ave NW, Washington DC", Confidence.EXCELLENT)
def test_fair_geocode(self):
self.assertGeocode("1724 Massachusetts Ave N, Washington DC", Confidence.FAIR)
def test_poor_geocode(self):
self.assertGeocode("1724 Massach Ave, Washington DC", Confidence.POOR)
def test_no_match_geocode(self):
self.assertGeocode("", Confidence.NO_MATCH)
def test_shared_mem_exception_raised(self):
self.geocoder = TigerGeocoder("dbname=geocoder user=eric", raise_shared_mem_exc=True)
self.assertRaises(psycopg2.OperationalError, self.geocoder.geocode, "-1 Mass Ave, Washington DC")
def test_shared_mem_no_match(self):
self.geocoder = TigerGeocoder("dbname=geocoder user=eric", raise_shared_mem_exc=False)
self.assertGeocode("-1 Mass Ave, Washington DC", Confidence.NO_MATCH)
def test_shared_mem_no_match_twice(self):
self.geocoder = TigerGeocoder("dbname=geocoder user=eric", raise_shared_mem_exc=False)
self.assertGeocode("-1 Mass Ave, Washington DC", Confidence.NO_MATCH)
self.assertGeocode("-1 Mass Ave, Washington DC", Confidence.NO_MATCH)
|
######################################################################
# Flat Shader
# This shader applies the given model view matrix to the vertices,
# and uses a uniform color value.
flatShader = (['''
uniform mat4 mvpMatrix;
attribute vec4 vVertex;
void main(void)
{
gl_Position = mvpMatrix * vVertex;
}'''],
['''
//precision mediump float;
uniform vec4 vColor;
void main(void)
{
gl_FragColor = vColor;
}'''])
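######################################################################
# Illustrative helper (not part of the original shader collection): a minimal sketch of how a
# (vertex, fragment) source pair such as flatShader could be compiled with PyOpenGL. It assumes
# PyOpenGL is available and that a current OpenGL context exists when the function is called;
# the helper name is hypothetical.
def _compile_shader_pair(shader_pair):
    from OpenGL.GL import GL_VERTEX_SHADER, GL_FRAGMENT_SHADER
    from OpenGL.GL.shaders import compileProgram, compileShader
    vertex_sources, fragment_sources = shader_pair
    # Each half of the pair is a list of GLSL source strings; join them into a single source.
    vertex_src = "\n".join(vertex_sources)
    fragment_src = "\n".join(fragment_sources)
    return compileProgram(compileShader(vertex_src, GL_VERTEX_SHADER),
                          compileShader(fragment_src, GL_FRAGMENT_SHADER))
# Example (requires an active context): program = _compile_shader_pair(flatShader)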
######################################################################
# Point light, diffuse lighting only
pointLightDiff = (['''
uniform mat4 mvMatrix;
uniform mat4 pMatrix;
uniform vec3 vLightPos;
uniform vec4 vColor;
attribute vec4 vVertex;
attribute vec3 vNormal;
varying vec4 vFragColor;
void main(void)
{
mat3 mNormalMatrix;
mNormalMatrix[0] = normalize(mvMatrix[0].xyz);
mNormalMatrix[1] = normalize(mvMatrix[1].xyz);
mNormalMatrix[2] = normalize(mvMatrix[2].xyz);
vec3 vNorm = normalize(mNormalMatrix * vNormal);
vec4 ecPosition;
vec3 ecPosition3;
ecPosition = mvMatrix * vVertex;
ecPosition3 = ecPosition.xyz /ecPosition.w;
vec3 vLightDir = normalize(vLightPos - ecPosition3);
float fDot = max(0.0, dot(vNorm, vLightDir));
vFragColor.rgb = vColor.rgb * fDot;
vFragColor.a = vColor.a;
// vFragColor = vColor;
mat4 mvpMatrix;
mvpMatrix = pMatrix * mvMatrix;
gl_Position = mvpMatrix * vVertex;
}'''],
['''
//precision mediump float;
varying vec4 vFragColor;
void main(void)
{
gl_FragColor = vFragColor;
}'''])
######################################################################
# ADS Gouraud shader
ADSGouraud = (['''
uniform mat4 mvMatrix;
uniform mat4 pMatrix;
uniform vec3 vLightPos;
uniform vec4 ambientColor;
uniform vec4 diffuseColor;
uniform vec4 specularColor;
uniform float shininess;
uniform vec4 lightColor;
uniform float fConstantAttenuation;
uniform float fLinearAttenuation;
uniform float fQuadraticAttenuation;
attribute vec4 vVertex;
attribute vec3 vNormal;
varying vec4 vVaryingColor;
void main(void)
{
mat3 mNormalMatrix;
mNormalMatrix[0] = normalize(mvMatrix[0].xyz);
mNormalMatrix[1] = normalize(mvMatrix[1].xyz);
mNormalMatrix[2] = normalize(mvMatrix[2].xyz);
// Get surface normal in eye coordinates
vec3 vEyeNormal = mNormalMatrix * vNormal;
// Get vertex position in eye coordinates
vec4 vPosition4 = mvMatrix * vVertex;
vec3 vPosition3 = vPosition4.xyz /vPosition4.w;
// Get vector to light source
vec3 vLightDir = normalize(vLightPos - vPosition3);
// Get distance to light source
float distanceToLight = length(vLightPos-vPosition3);
// float attenuation = fConstantAttenuation / ((1.0 + fLinearAttenuation * distanceToLight) * (1.0 + fQuadraticAttenuation * distanceToLight * distanceToLight));
float attenuation = 1.0 / (fConstantAttenuation + fLinearAttenuation * distanceToLight + fQuadraticAttenuation * distanceToLight * distanceToLight);
vec4 attenuatedLight = lightColor * attenuation;
// float attenuation = 1.0f;
// Dot product gives us diffuse intensity
float diff = max(0.0, dot(vEyeNormal, vLightDir));
// Multiply intensity by diffuse color, force alpha to 1.0
vVaryingColor = attenuatedLight * diffuseColor * diff;
// Add in ambient light
vVaryingColor += ambientColor;
// Specular light
vec3 vReflection = normalize(reflect(-vLightDir, vEyeNormal));
float spec = max(0.0, dot(vEyeNormal, vReflection));
if(diff != 0.0) {
float fSpec = pow(spec, shininess);
vVaryingColor.rgb += attenuatedLight.rgb * vec3(fSpec, fSpec, fSpec);
}
// Don't forget to transform the geometry
mat4 mvpMatrix = pMatrix * mvMatrix;
gl_Position = mvpMatrix * vVertex;
}'''],
['''
//precision mediump float;
varying vec4 vVaryingColor;
void main(void)
{
gl_FragColor = vVaryingColor;
}'''])
##############################################################################
# Simple phong shader by Jerome GUINOT aka 'JeGX' - jegx [at] ozone3d
# [dot] net see
# http://www.ozone3d.net/tutorials/glsl_lighting_phong.php
simplePhong = (['''
varying vec3 normal, lightDir0, eyeVec;
void main()
{
normal = gl_NormalMatrix * gl_Normal;
vec3 vVertex = vec3(gl_ModelViewMatrix * gl_Vertex);
lightDir0 = vec3(gl_LightSource[0].position.xyz - vVertex);
eyeVec = -vVertex;
gl_Position = ftransform();
}
'''],
['''
uniform vec4 diffuse, specular, ambient;
uniform float shininess;
varying vec3 normal, lightDir0, eyeVec;
void main (void)
{
vec4 final_color =
(gl_FrontLightModelProduct.sceneColor * ambient)
+ (gl_LightSource[0].ambient * ambient);
vec3 N = normalize(normal);
vec3 L0 = normalize(lightDir0);
float lambertTerm0 = dot(N,L0);
if(lambertTerm0 > 0.0)
{
final_color += gl_LightSource[0].diffuse * diffuse * lambertTerm0;
vec3 E = normalize(eyeVec);
vec3 R = reflect(-L0, N);
float spec = pow(max(dot(R, E), 0.0), shininess);
final_color += gl_LightSource[0].specular * specular * spec;
}
gl_FragColor = final_color;
}
'''])
##############################################################################
# ADS Phong shader
ADSPhong = (['''
attribute vec4 vVertex;
attribute vec3 vNormal;
uniform mat4 mvMatrix;
uniform mat4 pMatrix;
uniform vec3 vLightPos;
// Color to fragment program
varying vec3 vVaryingNormal;
varying vec3 vVaryingLightDir;
varying float distanceToLight;
//varying float spotEffect;
void main(void)
{
mat3 normalMatrix;
normalMatrix[0] = normalize(mvMatrix[0].xyz);
normalMatrix[1] = normalize(mvMatrix[1].xyz);
normalMatrix[2] = normalize(mvMatrix[2].xyz);
// Get surface normal in eye coordinates
vVaryingNormal = normalMatrix * vNormal;
// Get vertex position in eye coordinates
vec4 vPosition4 = mvMatrix * vVertex;
vec3 vPosition3 = vPosition4.xyz /vPosition4.w;
// Get vector to light source
vVaryingLightDir = normalize(vLightPos - vPosition3);
// Get distance to light source
distanceToLight = length(vLightPos-vPosition3);
// spotEffect = dot(normalize(gl_LightSource[0].spotDirection), normalize(-lightDir));
// spotEffect = dot(vec3(0.0, 0.0, -1.0), normalize(-vVaryingLightDir));
// Don't forget to transform the geometry
mat4 mvpMatrix = pMatrix * mvMatrix;
gl_Position = mvpMatrix * vVertex;
}'''],
['''
precision mediump float;
uniform vec4 ambientColor;
uniform vec4 diffuseColor;
uniform vec4 specularColor;
uniform float shininess;
uniform vec4 lightColor;
uniform float fConstantAttenuation;
uniform float fLinearAttenuation;
uniform float fQuadraticAttenuation;
varying vec3 vVaryingNormal;
varying vec3 vVaryingLightDir;
varying float distanceToLight;
//varying float spotEffect;
void main(void)
{
// float attenuation = 1.0 / (fConstantAttenuation + fLinearAttenuation * distanceToLight + fQuadraticAttenuation * distanceToLight * distanceToLight);
float attenuation = fConstantAttenuation / ((1.0 + fLinearAttenuation * distanceToLight) * (1.0 + fQuadraticAttenuation * distanceToLight * distanceToLight));
// attenuation *= pow(spotEffect, 0.15);
// float attenuation = 1.0;
vec4 attenuatedLight = lightColor * attenuation;
attenuatedLight.a = 1.0;
// Dot product gives us diffuse intensity
float diff = max(0.0, dot(normalize(vVaryingNormal), normalize(vVaryingLightDir)));
// Multiply intensity by diffuse color, force alpha to 1.0
gl_FragColor = attenuatedLight * (diffuseColor * diff + ambientColor);
// Specular light
vec3 vReflection = normalize(reflect(-normalize(vVaryingLightDir), normalize(vVaryingNormal)));
float spec = max(0.0, dot(normalize(vVaryingNormal), vReflection));
// If diffuse light is zero, do not even bother with the pow function
if(diff != 0.0) {
float fSpec = pow(spec, shininess);
gl_FragColor.rgb += attenuatedLight.rgb * vec3(fSpec, fSpec, fSpec);
}
    // For some reason, without the following multiplications, all scenes exported from Blender are dark.
    // Need to investigate the real reason. For now, this is just a workaround to make the scene brighter.
// gl_FragColor.rgb *= vec3(5.5, 5.5, 5.5);
// gl_FragColor.rgb *= vec3(2.5, 2.5, 2.5);
// gl_FragColor.rgb += vec3(0.3, 0.3, 0.3);
// gl_FragColor = diffuseColor + ambientColor;
}'''])
######################################################################
# Point light (Diffuse only), with texture (modulated)
texturePointLightDiff = (['''
uniform mat4 mvMatrix;
uniform mat4 pMatrix;
uniform vec3 vLightPos;
uniform vec4 vColor;
attribute vec4 vVertex;
attribute vec3 vNormal;
varying vec4 vFragColor;
attribute vec2 vTexCoord0;
varying vec2 vTex;
void main(void)
{
mat3 mNormalMatrix;
mNormalMatrix[0] = normalize(mvMatrix[0].xyz);
mNormalMatrix[1] = normalize(mvMatrix[1].xyz);
mNormalMatrix[2] = normalize(mvMatrix[2].xyz);
vec3 vNorm = normalize(mNormalMatrix * vNormal);
vec4 ecPosition;
vec3 ecPosition3;
ecPosition = mvMatrix * vVertex;
ecPosition3 = ecPosition.xyz /ecPosition.w;
vec3 vLightDir = normalize(vLightPos - ecPosition3);
float fDot = max(0.0, dot(vNorm, vLightDir));
vFragColor.rgb = vColor.rgb * fDot;
vFragColor.a = vColor.a;
vTex = vTexCoord0;
mat4 mvpMatrix;
mvpMatrix = pMatrix * mvMatrix;
gl_Position = mvpMatrix * vVertex;
}'''],
['''
precision mediump float;
varying vec4 vFragColor;
varying vec2 vTex;
uniform sampler2D textureUnit0;
void main(void)
{
gl_FragColor = texture2D(textureUnit0, vTex);
if(gl_FragColor.a < 0.1)
discard;
/* if(gl_FragColor.a < 1.0)
{
gl_FragColor.r = 1.0 - gl_FragColor.a;
gl_FragColor.g = 0;
gl_FragColor.b = 0;
gl_FragColor.a = 1.0;
}*/
// if(vFragColor.a != 0.0)
// gl_FragColor *= vFragColor;
// else
// discard;
// gl_FragColor = texture2D(textureUnit0, vTex);
// gl_FragColor = vFragColor;
}'''])
######################################################################
# Phong with textures
texturePhong = (['''
varying vec3 normal, lightDir0, eyeVec;
void main()
{
normal = gl_NormalMatrix * gl_Normal;
vec3 vVertex = vec3(gl_ModelViewMatrix * gl_Vertex);
lightDir0 = vec3(gl_LightSource[0].position.xyz - vVertex);
eyeVec = -vVertex;
gl_Position = ftransform();
gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
}
'''],
['''
varying vec3 normal, lightDir0, eyeVec;
uniform sampler2D my_color_texture[1]; //0 = ColorMap
void main (void)
{
vec4 texColor = texture2D(my_color_texture[0], gl_TexCoord[0].st);
vec4 final_color;
/* final_color = (gl_FrontLightModelProduct.sceneColor * vec4(texColor.rgb,1.0)) +
gl_LightSource[0].ambient * vec4(texColor.rgb,1.0);*/
final_color = (gl_FrontLightModelProduct.sceneColor * vec4(texColor.rgb,1.0)) +
vec4(texColor.rgb,1.0);
vec3 N = normalize(normal);
vec3 L0 = normalize(lightDir0);
float lambertTerm0 = dot(N,L0);
if(lambertTerm0 > 0.0)
{
final_color += gl_LightSource[0].diffuse *
gl_FrontMaterial.diffuse *
lambertTerm0;
vec3 E = normalize(eyeVec);
vec3 R = reflect(-L0, N);
float specular = pow( max(dot(R, E), 0.0),
gl_FrontMaterial.shininess );
final_color += gl_LightSource[0].specular *
gl_FrontMaterial.specular *
specular;
}
gl_FragColor = final_color;
}
'''])
|
"""
Definition of views.
"""
from django.shortcuts import render
from django.http import HttpRequest
from django.template import RequestContext
from datetime import datetime
import app.models
# import random
from django.http import HttpResponse
# import getOptions
def home(request):
"""Renders the home page."""
assert isinstance(request, HttpRequest)
homeResponce = render(
request,
'app/index.html',
context_instance=RequestContext(
request,
{
'title': 'Home Page',
'year': datetime.now().year,
'version': "1.0"
})
)
if 'playerID' in request.COOKIES:
homeResponce.delete_cookie('playerID')
return homeResponce
def playerLogin(request):
assert isinstance(request, HttpRequest)
"""PREPARE REQUEST"""
context = RequestContext(
request,
{
'title': 'Player Login:',
}
)
return render(
request,
'app/playerLogin.html',
context_instance=context
)
def leaderboard(request):
assert isinstance(request, HttpRequest)
print(app.models.Player.objects.all())
list = app.models.Player.objects.all().filter(QuestionsAnswered__gte=49).order_by('CorrectPercentage').reverse()
# list = ["one", "two", "three", "four"]
for i in range(len(list)):
list[i].Position = i + 1
list[i].ImgNumber = str(list[i].ImgNumber)
list[i].CorrectPercentage = str(list[i].CorrectPercentage*100)[:4] + "%"
context = RequestContext(
request,
{
'title': 'Leaderboard',
'list': list,
}
)
leaderboardRender = render(
request,
'app/leaderboard.html',
context_instance=context
)
return leaderboardRender
# THIS IS MY FUNCTION##
def getColor(n):
if n > 80:
return "green"
elif n > 70:
return "orange"
else:
return "red"
def test(request):
"""Renders the question page"""
    # gets the img to be used from the database by getting the first in a
    # random arrangement of all items in the db
lowestNumber = app.models.Person.objects.order_by('NumShown').first().NumShown
# print("lowest:", lowestNumber)
answer = app.models.Person.objects.order_by('?').filter(NumShown=lowestNumber).first()
answer.NumShown += 1
answer.save()
# this is not perfect for a few reasons, but I tried to remove duplicates,
# and it works almost always
# if "alreadyUsed" in request.COOKIES:
# alreadyUsedList = request.COOKIES["alreadyUsed"].split(',')
# print(alreadyUsedList)
# print(answer.ImgNumber)
# while str(answer.ImgNumber) in alreadyUsedList:
# answer = app.models.Person.objects.order_by('?').first()
"""CHECK CORRECT"""
# this assigns the last answer and the last correct value to a variable.
# this is done here because I am lazy and did not do this properly
previousAnswer = request.POST.get("answer", default=None)
previousCorrect = request.POST.get("correctImgId", default=None)
    # this is defining the response that the server will send to the user
"""
The values that I send to the user:
title: the title that appears in the browser
imgNum: the location of the file of the image on the server's disk
numQuestion: question x out of 25
correctOption: this is a debug bit that says what the actual answer is
correctImgId: redundant, but I used it for debug
the rest are debug things
"""
assert isinstance(request, HttpRequest)
"""SERVER RESPONCE CREATION"""
serverResponce = render(
request,
'app/question.html',
context_instance=RequestContext(
request,
{
'title': 'Quiz',
'imgNum': str(answer.ImgNumber),
'numQuestion': int(request.GET.get("q", default=1)),
'correctImgId': answer.ImgNumber,
'previousPersonId': request.POST.get("correctImgId", default=None),
'version': "1.0",
}))
"""COOKIE STUFF"""
# if the cookie does not exist, create one, for all of the three cookies that I used
# it also wipes the cookies if you are starting a new quiz, even if you do
# not finish the last one
if "playerNumCorrect" not in request.COOKIES or request.GET.get("q", default=0) == 0:
serverResponce.set_cookie("playerNumCorrect", value=0)
if "alreadyUsed" not in request.COOKIES or request.GET.get("q", default=0) == 0:
serverResponce.set_cookie("alreadyUsed", value="")
if "incorrectAnswers" not in request.COOKIES or request.GET.get("q", default=0) == 0:
serverResponce.set_cookie("incorrectAnswers", value="")
if "playerID" not in request.COOKIES:
serverResponce.set_cookie("playerID", value=str(request.POST.get("PlayerName")))
"""CREATE PLAYERID FOR NEW PLAYERS"""
try:
currentPlayer = app.models.Player.objects.get(ImgNumber=request.POST.get("PlayerName"))
    except app.models.Player.DoesNotExist:
newPlayer = app.models.Player(ImgNumber=request.POST.get("PlayerName"),UserName=app.models.Person.objects.get(ImgNumber=request.POST.get("PlayerName")).UnformattedName)
print(newPlayer)
print(newPlayer.ImgNumber)
newPlayer.save()
currentPlayer = newPlayer
else:
"""ACTIVATE PLAYER"""
currentPlayer = app.models.Player.objects.get(ImgNumber=request.COOKIES['playerID'])
# if app.models.Player.get(playerID=str(request.POST.get("PlayerName"))) == None:
print('currentPlayer', currentPlayer)
"""MAIN STUFF"""
print("peviousCorrect, previousAnswer: ", previousCorrect, previousAnswer)
print("POST:" , request.POST)
print("GET:" , request.GET)
print("COOKIES:", request.COOKIES)
if previousCorrect is not None and previousAnswer is not None:
# debug print to server console:
# print(previousCorrect, previousAnswer, str(previousAnswer) == str(previousCorrect))
# assign the person variable that we will use for the responce just
# submitted, this is based off of the hidden input slot in the previous
# HTML page
p = app.models.Person.objects.get(
ImgNumber=request.POST.get("correctImgId", default=None))
# add one to number of times shown:
if previousCorrect == previousAnswer:
# print("LAST QUESTION WAS CORRECT")
p.NumCorrect += 1
p.save()
            print(str(p.UnformattedName) + " now has " + str(p.NumCorrect))
currentPlayerNumCorrect = int(request.COOKIES["playerNumCorrect"])
serverResponce.set_cookie(
"playerNumCorrect", value=currentPlayerNumCorrect + 1)
currentPlayerNumCorrect += 1
currentPlayer.AnswersCorrect += 1
else:
# print("LAST QUESTION WAS INCORRECT")
p.NumIncorrect += 1
            print(str(p.UnformattedName) + " now has " + str(p.NumIncorrect) + " Incorrect")
p.save()
currentPlayerNumCorrect = request.COOKIES["playerNumCorrect"]
serverResponce.set_cookie("incorrectAnswers", value=request.COOKIES[
"incorrectAnswers"] + str(p.ImgNumber) + ',')
currentPlayer.AnswersIncorrect += 1
# add that person to the list of already used
serverResponce.set_cookie("alreadyUsed", value=request.COOKIES[
"alreadyUsed"] + str(answer.ImgNumber) + ',')
currentPlayer.QuestionsAnswered += 1
currentPlayer.CorrectPercentage = (currentPlayer.AnswersCorrect/currentPlayer.QuestionsAnswered)
currentPlayer.save()
# if this is less than the 26th question, return the main question page
# else: return the results page
if int(request.GET.get("q", default=1)) < 25 + 1:
return serverResponce
else:
"""CALCULATE PERCENT CORRECT"""
totalPlayed = 25
percent = (float(currentPlayerNumCorrect) / float(totalPlayed)) * 100.0
"""DEFINE SERVER RESPONCE"""
list = [app.models.Person.objects.get(ImgNumber=request.COOKIES['incorrectAnswers'].split(',')[i]) for i in
range(len(request.COOKIES['incorrectAnswers'].split(',')) - 1)]
print(list)
for i in list:
i.ImgNumber = str(i.ImgNumber)
i.UnformattedName = i.UnformattedName.replace("\"", "")
print(list)
serverResultsResponce = render(
request,
'app/results.html',
context_instance=RequestContext(
request,
{
'title': 'Results',
'numCorrect': currentPlayerNumCorrect,
'numAnswered': totalPlayed,
'percentColor': getColor(percent),
'percentScore': percent,
'version': "1.0",
'list': list,
}))
"""DELETE COOKIES"""
serverResultsResponce.delete_cookie("playerNumCorrect")
return serverResultsResponce
|
#! /usr/bin/env python3
import serial
import signal
from time import sleep
import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QSpinBox, QMessageBox)
from PyQt5.QtCore import QThread
class SerialReadThread(QThread):
def __init__(self, serial):
QThread.__init__(self)
self.ser = serial
def __del__(self):
self.wait()
def run(self):
self.logFile = open("LogFile.txt", "w")
running = True
while running:
try:
line = self.ser.readline()
self.logFile.write(str(line) + "\n")
self.logFile.flush()
print(line)
except:
running = False
sleep(0.1)
print('Closing Serial port\n')
self.ser.close() # close port
print('Program ended\n')
##*------------------------------------------
class SerialGUI(QWidget):
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message',
"Are you sure to quit?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
print('Closing port')
            self.ser.close()
sys.exit(0)
else:
event.ignore()
def closeHandler(self, signal, frame):
print('Pressed ctrl+c, closing port')
        self.ser.close()
sys.exit(0)
def startRecord(self):
self.ser.write(bytearray("custommod ble_record "+ str(self.startId.value()) +"\r", "ascii"))
self.ser.flush()
sleep(0.1)
def stopRecord(self):
self.ser.write(bytearray("custommod stop\r", "ascii"))
self.ser.flush()
sleep(0.1)
def sendEvent(self):
bytes = bytearray(self.sendText.toPlainText()+'\r\n',"UTF-8")
self.ser.write(bytes)
def __init__(self):
super( ).__init__()
self.initUI()
def initUI(self):
#self.sendButton = QPushButton('Send', self)
#self.sendButton.clicked.connect(self.sendEvent);
#self.sendText = QTextEdit('Send', self)
#self.sendButton.resize(self.sendButton.sizeHint())
#self.sendText.resize(self.sendText.sizeHint())
#self.sendButton.move(50, 50)
#self.sendText.move(50, 100)
self.startButton = QPushButton('Start', self)
self.startButton.resize(self.startButton.sizeHint())
        self.startButton.clicked.connect(self.startRecord)
self.startButton.move(50, 50)
self.startId = QSpinBox(self)
self.startId.resize(self.startId.sizeHint())
self.startId.move(200, 50)
self.stopButton = QPushButton('Stop', self)
self.stopButton.resize(self.stopButton.sizeHint())
        self.stopButton.clicked.connect(self.stopRecord)
self.stopButton.move(50, 100)
self.setGeometry(300, 300, 300, 220)
        self.setWindowTitle('SerialWriter')
signal.signal(signal.SIGINT, self.closeHandler)
print('Trying to open serial port\n')
try:
self.ser = serial.Serial('/dev/ttyACM0', 38400, timeout=1, parity=serial.PARITY_NONE, rtscts=1)
        except serial.SerialException as e:
            print('Error opening serial port: {}'.format(e))
            sys.exit(1)
        print('Serial port opened\n')
self.ser.write(b'status\n')
self.readThread = SerialReadThread(self.ser)
self.readThread.start()
app = QApplication(sys.argv)
ex = SerialGUI()
ex.show()
sys.exit(app.exec_())
##*------------------------------------------
|
import random
from abc import abstractmethod
import numpy as np
from tensorflow import keras
from constants import INDEX_RETURN_INDICATOR_NUMBER
from model.constants import FUND1_RETURN_NAME, FUND1_BENCHMARK_RETURN_NAME, \
FUND2_RETURN_NAME, FUND2_BENCHMARK_RETURN_NAME, INDEX_RETURN_NAME, MAIN_OUTPUT_NAME, AUXILIARY_OUTPUT_NAME
from .combine_data import COMBINATION_COLUMN_RANGE_KEY_FUND_RETURN, \
COMBINATION_COLUMN_RANGE_KEY_FUND_BENCHMARK_RETURN, COMBINATION_COLUMN_RANGE_KEY_INDEX_RETURN, \
COMBINATION_COLUMN_RANGE_KEY_CORRELATION, combination_column_range_map
from .dataset_name import DATASET_NAME_PREDICT
from .load_dataset import load_preprocessed_dataset_np
from .preprocess import min_max_map, MIN_MAX_KEY_TARGET
from .util import parse_index, parse_square_ex_index
_frs = combination_column_range_map[COMBINATION_COLUMN_RANGE_KEY_FUND_RETURN][0] - 1
_fbrs = combination_column_range_map[COMBINATION_COLUMN_RANGE_KEY_FUND_BENCHMARK_RETURN][0] - 1
_irr = tuple(x - 1 for x in combination_column_range_map[COMBINATION_COLUMN_RANGE_KEY_INDEX_RETURN])
_cs = combination_column_range_map[COMBINATION_COLUMN_RANGE_KEY_CORRELATION][0] - 1
_max_c = np.float32(min_max_map[MIN_MAX_KEY_TARGET][1])
class BaseDataGenerator(keras.utils.Sequence):
def __init__(self, dataset_name, rolling_window_size, row_start=None, row_end=None, step=None,
max_batch_size=199, does_shuffle=True):
if rolling_window_size <= 0:
raise NonPositiveRollingWindowSizeException(rolling_window_size)
if max_batch_size <= 0:
raise NonPositiveBatchSizeException(max_batch_size)
if step is None:
step = 1
if step <= 0:
raise NonPositiveStepException(step)
self._dataset_name = dataset_name.lower()
self._is_for_prediction = (self._dataset_name == DATASET_NAME_PREDICT.lower())
self._rolling_window_size = rolling_window_size
self._step = step
self._batch_size = max_batch_size
self._does_shuffle = does_shuffle
self._dataset = load_preprocessed_dataset_np(self._dataset_name)
if row_start is not None:
if row_end is not None:
self._dataset = self._dataset[row_start:row_end]
else:
self._dataset = self._dataset[row_start:]
elif row_end is not None:
self._dataset = self._dataset[:row_end]
self._row_number = len(self._dataset)
if self._rolling_window_size > self._row_number:
raise RollingWindowSizeTooLargeException(self._rolling_window_size, self._row_number)
snpr = self.get_sample_number_per_row()
self._sample_number = ((self._row_number - self._rolling_window_size + 1) // self._step) * snpr
if self._batch_size is None or self._batch_size > self._sample_number:
self._batch_size = self._sample_number
self._index_sequence = list(range(self._sample_number))
self.on_epoch_end()
def __getitem__(self, index):
indexes = self._index_sequence[index * self._batch_size: (index+1) * self._batch_size]
fund1_return = self.__new_empty_array(1)
fund1_benchmark_return = self.__new_empty_array(1)
fund2_return = self.__new_empty_array(1)
fund2_benchmark_return = self.__new_empty_array(1)
index_return = self.__new_empty_array(INDEX_RETURN_INDICATOR_NUMBER)
input_dict = {
FUND1_RETURN_NAME: fund1_return,
FUND1_BENCHMARK_RETURN_NAME: fund1_benchmark_return,
FUND2_RETURN_NAME: fund2_return,
FUND2_BENCHMARK_RETURN_NAME: fund2_benchmark_return,
INDEX_RETURN_NAME: index_return,
}
if not self._is_for_prediction:
correlation = self.__new_empty_array(None)
output_dict = {MAIN_OUTPUT_NAME: correlation, AUXILIARY_OUTPUT_NAME: correlation}
else:
output_dict = None
self._feed_batch(indexes=indexes, input_dict=input_dict, output_dict=output_dict)
if not self._is_for_prediction:
return input_dict, output_dict
else:
return input_dict
def __len__(self):
return int(self._sample_number // self._batch_size)
def on_epoch_end(self):
if self._does_shuffle:
random.shuffle(self._index_sequence)
def get_dataset_name(self):
return self._dataset_name
def get_rolling_window_size(self):
return self._rolling_window_size
def get_step(self):
return self._step
def get_batch_size(self):
return self._batch_size
def get_sample_number(self):
return self._sample_number
def is_for_prediction(self):
return self._is_for_prediction
def _get_sub_dataset(self, start, end):
return self._dataset[start: end]
@abstractmethod
def get_sample_number_per_row(self):
raise NotImplementedError
@abstractmethod
def _feed_batch(self, indexes, input_dict, output_dict):
raise NotImplementedError
def __new_empty_array(self, length):
if length is not None:
a = np.empty(shape=(self._batch_size, self._rolling_window_size, length), dtype=np.float32)
else:
a = np.empty(shape=(self._batch_size, 1), dtype=np.float32)
return a
class DataGenerator(BaseDataGenerator):
def __init__(self, dataset_name, rolling_window_size, row_start=None, row_end=None, step=None,
max_batch_size=199, does_shuffle=True):
super().__init__(
dataset_name=dataset_name,
rolling_window_size=rolling_window_size,
row_start=row_start,
row_end=row_end,
step=step,
max_batch_size=max_batch_size,
does_shuffle=does_shuffle
)
def get_sample_number_per_row(self):
return 19900
def _feed_batch(self, indexes, input_dict, output_dict):
rws = self.get_rolling_window_size()
f1r = input_dict[FUND1_RETURN_NAME]
f1br = input_dict[FUND1_BENCHMARK_RETURN_NAME]
f2r = input_dict[FUND2_RETURN_NAME]
f2br = input_dict[FUND2_BENCHMARK_RETURN_NAME]
ir = input_dict[INDEX_RETURN_NAME]
c = None
if output_dict is not None:
c = output_dict[MAIN_OUTPUT_NAME]
for i, index in enumerate(indexes, start=0):
date_seq_no, fund1_no, fund2_no, c_no = parse_index(index, step=self.get_step())
date_seq_end_no = date_seq_no + rws
sub_dataset = self._get_sub_dataset(date_seq_no, date_seq_end_no)
f1r[i] = np.expand_dims(sub_dataset[:, _frs+fund1_no], axis=-1)
f1br[i] = np.expand_dims(sub_dataset[:, _fbrs+fund1_no], axis=-1)
f2r[i] = np.expand_dims(sub_dataset[:, _frs+fund2_no], axis=-1)
f2br[i] = np.expand_dims(sub_dataset[:, _fbrs+fund2_no], axis=-1)
ir[i] = sub_dataset[:, _irr[0]:_irr[1]]
if c is not None:
c[i][0] = sub_dataset[-1][_cs+c_no]
class SquareExDataGenerator(BaseDataGenerator):
def __init__(self, dataset_name, rolling_window_size, row_start=None, row_end=None, step=None,
max_batch_size=200, does_shuffle=True):
super().__init__(
dataset_name=dataset_name,
rolling_window_size=rolling_window_size,
row_start=row_start,
row_end=row_end,
step=step,
max_batch_size=max_batch_size,
does_shuffle=does_shuffle,
)
def get_sample_number_per_row(self):
return 40000
def _feed_batch(self, indexes, input_dict, output_dict):
rws = self.get_rolling_window_size()
f1r = input_dict[FUND1_RETURN_NAME]
f1br = input_dict[FUND1_BENCHMARK_RETURN_NAME]
f2r = input_dict[FUND2_RETURN_NAME]
f2br = input_dict[FUND2_BENCHMARK_RETURN_NAME]
ir = input_dict[INDEX_RETURN_NAME]
c = None
if output_dict is not None:
c = output_dict[MAIN_OUTPUT_NAME]
for i, index in enumerate(indexes, start=0):
date_seq_no, fund1_no, fund2_no, c_no = parse_square_ex_index(index, step=self.get_step())
date_seq_end_no = date_seq_no + rws
sub_dataset = self._get_sub_dataset(date_seq_no, date_seq_end_no)
f1r[i] = np.expand_dims(sub_dataset[:, _frs+fund1_no], axis=-1)
f1br[i] = np.expand_dims(sub_dataset[:, _fbrs+fund1_no], axis=-1)
f2r[i] = np.expand_dims(sub_dataset[:, _frs+fund2_no], axis=-1)
f2br[i] = np.expand_dims(sub_dataset[:, _fbrs+fund2_no], axis=-1)
ir[i] = sub_dataset[:, _irr[0]:_irr[1]]
if c is not None:
if c_no is not None:
c[i][0] = sub_dataset[-1][_cs+c_no]
else:
c[i][0] = _max_c
class NonPositiveRollingWindowSizeException(ValueError):
def __init__(self, rolling_window_size):
super().__init__('rolling_window_size(%d) is not positive.' % rolling_window_size)
class RollingWindowSizeTooLargeException(ValueError):
def __init__(self, rolling_window_size, row_number):
super().__init__('rolling_window_size(%d) is larger than the number of rows(%d).' %
(rolling_window_size, row_number))
class NonPositiveStepException(ValueError):
def __init__(self, step):
super().__init__('step(%d) is not positive.' % step)
class NonPositiveBatchSizeException(ValueError):
def __init__(self, batch_size):
super().__init__('batch_size(%d) is not positive.' % batch_size)
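# Illustrative usage sketch (not part of the original module; the dataset name, window size and
# the model are assumptions): these generators are keras.utils.Sequence objects, so an instance
# can be passed directly to tf.keras Model.fit, e.g.:
#
#     train_gen = DataGenerator("train", rolling_window_size=30, step=1,
#                               max_batch_size=199, does_shuffle=True)
#     model.fit(train_gen, epochs=10)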
|
# Copyright 2017 Robert Csordas. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
import BoxEngine.BoxUtils as BoxUtils
def smooth_l1(x):
with tf.name_scope("smooth_l1"):
abs_x = tf.abs(x)
lessMask = tf.cast(abs_x < 1.0, tf.float32)
return lessMask * (0.5 * tf.square(x)) + (1.0 - lessMask) * (abs_x - 0.5)
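# Note (added for clarity): smooth_l1 implements the piecewise (Huber-style) loss
#   smooth_l1(x) = 0.5 * x^2     if |x| < 1
#                  |x| - 0.5     otherwise
# e.g. smooth_l1(0.5) = 0.125 and smooth_l1(2.0) = 1.5.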
def reshapeAll(l, shape=[-1]):
with tf.name_scope("reshapeAll"):
res = []
for e in l:
res.append(tf.reshape(e, shape))
return res
def boxRegressionLoss(boxes, rawSizes, refBoxes, boxSizes):
with tf.name_scope("rawBoxRegressionLoss"):
x, y, w, h = BoxUtils.x0y0x1y1_to_xywh(*tf.unstack(boxes, axis=1))
wRel, hRel = tf.unstack(rawSizes, axis=1)
boxH, boxW = tf.unstack(boxSizes, axis=1)
ref_x, ref_y, ref_w, ref_h = BoxUtils.x0y0x1y1_to_xywh(*tf.unstack(refBoxes, axis=1))
x, y, wRel, hRel, boxH, boxW, ref_x, ref_y, ref_w, ref_h = reshapeAll(
[x, y, wRel, hRel, boxH, boxW, ref_x, ref_y, ref_w, ref_h])
wrelRef = tf.log(ref_w / boxW)
hrelRef = tf.log(ref_h / boxH)
# Smooth L1 loss is defined on NN output values, but only the box sizes are available here. However
# we can transform back the coordinates in a numerically stable way in the NN output space:
#
# tx-tx' = (x-x')/wa
return smooth_l1((x - ref_x) / boxW) + smooth_l1((y - ref_y) / boxH) + smooth_l1(wRel - wrelRef) + smooth_l1(
hRel - hrelRef)
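# Note (added for clarity, following the standard anchor-box encoding the comment above refers to):
# with anchor size (wa, ha) = (boxW, boxH), the usual targets are
#   tx = (x - xa) / wa,  ty = (y - ya) / ha,  tw = log(w / wa),  th = log(h / ha)
# so the difference of two encodings reduces to (x - x') / wa for positions and
# log(w / wa) - log(w' / wa) for sizes, which is exactly what is passed to smooth_l1 above
# (wRel is the raw network output for tw, and wrelRef = log(ref_w / boxW) is its reference value).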
|
import pymongo
from decouple import config
from mongoengine import connect
host = config("HOST")
db = config("DB")
user = config("USERNAME")
pwd = config("PASSWORD")
conn = connect(db=db, username=user, password=pwd, host=host)
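# Illustrative sketch (not part of the original module): once connect() has been called, any
# mongoengine Document defined elsewhere uses this default connection implicitly, e.g.:
#
#     from mongoengine import Document, StringField
#
#     class ExampleDoc(Document):           # hypothetical document class
#         name = StringField(required=True)
#
#     ExampleDoc(name="demo").save()        # persisted through the connection above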
|
#!/usr/bin/env python
class AppControllerException(Exception):
"""A special Exception class that should be thrown if the user tries to
interact with an AppController, but receives a failure message back from
that AppController.
"""
pass
class AppEngineConfigException(Exception):
"""A special Exception class that should be thrown if there is a problem
with the user's App Engine application (e.g., it has no app.yaml or web.xml,
or has a malformed appid).
"""
pass
class AppScaleException(Exception):
"""A special Exception class that should be thrown if the user tries to
interact with an AppScale deployment, but it's not in the expected
state. Examples of this include scenarios when AppScale configuration
files aren't written locally, or if we expect AppScale to be running
and it isn't.
"""
pass
class AppScalefileException(Exception):
"""A special Exception class that should be thrown if the user tries to
run an appscale command that interacts with an AppScalefile and the
file is either malformed or in an unexpected state.
"""
pass
class BadConfigurationException(Exception):
"""A special Exception class that should be thrown if the user attempts
to execute a command with malformed arguments.
"""
pass
class ShellException(Exception):
"""A special Exception class that should be thrown if a shell command is
executed and has a non-zero return value.
"""
pass
class TimeoutException(Exception):
"""A special Exception class that should be thrown if a function is executed
but takes longer to run than the caller expects.
"""
pass
class UsageException(Exception):
"""A special Exception class that should be thrown if the user attempts
to run the 'help' directive, which reports on the usage of this tool.
"""
pass
|
import re
from IPython.core.debugger import Tracer; debughere=Tracer()
import logging
import threading
from paths import PATH
import subprocess
import os
import json
import datetime
from utility import eval_url
from exceptions import WrongDomainSyntax, DomainNoIp
from misc.settings import raas_dictconfig
from logging.config import dictConfig
class DirectoryTraversal(threading.Thread):
def __init__(self, env="", load=False):
super(DirectoryTraversal, self).__init__()
self.thread = threading.Thread(target=self.run, args=())
        self.thread.daemon = True
dictConfig(raas_dictconfig)
self.lgg = logging.getLogger("RAAS_spider")
self.env = env
self.result_list = []
self.fin = 0
self.scanned_hosts = []
def run(self, target, port=[""]):
print("[*] Running Module: Directory Traversal")
if not (target,port) in self.scanned_hosts:
self.scanned_hosts.append((target,port))
return self.run_dirsearch(target, port)
else:
print("is in scanned hosts")
def run_dirsearch(self, target, port="", extension="php,js,txt,yml", wordlist=""):
url = ""
url_dict = ""
try:
url, url_dict = eval_url(target, port)
except (DomainNoIp, WrongDomainSyntax) as e:
print(e)
self.lgg.exception("Got Error:")
return -1
if not url:
return -1
print("\t[+] Starting Dirsearch for {}".format(url))
timedate = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S.%f")[:-3]
report_name = "temp/"+timedate+"_"+self.env['project']+"_"+self.env['dftype']
print(report_name)
cmd_arr = [PATH["dirsearch"],
'-u', url,
'-e', extension,
'--json-report='+report_name]
if wordlist:
cmd_arr.append("-w")
cmd_arr.append(wordlist)
p = subprocess.run(cmd_arr, stdout=subprocess.PIPE)
uri = ""
try:
with open(report_name,'r') as f:
output = f.read()
output = json.loads(output)
uri = list(output.keys())[0]
resultdata = output[uri]
except Exception as e:
print(e)
self.lgg.exception("Got Error:")
resultdata = [{"status":"noscan",
"content-length":"",
"redirect":"",
"path":""}]
resultdata = [dict(item, **{'uri':uri,
'port':url_dict['port'],
'ssl':url_dict['ssl'],
'domain':url_dict['base_url'].split("/")[0]}) for item in resultdata]
self.result_list.append(resultdata)
try:
os.remove(report_name)
except Exception as e:
print(e)
pass
return resultdata
def get_result_list(self):
return self.result_list
if __name__ == '__main__':
dt = DirectoryTraversal(env={'project':'text','dftype':'dirtraversal'})
dt.run("https://eurid.eu")
    debughere()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import glob
import sys
import os
import json
import logging
import warnings
import datetime
import io
from string import Template
from shutil import copyfile
warnings.filterwarnings("ignore")
import numpy as np
import skimage
import skimage.io
import skimage.exposure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.colors as color
import sct_utils as sct
from spinalcordtoolbox.image import Image
import spinalcordtoolbox.reports.slice as qcslice
from spinalcordtoolbox import __sct_dir__
logger = logging.getLogger(__name__)
class QcImage(object):
"""
Class used to create a .png file from a 2d image produced by the class "Slice"
"""
_labels_regions = {'PONS': 50, 'MO': 51,
'C1': 1, 'C2': 2, 'C3': 3, 'C4': 4, 'C5': 5, 'C6': 6, 'C7': 7,
'T1': 8, 'T2': 9, 'T3': 10, 'T4': 11, 'T5': 12, 'T6': 13, 'T7': 14, 'T8': 15, 'T9': 16,
'T10': 17, 'T11': 18, 'T12': 19,
'L1': 20, 'L2': 21, 'L3': 22, 'L4': 23, 'L5': 24,
'S1': 25, 'S2': 26, 'S3': 27, 'S4': 28, 'S5': 29,
'Co': 30}
_color_bin_green = ["#ffffff", "#00ff00"]
_color_bin_red = ["#ffffff", "#ff0000"]
_labels_color = ["#04663c", "#ff0000", "#50ff30",
"#ed1339", "#ffffff", "#e002e8",
"#ffee00", "#00c7ff", "#199f26",
"#563691", "#848545", "#ce2fe1",
"#2142a6", "#3edd76", "#c4c253",
"#e8618a", "#3128a3", "#1a41db",
"#939e41", "#3bec02", "#1c2c79",
"#18584e", "#b49992", "#e9e73a",
"#3b0e6e", "#6e856f", "#637394",
"#36e05b", "#530a1f", "#8179c4",
"#e1320c", "#52a4df", "#000ab5",
"#4a4242", "#0b53a5", "#b49c19",
"#50e7a9", "#bf5a42", "#fa8d8e",
"#83839a", "#320fef", "#82ffbf",
"#360ee7", "#551960", "#11371e",
"#e900c3", "#a21360", "#58a601",
"#811c90", "#235acf", "#49395d",
"#9f89b0", "#e08e08", "#3d2b54",
"#7d0434", "#fb1849", "#14aab4",
"#a22abd", "#d58240", "#ac2aff"]
# _seg_colormap = plt.cm.autumn
def __init__(self, qc_report, interpolation, action_list, stretch_contrast=True,
stretch_contrast_method='contrast_stretching', angle_line=None):
"""
Parameters
----------
qc_report : QcReport
The QC report object
interpolation : str
Type of interpolation used in matplotlib
action_list : list of functions
List of functions that generates a specific type of images
stretch_contrast : adjust image so as to improve contrast
stretch_contrast_method: {'contrast_stretching', 'equalized'}: Method for stretching contrast
angle_line: [float]: See generate_qc()
"""
self.qc_report = qc_report
self.interpolation = interpolation
self.action_list = action_list
self._stretch_contrast = stretch_contrast
self._stretch_contrast_method = stretch_contrast_method
self._angle_line = angle_line
self._centermass = None # center of mass returned by slice.Axial.get_center()
"""
action_list contain the list of images that has to be generated.
It can be seen as "figures" of matplotlib to be shown
Ex: if 'colorbar' is in the list, the process will generate a color bar in the "img" folder
"""
def line_angle(self, mask, ax):
"""Create figure with line superposed over each mosaic square. The line has an angle encoded in the
argument self._angle_line"""
angles = np.full_like(np.zeros(len(self._centermass)), np.nan)
angles[0:len(self._angle_line)] = self._angle_line
img = np.full_like(mask, np.nan)
ax.imshow(img, cmap='gray', alpha=0, aspect=float(self.aspect_mask))
for nslice, center_mosaic in enumerate(self._centermass):
if np.isnan(angles[nslice]):
pass
else:
x0, y0 = center_mosaic[0], center_mosaic[1]
angle = angles[nslice]
if not (-np.pi <= angle <= np.pi):
raise Exception("angle prompted for angle_line not in the range [-pi pi]")
x_min, y_min = x0 - 10, y0 - 10
x_max, y_max = x0 + 10, y0 + 10
if -np.pi/4 < angle <= np.pi/4 or -np.pi <= angle <= -3*np.pi/4 or 3*np.pi/4 < angle <= np.pi:
y1 = y_min
y2 = y_max
x1 = (y_min - y0) * np.tan(angle) + x0
x2 = (y_max - y0) * np.tan(angle) + x0
else:
x1 = x_min
x2 = x_max
y1 = y0 + (x_min - x0) / np.tan(angle)
y2 = y0 + (x_max - x0) / np.tan(angle)
ax.plot([x1, x2], [y1, y2], '-', color='red', linewidth=0.7)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def listed_seg(self, mask, ax):
"""Create figure with red segmentation. Common scenario."""
img = np.rint(np.ma.masked_where(mask < 1, mask))
ax.imshow(img,
cmap=color.ListedColormap(self._color_bin_red),
norm=color.Normalize(vmin=0, vmax=1),
interpolation=self.interpolation,
alpha=1,
aspect=float(self.aspect_mask))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def template(self, mask, ax):
"""Show template statistical atlas"""
values = mask
values[values < 0.5] = 0
color_white = color.colorConverter.to_rgba('white', alpha=0.0)
color_blue = color.colorConverter.to_rgba('blue', alpha=0.7)
color_cyan = color.colorConverter.to_rgba('cyan', alpha=0.8)
cmap = color.LinearSegmentedColormap.from_list('cmap_atlas',
[color_white, color_blue, color_cyan], N=256)
ax.imshow(values,
cmap=cmap,
interpolation=self.interpolation,
aspect=self.aspect_mask)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def no_seg_seg(self, mask, ax):
"""Create figure with image overlay. Notably used by sct_registration_to_template"""
ax.imshow(mask, cmap='gray', interpolation=self.interpolation, aspect=self.aspect_mask)
self._add_orientation_label(ax)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def sequential_seg(self, mask, ax):
values = np.ma.masked_equal(np.rint(mask), 0)
ax.imshow(values,
cmap=self._seg_colormap,
interpolation=self.interpolation,
aspect=self.aspect_mask)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def label_vertebrae(self, mask, ax):
"""Draw vertebrae areas, then add text showing the vertebrae names"""
from matplotlib import colors
import scipy.ndimage
img = np.rint(np.ma.masked_where(mask < 1, mask))
ax.imshow(img,
cmap=colors.ListedColormap(self._labels_color),
norm=colors.Normalize(vmin=0, vmax=len(self._labels_color)),
interpolation=self.interpolation,
alpha=1,
aspect=float(self.aspect_mask))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
a = [0.0]
data = mask
for index, val in np.ndenumerate(data):
if val not in a:
a.append(val)
index = int(val)
if index in self._labels_regions.values():
color = self._labels_color[index]
y, x = scipy.ndimage.measurements.center_of_mass(np.where(data == val, data, 0))
# Draw text with a shadow
x += 10
label = list(self._labels_regions.keys())[list(self._labels_regions.values()).index(index)]
ax.text(x, y, label, color='black', clip_on=True)
x -= 0.5
y -= 0.5
ax.text(x, y, label, color=color, clip_on=True)
def highlight_pmj(self, mask, ax):
"""Hook to show a rectangle where PMJ is on the slice"""
y, x = np.where(mask == 50)
img = np.full_like(mask, np.nan)
ax.imshow(img, cmap='gray', alpha=0, aspect=float(self.aspect_mask))
ax.text(x, y, 'X', color='lime', clip_on=True)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def vertical_line(self, mask, ax):
"""Centered vertical line to assess quality of straightening"""
img = np.full_like(mask, np.nan)
ax.imshow(img, cmap='gray', alpha=0, aspect=float(self.aspect_mask))
ax.axvline(x=img.shape[1]/2.0, color='r', linewidth=2)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# def colorbar(self):
# fig = plt.figure(figsize=(9, 1.5))
# ax = fig.add_axes([0.05, 0.80, 0.9, 0.15])
# colorbar.ColorbarBase(ax, cmap=self._seg_colormap, orientation='horizontal')
# return '{}_colorbar'.format(self.qc_report.img_base_name)
def __call__(self, func):
"""wrapped function (f).
In this case, it is the "mosaic" or "single" methods of the class "Slice"
Parameters
----------
func : function
The wrapped function
"""
def wrapped_f(sct_slice, *args):
"""
Parameters
----------
sct_slice : spinalcordtoolbox.report.slice:Slice
args : list
Returns
-------
"""
self.qc_report.slice_name = sct_slice.get_name()
# Get the aspect ratio (height/width) based on pixel size. Consider only the first 2 slices.
aspect_img, self.aspect_mask = sct_slice.aspect()[:2]
self.qc_report.make_content_path()
logger.info('QcImage: %s with %s slice', func.__name__, sct_slice.get_name())
if self._angle_line is None:
img, mask = func(sct_slice, *args)
else:
[img, mask], centermass = func(sct_slice, *args)
self._centermass = centermass
if self._stretch_contrast:
def equalized(a):
"""
Perform histogram equalization using CLAHE
Notes:
- Image value range is preserved
- Workaround for adapthist artifact by padding (#1664)
"""
winsize = 16
min_, max_ = a.min(), a.max()
b = (np.float32(a) - min_) / (max_ - min_)
b[b >= 1] = 1 # 1+eps numerical error may happen (#1691)
h, w = b.shape
h1 = (h + (winsize - 1)) // winsize * winsize
w1 = (w + (winsize - 1)) // winsize * winsize
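                    # e.g. with winsize=16, an image of height 50 is padded up to
                    # h1 = ((50 + 15) // 16) * 16 = 64 before equalize_adapthist is applied,
                    # then cropped back to the original size below.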
if h != h1 or w != w1:
b1 = np.zeros((h1, w1), dtype=b.dtype)
b1[:h, :w] = b
b = b1
c = skimage.exposure.equalize_adapthist(b, kernel_size=(winsize, winsize))
if h != h1 or w != w1:
c = c[:h, :w]
return np.array(c * (max_ - min_) + min_, dtype=a.dtype)
def contrast_stretching(a):
p2, p98 = np.percentile(a, (2, 98))
return skimage.exposure.rescale_intensity(a, in_range=(p2, p98))
func_stretch_contrast = {'equalized': equalized,
'contrast_stretching': contrast_stretching}
img = func_stretch_contrast[self._stretch_contrast_method](img)
fig = Figure()
# if axial mosaic restrict width
if sct_slice.get_name() == 'Axial':
size_fig = [5, 5 * img.shape[0] / img.shape[1]] # with dpi=300, will give 1500pix width
# if sagittal orientation restrict height
elif sct_slice.get_name() == 'Sagittal':
size_fig = [5 * img.shape[1] / img.shape[0], 5]
fig.set_size_inches(size_fig[0], size_fig[1], forward=True)
FigureCanvas(fig)
ax = fig.add_axes((0, 0, 1, 1))
ax.imshow(img, cmap='gray', interpolation=self.interpolation, aspect=float(aspect_img))
self._add_orientation_label(ax)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
self._save(fig, self.qc_report.qc_params.abs_bkg_img_path(), dpi=self.qc_report.qc_params.dpi)
for action in self.action_list:
logger.debug('Action List %s', action.__name__)
if self._stretch_contrast and action.__name__ in ("no_seg_seg",):
print("Mask type %s" % mask.dtype)
mask = func_stretch_contrast[self._stretch_contrast_method](mask)
fig = Figure()
fig.set_size_inches(size_fig[0], size_fig[1], forward=True)
FigureCanvas(fig)
ax = fig.add_axes((0, 0, 1, 1))
action(self, mask, ax)
self._save(fig, self.qc_report.qc_params.abs_overlay_img_path(), dpi=self.qc_report.qc_params.dpi)
self.qc_report.update_description_file(img.shape)
return wrapped_f
def _add_orientation_label(self, ax):
"""
Add orientation labels on the figure
:param fig: MPL figure handler
:return:
"""
if self.qc_report.qc_params.orientation == 'Axial':
# If mosaic of axial slices, display orientation labels
ax.text(12, 6, 'A', color='yellow', size=4)
ax.text(12, 28, 'P', color='yellow', size=4)
ax.text(0, 18, 'L', color='yellow', size=4)
ax.text(24, 18, 'R', color='yellow', size=4)
def _save(self, fig, img_path, format='png', bbox_inches='tight', pad_inches=0.00, dpi=300):
"""
Save the current figure into an image.
:param fig: Figure handler
:param img_path: str: path of the folder where the image is saved
:param format: str: image format
:param bbox_inches: str
:param pad_inches: float
:param dpi: int: Output resolution of the image
:return:
"""
logger.debug('Save image %s', img_path)
fig.savefig(img_path,
format=format,
bbox_inches=None,
transparent=True,
dpi=dpi)
class Params(object):
"""Parses and stores the variables that will be included into the QC details
"""
def __init__(self, input_file, command, args, orientation, dest_folder, dpi=300, dataset=None, subject=None):
"""
Parameters
:param input_file: str: the input nifti file name
:param command: str: command name
:param args: str: the command's arguments
:param orientation: str: The anatomical orientation
:param dest_folder: str: The absolute path of the QC root
:param dpi: int: Output resolution of the image
:param dataset: str: Dataset name
:param subject: str: Subject name
"""
path_in, file_in, ext_in = sct.extract_fname(os.path.abspath(input_file))
# Assuming BIDS convention, we derive the value of the dataset, subject and contrast from the `input_file`
# by splitting it into `[dataset]/[subject]/[contrast]/input_file`
abs_input_path, contrast = os.path.split(path_in)
abs_input_path, subject_tmp = os.path.split(abs_input_path)
_, dataset_tmp = os.path.split(abs_input_path)
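        # e.g. a (hypothetical) input_file '/data/my_dataset/sub-01/t2/t2w.nii.gz' yields
        # contrast='t2', subject_tmp='sub-01' and dataset_tmp='my_dataset'.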
if dataset is None:
dataset = dataset_tmp
if subject is None:
subject = subject_tmp
if isinstance(args, list):
args = sct.list2cmdline(args)
self.fname_in = file_in+ext_in
self.dataset = dataset
self.subject = subject
self.cwd = os.getcwd()
self.contrast = contrast
self.command = command
self.sct_version = sct.__version__
self.args = args
self.orientation = orientation
self.dpi = dpi
self.root_folder = dest_folder
self.mod_date = datetime.datetime.strftime(datetime.datetime.now(), '%Y_%m_%d_%H%M%S.%f')
self.qc_results = os.path.join(dest_folder, '_json/qc_'+self.mod_date+'.json')
self.bkg_img_path = os.path.join(dataset, subject, contrast, command, self.mod_date, 'bkg_img.png')
self.overlay_img_path = os.path.join(dataset, subject, contrast, command, self.mod_date, 'overlay_img.png')
def abs_bkg_img_path(self):
return os.path.join(self.root_folder, self.bkg_img_path)
def abs_overlay_img_path(self):
return os.path.join(self.root_folder, self.overlay_img_path)
class QcReport(object):
"""This class generates the quality control report.
It will also setup the folder structure so the report generator only needs to fetch the appropriate files.
"""
def __init__(self, qc_params, usage):
"""
Parameters
:param qc_params: arguments of the "-param-qc" option in Terminal
:param usage: str: description of the process
"""
self.tool_name = qc_params.command
self.slice_name = qc_params.orientation
self.qc_params = qc_params
self.usage = usage
self.assets_folder = os.path.join(__sct_dir__, 'assets')
self.img_base_name = 'bkg_img'
self.description_base_name = "qc_results"
def make_content_path(self):
"""Creates the whole directory to contain the QC report
:return: return "root folder of the report" and the "furthest folder path" containing the images
"""
# make a new or update Qc directory
target_img_folder = os.path.dirname(self.qc_params.abs_bkg_img_path())
try:
os.makedirs(target_img_folder)
except OSError as err:
if not os.path.isdir(target_img_folder):
raise err
def update_description_file(self, dimension):
"""Create the description file with a JSON structure
:param: dimension 2-tuple, the dimension of the image frame (w, h)
"""
output = {
'python': sys.executable,
'cwd': self.qc_params.cwd,
'cmdline': "{} {}".format(self.qc_params.command, self.qc_params.args),
'command': self.qc_params.command,
'sct_version': self.qc_params.sct_version,
'dataset': self.qc_params.dataset,
'subject': self.qc_params.subject,
'contrast': self.qc_params.contrast,
'fname_in': self.qc_params.fname_in,
'orientation': self.qc_params.orientation,
'background_img': self.qc_params.bkg_img_path,
'overlay_img': self.qc_params.overlay_img_path,
'dimension': '%dx%d' % dimension,
'moddate': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
logger.debug('Description file: %s', self.qc_params.qc_results)
# results = []
# Create path to store json files
path_json, _ = os.path.split(self.qc_params.qc_results)
if not os.path.exists(path_json):
os.makedirs(path_json)
# Create json file
with open(self.qc_params.qc_results, 'w+') as qc_file:
json.dump(output, qc_file, indent=1)
self._update_html_assets(get_json_data_from_path(path_json))
def _update_html_assets(self, json_data):
"""Update the html file and assets"""
assets_path = os.path.join(os.path.dirname(__file__), 'assets')
dest_path = self.qc_params.root_folder
with io.open(os.path.join(assets_path, 'index.html')) as template_index:
template = Template(template_index.read())
output = template.substitute(sct_json_data=json.dumps(json_data))
            with io.open(os.path.join(dest_path, 'index.html'), 'w') as index_file:
                index_file.write(output)
for path in ['css', 'js', 'imgs', 'fonts']:
src_path = os.path.join(assets_path, '_assets', path)
dest_full_path = os.path.join(dest_path, '_assets', path)
if not os.path.exists(dest_full_path):
os.makedirs(dest_full_path)
for file_ in os.listdir(src_path):
if not os.path.isfile(os.path.join(dest_full_path, file_)):
sct.copy(os.path.join(src_path, file_),
dest_full_path)
def add_entry(src, process, args, path_qc, plane, path_img=None, path_img_overlay=None,
qcslice=None,
qcslice_operations=[],
qcslice_layout=None,
dpi=300,
stretch_contrast_method='contrast_stretching',
angle_line=None,
dataset=None,
subject=None):
"""
Create QC report.
:param src: Path to input file (only used to populate report metadata)
:param process:
:param args:
:param path_qc:
:param plane:
:param path_img: Path to image to display
:param path_img_overlay: Path to image to display on top of path_img (will flip between the two)
:param qcslice: spinalcordtoolbox.reports.slice:Axial
:param qcslice_operations:
:param qcslice_layout:
:param dpi: int: Output resolution of the image
:param stretch_contrast_method: Method for stretching contrast. See QcImage
:param angle_line: [float]: See generate_qc()
:param dataset: str: Dataset name
:param subject: str: Subject name
:return:
"""
qc_param = Params(src, process, args, plane, path_qc, dpi, dataset, subject)
report = QcReport(qc_param, '')
if qcslice is not None:
@QcImage(report, 'none', qcslice_operations, stretch_contrast_method=stretch_contrast_method,
angle_line=angle_line)
def layout(qslice):
return qcslice_layout(qslice)
layout(qcslice)
elif path_img is not None:
report.make_content_path()
report.update_description_file(skimage.io.imread(path_img).shape[:2])
copyfile(path_img, qc_param.abs_bkg_img_path())
if path_img_overlay is not None:
# User specified a second image to overlay
copyfile(path_img_overlay, qc_param.abs_overlay_img_path())
else:
# Copy the image both as "overlay" and "path_img_overlay", so it appears static.
# TODO: Leave the possibility in the reports/assets/js files to have static images (instead of having to
# flip between two images).
copyfile(path_img, qc_param.abs_overlay_img_path())
sct.printv('Successfully generated the QC results in %s' % qc_param.qc_results)
sct.printv('Use the following command to see the results in a browser:')
try:
from sys import platform as _platform
if _platform == "linux" or _platform == "linux2":
            # If the user runs SCT within the official Docker distribution, the xdg-open command will not work,
            # so we instruct the user to manually open the generated html file instead.
try:
# if user runs SCT within the official Docker distribution, the variable below is defined. More info at:
# https://github.com/neuropoly/sct_docker/blob/master/sct_docker.py#L84
os.environ["DOCKER"]
sct.printv('please go to "{}/" and double click on the "index.html" file'.format(path_qc), type='info')
except KeyError:
sct.printv('xdg-open "{}/index.html"'.format(path_qc), type='info')
elif _platform == "darwin":
sct.printv('open "{}/index.html"'.format(path_qc), type='info')
else:
sct.printv('open file "{}/index.html"'.format(path_qc), type='info')
except ImportError:
print("WARNING! Platform undetectable.")
def generate_qc(fname_in1, fname_in2=None, fname_seg=None, angle_line=None, args=None, path_qc=None, dataset=None,
subject=None, path_img=None, process=None):
"""
Generate a QC entry allowing to quickly review results. This function is the entry point and is called by SCT
scripts (e.g. sct_propseg).
:param fname_in1: str: File name of input image #1 (mandatory)
:param fname_in2: str: File name of input image #2
:param fname_seg: str: File name of input segmentation
    :param angle_line: [list of float]: Angle [in rad, wrt. vertical line, must be between -pi and pi] to apply to the line overlaid on the image,
    for each slice. For slices that don't have an angle to display, a nan is expected. To be used for assessing cord orientation.
:param args: args from parent function
:param path_qc: str: Path to save QC report
:param dataset: str: Dataset name
:param subject: str: Subject name
:param path_img: dict: Path to image to display (e.g., a graph), instead of computing the image from MRI.
:param process: str: Name of SCT function. e.g., sct_propseg
:return: None
"""
logger.info('\n*** Generate Quality Control (QC) html report ***')
dpi = 300
plane = None
qcslice_type = None
qcslice_operations = None
qcslice_layout = None
# Get QC specifics based on SCT process
# Axial orientation, switch between two input images
if process in ['sct_register_multimodal', 'sct_register_to_template']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_in2), Image(fname_seg)])
qcslice_operations = [QcImage.no_seg_seg]
qcslice_layout = lambda x: x.mosaic()[:2]
# Rotation visualisation
elif process in ['rotation']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.line_angle]
qcslice_layout = lambda x: x.mosaic(return_center=True)
# Axial orientation, switch between the image and the segmentation
elif process in ['sct_propseg', 'sct_deepseg_sc', 'sct_deepseg_gm']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.listed_seg]
qcslice_layout = lambda x: x.mosaic()
# Axial orientation, switch between the image and the white matter segmentation (linear interp, in blue)
elif process in ['sct_warp_template']:
plane = 'Axial'
qcslice_type = qcslice.Axial([Image(fname_in1), Image(fname_seg)])
qcslice_operations = [QcImage.template]
qcslice_layout = lambda x: x.mosaic()
# Sagittal orientation, display vertebral labels
elif process in ['sct_label_vertebrae']:
plane = 'Sagittal'
dpi = 100 # bigger picture is needed for this special case, hence reduce dpi
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.label_vertebrae]
qcslice_layout = lambda x: x.single()
# Sagittal orientation, display PMJ box
elif process in ['sct_detect_pmj']:
plane = 'Sagittal'
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_seg)], p_resample=None)
qcslice_operations = [QcImage.highlight_pmj]
qcslice_layout = lambda x: x.single()
# Sagittal orientation, static image
# TODO: Add coronal orientation
elif process in ['sct_straighten_spinalcord']:
plane = 'Sagittal'
dpi = 100
qcslice_type = qcslice.Sagittal([Image(fname_in1), Image(fname_in1)], p_resample=None)
qcslice_operations = [QcImage.vertical_line]
qcslice_layout = lambda x: x.single()
# Metric outputs (only graphs)
elif process in ['sct_process_segmentation']:
assert os.path.isfile(path_img)
else:
raise ValueError("Unrecognized process: {}".format(process))
add_entry(
src=fname_in1,
process=process,
args=args,
path_qc=path_qc,
dataset=dataset,
subject=subject,
plane=plane,
path_img=path_img,
dpi=dpi,
qcslice=qcslice_type,
qcslice_operations=qcslice_operations,
qcslice_layout=qcslice_layout,
stretch_contrast_method='equalized',
angle_line=angle_line
)
def get_json_data_from_path(path_json):
"""Read all json files present in the given path, and output an aggregated json structure"""
results = []
for file_json in glob.iglob(os.path.join(path_json, '*.json')):
logger.debug('Opening: '+file_json)
with open(file_json, 'r+') as fjson:
results.append(json.load(fjson))
return results
|
# %%
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_openml, make_swiss_roll
from sklearn.decomposition import PCA, IncrementalPCA, KernelPCA
from sklearn.linear_model import LogisticRegression
from sklearn.manifold import LocallyLinearEmbedding, TSNE
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
# %%
np.random.seed(4)
# %%
m = 60
w1, w2 = 0.1, 0.3
noise = 0.1
# %%
angles = np.random.rand(m) * 3 * np.pi / 2 - 0.5
X = np.empty((m, 3))
X[:, 0] = np.cos(angles) + np.sin(angles) / 2 + noise * np.random.randn(m) / 2
X[:, 1] = np.sin(angles) * 0.7 + noise * np.random.randn(m) / 2
X[:, 2] = X[:, 0] * w1 + X[:, 1] * w2 + noise * np.random.randn(m)
# %%
X_centered = X - X.mean(axis=0)
U, s, Vt = np.linalg.svd(X_centered)
c1 = Vt.T[:, 0]
c2 = Vt.T[:, 1]
print(f"Shapes: U: {U.shape}, s: {s.shape}, Vt: {Vt.shape}")
# %%
m, n = X_centered.shape
S = np.zeros(X_centered.shape)
S[:n, :n] = np.diag(s)
# %%
print(f"X_centered = U @ S @ Vt? {np.allclose(X_centered, U @ S @ Vt)}")
# %%
W2 = Vt.T[:, :2]
X2D_v1 = X_centered.dot(W2)
# %%
pca = PCA(n_components=2)
X2D = pca.fit_transform(X)
print(f"X2d == X2d_v1? {np.allclose(np.abs(X2D), np.abs(X2D_v1))}")
# print(f"Components:\n{pca.components_.T}")
# print(f"First component: {pca.components_.T[:, 0]}")
print(f"First component: {pca.components_[0]}")
print(f"Explained variance: {pca.explained_variance_}")
print(f"Explained variance ratio: {pca.explained_variance_ratio_}")
# %%
X_train, X_val = train_test_split(X, test_size=0.2)
# %%
pca = PCA()
pca.fit(X_train)
cumsum = np.cumsum(pca.explained_variance_ratio_)
d = np.argmax(cumsum >= 0.95) + 1
# %%
pca = PCA(n_components=0.95)
X_reduced = pca.fit_transform(X_train)
print(f"Explained variance ratio: {pca.explained_variance_ratio_} "
f"({np.sum(pca.explained_variance_ratio_):.3f})")
# %%
if "mnist" in globals():
mnist = globals()["mnist"]
else:
mnist = fetch_openml("mnist_784", version=1, as_frame=False)
mnist.target = mnist.target.astype(np.uint8)
# %%
X = mnist["data"]
y = mnist["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
# %%
pca = PCA()
pca.fit(X_train)
cumsum = np.cumsum(pca.explained_variance_ratio_)
d = np.argmax(cumsum >= 0.95) + 1
# %%
plt.figure(figsize=(6, 4), dpi=300)
plt.plot(cumsum, linewidth=3)
plt.axis([0, 400, 0, 1])
plt.xlabel("Dimensions")
plt.ylabel("Explained Variance")
plt.plot([d, d], [0, 0.95], "k:")
plt.plot([0, d], [0.95, 0.95], "k:")
plt.plot(d, 0.95, "ko")
plt.grid(True)
plt.show()
# %%
pca = PCA(n_components=0.95)
X_reduced = pca.fit_transform(X_train)
X_recovered = pca.inverse_transform(X_reduced)
# %%
print(f"Components: {pca.components_.shape[0]}")
# %%
def plot_digits(instances, images_per_row=5, **options):
size = 28
images_per_row = min(len(instances), images_per_row)
n_rows = (len(instances) - 1) // images_per_row + 1
n_empty = n_rows * images_per_row - len(instances)
padded_instances = np.concatenate([instances, np.zeros((n_empty, size * size))], axis=0)
image_grid = padded_instances.reshape((n_rows, images_per_row, size, size))
big_image = image_grid.transpose(0, 2, 1, 3).reshape(n_rows * size, images_per_row * size)
plt.imshow(big_image, cmap="binary", **options)
plt.axis("off")
# %%
# noinspection PyShadowingNames
def plot_recovered(X_recovered):
plt.figure(figsize=(7, 4), dpi=300)
plt.subplot(121)
plot_digits(X_train[::2100])
plt.title("Original")
plt.subplot(122)
plot_digits(X_recovered[::2100])
plt.title("Compressed")
# %%
plot_recovered(X_recovered)
plt.show()
# %%
pca = PCA(n_components=32)
X_reduced = pca.fit_transform(X_train)
X_recovered = pca.inverse_transform(X_reduced)
# %%
plot_recovered(X_recovered)
plt.show()
# %%
rnd_pca = PCA(n_components=154, svd_solver="randomized")
X_reduced = rnd_pca.fit_transform(X_train)
X_recovered = rnd_pca.inverse_transform(X_reduced)
# %%
plot_recovered(X_recovered)
plt.show()
# %%
n_batches = 100
inc_pca = IncrementalPCA(n_components=154)
# %%
for X_batch in np.array_split(X_train, n_batches):
inc_pca.partial_fit(X_batch)
# %%
X_reduced = inc_pca.transform(X_train)
X_recovered = inc_pca.inverse_transform(X_reduced)
# %%
plot_recovered(X_recovered)
plt.show()
# %%
X, t = make_swiss_roll(n_samples=1000, noise=0.2, random_state=42)
y = t > 6.9
# %%
rbf_pca = KernelPCA(n_components=2, kernel="rbf", gamma=0.04)
X_reduced = rbf_pca.fit_transform(X)
# %%
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=t, cmap="hot")
plt.show()
# %%
clf = Pipeline([
("kpca", KernelPCA(n_components=2)),
("log_reg", LogisticRegression()),
])
# %%
param_grid = [{
"kpca__gamma": np.linspace(0.03, 0.05, 10),
"kpca__kernel": ["rbf", "sigmoid"]
}]
# %%
grid_search = GridSearchCV(clf, param_grid, cv=3)
grid_search.fit(X, y)
# %%
print(f"Best parameters: {grid_search.best_params_}")
# %%
rbf_pca = KernelPCA(n_components=2, kernel="rbf", gamma=0.0433, fit_inverse_transform=True)
X_reduced = rbf_pca.fit_transform(X)
X_preimage = rbf_pca.inverse_transform(X_reduced)
# %%
print(f"MSE for Kernel PCA: {mean_squared_error(X, X_preimage):.1f}")
# %%
lle = LocallyLinearEmbedding(n_components=2, n_neighbors=10, random_state=42)
X_reduced = lle.fit_transform(X)
# %%
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=t, cmap="hot")
plt.show()
# %%
tsne = TSNE(n_components=2, learning_rate="auto", init="random", n_jobs=48, random_state=42)
X_reduced = tsne.fit_transform(X)
# %%
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=t, cmap="hot")
plt.show()
|
import os
from shutil import copyfile
from typing import Dict, List
from models import Mentee
from environment import *
import sys
def cleanup_files():
print('Deleting submission files...')
submission_files = os.listdir(SUBMISSIONS_DIRECTORY)
for file in submission_files:
full_path = os.path.join(SUBMISSIONS_DIRECTORY, file)
os.remove(full_path)
print('Deleting output files...')
output_files = os.listdir(OUTPUT_DIRECTORY)
for file in output_files:
full_path = os.path.join(OUTPUT_DIRECTORY, file)
os.remove(full_path)
print('Clean up complete!')
# Filter out files not corresponding to the mentees
def filter_files(mentees: Dict[str, Mentee]) -> List[str]:
if not os.path.isdir(SUBMISSIONS_DIRECTORY):
print(
'Please place the submission files into the {} directory!'.format(
SUBMISSIONS_DIRECTORY))
os.mkdir(SUBMISSIONS_DIRECTORY)
submission_files = os.listdir(SUBMISSIONS_DIRECTORY)
for file in submission_files:
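        # The parsing below assumes submission file names of the form "[<name>-<id>]<rest>";
        # matching files are copied to "[<id>-<name>]<rest>" (format inferred from the slicing
        # logic, not documented in the source).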
dash = file.index('-')
bracket = file.index(']')
file_id = file[dash + 1:bracket]
file_name = file[1:dash]
full_path = os.path.join(SUBMISSIONS_DIRECTORY, file)
if file_id in mentees:
if sys.platform == 'win32':
delimiter = '\\'
else:
delimiter = '/'
new_name = full_path.split(delimiter)[-1]
new_name = '[{}-{}]{}'.format(file_id, file_name,
new_name.split(']')[-1])
path = full_path.split(delimiter)[:-1]
path.append(new_name)
new_path = delimiter.join(path)
copyfile(full_path, new_path)
mentees[file_id].assign_submission(new_path)
else:
print("Deleting " + full_path)
os.remove(full_path)
return os.listdir(SUBMISSIONS_DIRECTORY)
# List mentees who did not submit
def print_filtering_result(mentees: Dict[str, Mentee]) -> None:
not_submitted = list(
filter(lambda m: not m.submitted, mentees.values()))
if len(not_submitted) == 0:
print('Everybody submitted!')
else:
print('Mentees with no submission:')
for noSubmit in not_submitted:
print(noSubmit.name)
|
#!/usr/bin/env python
# coding: utf-8
# # 명령문
# ## Assignment operator
# [Python library reference](<https://docs.python.org/reference/simple_stmts.html#assignment-statements>)
# says
#
# Assignment statements are used to (re)bind names to values and to
# modify attributes or items of mutable objects.
#
# In short, it works as follows (simple assignment)
#
# 1. an expression on the right hand side is evaluated, the corresponding
# object is created/obtained
# 1. a **name** on the left hand side is assigned, or bound, to the
# r.h.s. object
# Things to note
#
# * a single object can have several names bound to it
#
# ```python
# In [1]: a = [1, 2, 3]
# In [2]: b = a
# In [3]: a
# Out[3]: [1, 2, 3]
# In [4]: b
# Out[4]: [1, 2, 3]
# In [5]: a is b
# Out[5]: True
# In [6]: b[1] = 'hi!'
# In [7]: a
# Out[7]: [1, 'hi!', 3]
# ```
# * to change a list *in place*, use indexing/slices
#
# ```python
# In [1]: a = [1, 2, 3]
# In [3]: a
# Out[3]: [1, 2, 3]
# In [4]: a = ['a', 'b', 'c'] # Creates another object.
# In [5]: a
# Out[5]: ['a', 'b', 'c']
# In [6]: id(a)
# Out[6]: 138641676
# In [7]: a[:] = [1, 2, 3] # Modifies object in place.
# In [8]: a
# Out[8]: [1, 2, 3]
# In [9]: id(a)
# Out[9]: 138641676 # Same as in Out[6], yours will differ...
# ```
# * the key concept here is **mutable vs. immutable**
# * mutable objects can be changed in place
# * immutable objects cannot be modified once created
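#
# A short added illustration (not part of the original notes): tuples are
# immutable, lists are mutable.
#
# ```python
# >>> t = (1, 2, 3)
# >>> t[0] = 99        # TypeError: 'tuple' object does not support item assignment
# >>> l = [1, 2, 3]
# >>> l[0] = 99        # Fine: the list is modified in place.
# >>> l
# [99, 2, 3]
# ```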
# ## Control Flow
# Controls the order in which the code is executed.
# ### if/elif/else
# ```python
# >>> if 2**2 == 4:
# ... print('Obvious!')
# ...
# Obvious!
# ```
# **Blocks are delimited by indentation**
# Type the following lines in your Python interpreter, and be careful
# to **respect the indentation depth**. The IPython shell automatically
# increases the indentation depth after a colon ``:`` sign; to
# decrease the indentation depth, go four spaces to the left with the
# Backspace key. Press the Enter key twice to leave the logical block.
# ```python
# >>> a = 10
#
# >>> if a == 1:
# ... print(1)
# ... elif a == 2:
# ... print(2)
# ... else:
# ... print('A lot')
# A lot
# ```
# Indentation is compulsory in scripts as well. As an exercise, re-type the
# previous lines with the same indentation in a script ``condition.py``, and
# execute the script with ``run condition.py`` in IPython.
# ### for/range
# Iterating with an index:
#
# ```python
# >>> for i in range(4):
# ... print(i)
# 0
# 1
# 2
# 3
# ```
# But most often, it is more readable to iterate over values:
# ```python
# >>> for word in ('cool', 'powerful', 'readable'):
# ... print('Python is %s' % word)
# Python is cool
# Python is powerful
# Python is readable
# ```
# ### while/break/continue
# Typical C-style while loop (Mandelbrot problem):
#
# ```python
# >>> z = 1 + 1j
# >>> while abs(z) < 100:
# ... z = z**2 + 1
# >>> z
# (-134+352j)
# ```
# **More advanced features**
#
# `break` out of enclosing for/while loop:
#
# ```python
# >>> z = 1 + 1j
#
# >>> while abs(z) < 100:
# ... if z.imag == 0:
# ... break
# ... z = z**2 + 1
# ```
# `continue` to the next iteration of a loop:
#
# ```python
# >>> a = [1, 0, 2, 4]
# >>> for element in a:
# ... if element == 0:
# ... continue
# ... print(1. / element)
# 1.0
# 0.5
# 0.25
# ```
# ### Conditional Expressions
# `if <OBJECT>`
#
# Evaluates to False:
# * any number equal to zero (`0`, `0.0`, `0+0j`)
# * an empty container (list, tuple, set, dictionary, ...)
# * `False`, `None`
#
# Evaluates to True:
# * everything else
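#
# A quick added illustration:
#
# ```python
# >>> if []:
# ...     print('never printed')
# >>> if [0]:
# ...     print('non-empty containers are truthy')
# non-empty containers are truthy
# ```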
# `a == b`
#
# Tests equality, with logics:
#
# ```python
# >>> 1 == 1.
# True
# ```
# `a is b`
#
# Tests identity: both sides are the same object:
#
# ```python
# >>> 1 is 1.
# False
#
# >>> a = 1
# >>> b = 1
# >>> a is b
# True
# ```
# `a in b`
#
# For any collection `b`: `b` contains `a`:
#
# ```python
# >>> b = [1, 2, 3]
# >>> 2 in b
# True
# >>> 5 in b
# False
# ```
#
# If `b` is a dictionary, this tests that `a` is a key of `b`.
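#
# For example (added illustration):
#
# ```python
# >>> d = {'a': 1, 'b': 2}
# >>> 'a' in d
# True
# >>> 1 in d      # only keys are tested, not values
# False
# ```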
# ### Advanced iteration
# **Iterate over any sequence**
#
# You can iterate over any sequence (string, list, keys in a dictionary, lines in
# a file, ...):
#
# ```python
# >>> vowels = 'aeiouy'
#
# >>> for i in 'powerful':
# ... if i in vowels:
# ... print(i)
# o
# e
# u
# ```
# ```python
# >>> message = "Hello how are you?"
# >>> message.split() # returns a list
# ['Hello', 'how', 'are', 'you?']
# >>> for word in message.split():
# ... print(word)
# ...
# Hello
# how
# are
# you?
# ```
# Few languages (in particular, languages for scientific computing) allow
# looping over anything but integers/indices. With Python it is possible to
# loop exactly over the objects of interest, without bothering with indices
# you often don't care about. This feature often makes code more readable.
# **warning**: It is not safe to modify a sequence while you are iterating over it (see the example below).
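#
# For instance (an added sketch), iterate over a copy when removing elements:
#
# ```python
# >>> a = [1, 0, 2, 4]
# >>> for x in a[:]:      # a[:] is a copy of a
# ...     if x == 0:
# ...         a.remove(x)
# >>> a
# [1, 2, 4]
# ```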
# **Keeping track of enumeration number**
#
# A common task is to iterate over a sequence while keeping track of the
# item number.
# * You could use a while loop with a counter, as above, or a for loop:
#
# ```python
# >>> words = ('cool', 'powerful', 'readable')
# >>> for i in range(0, len(words)):
# ... print((i, words[i]))
# (0, 'cool')
# (1, 'powerful')
# (2, 'readable')
# ```
# * But, Python provides a built-in function - `enumerate` - for this:
#
# ```python
# >>> for index, item in enumerate(words):
# ... print((index, item))
# (0, 'cool')
# (1, 'powerful')
# (2, 'readable')
# ```
# **Looping over a dictionary**
#
# Use **items**:
#
# ```python
# >>> d = {'a': 1, 'b':1.2, 'c':1j}
#
# >>> for key, val in sorted(d.items()):
# ... print('Key: %s has value: %s' % (key, val))
# Key: a has value: 1
# Key: b has value: 1.2
# Key: c has value: 1j
# ```
# **note**
#
# The ordering of a dictionary is arbitrary (insertion order is only
# guaranteed from Python 3.7 onwards), thus we use :func:`sorted`,
# which will sort on the keys.
# **Exercise**
# ref: `pi_wallis`
#
# Compute the decimals of Pi using the Wallis formula:
#
# $$
# \pi = 2 \prod_{i=1}^{\infty} \frac{4i^2}{4i^2 - 1}
# $$
#
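# A possible sketch of a solution (added for illustration; the infinite product
# is truncated after a finite number of terms):
#
# ```python
# >>> pi_approx = 2.0
# >>> for i in range(1, 100000):
# ...     pi_approx *= 4 * i**2 / (4 * i**2 - 1)
# >>> pi_approx      # close to 3.14159; the product converges slowly
# ```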
|
# Python - 3.6.0
def oper_array(fct, arr, init):
r = [init]
for v in arr:
r.append(fct(r[-1], v))
return r[1:]
from math import gcd
gcdi = lambda a, b: gcd(abs(a), abs(b))
lcmu = lambda a, b: (abs(a) * abs(b)) // gcd(abs(a), abs(b))
som = lambda a, b: a + b
maxi = max
mini = min
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ui_LoginInterface.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_LoginInterface(object):
def setupUi(self, LoginInterface):
LoginInterface.setObjectName("LoginInterface")
LoginInterface.setEnabled(True)
LoginInterface.resize(463, 359)
LoginInterface.setStyleSheet("")
self.frame = QtWidgets.QFrame(LoginInterface)
self.frame.setGeometry(QtCore.QRect(10, 10, 430, 321))
self.frame.setStyleSheet("*{\n"
"\n"
" font-family:century gothic;\n"
" font-size:16px\n"
"}\n"
"\n"
"#frame\n"
"{\n"
"background-image: url(:/icon/背景图片.png);\n"
"}\n"
"QFrame\n"
"{\n"
" border-top-left-radius:15px;\n"
" border-bottom-right-radius:15px;\n"
" font: 9pt \"黑体\";\n"
"font-size:20px;\n"
"}\n"
"\n"
"QLabel\n"
"{\n"
" color:white;\n"
"\n"
"}\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"QLineEdit\n"
"{\n"
"background:transparent;\n"
"border:none;\n"
"color:#717072;\n"
"border-bottom:1px solid#717072;\n"
"}\n"
"\n"
"QCheckBox\n"
"{\n"
" color:white;\n"
"}\n"
"\n"
"\n"
"")
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.ID = QtWidgets.QLineEdit(self.frame)
self.ID.setGeometry(QtCore.QRect(150, 105, 141, 30))
self.ID.setStyleSheet("QLineEdit\n"
"{\n"
" background:white;\n"
" padding-left:5px ;\n"
" padding-top:1px ;\n"
" border-bottom-left-radius:7px;\n"
" border-bottom-right-radius:7px;\n"
" border: 1px solid rgb(209 , 209 , 209);\n"
" border-top:transparent;\n"
"}\n"
"QLineEdit:hover\n"
"{\n"
" padding-top:0px ;\n"
" border: 1px solid rgb(21 , 131 , 221);\n"
"}")
self.ID.setText("")
self.ID.setObjectName("ID")
self.logIn = QtWidgets.QPushButton(self.frame)
self.logIn.setGeometry(QtCore.QRect(130, 270, 171, 31))
self.logIn.setStyleSheet("QPushButton\n"
"{\n"
" color:white;\n"
" \n"
"background-color:rgb(14 , 135 , 228);\n"
"font: 9pt \"黑体\";\n"
" border-radius:5px;font-size:18px;\n"
"}\n"
"\n"
"QPushButton:hover\n"
"{\n"
" color:white;\n"
" background-color:rgb(44 , 137 , 255);\n"
"}\n"
"\n"
"QPushButton:pressed\n"
"{\n"
" color:white;\n"
" background-color:rgb(14 , 150 , 254);\n"
" padding-left:3px;\n"
" padding-top:3px;\n"
"}\n"
"")
self.logIn.setObjectName("logIn")
self.rememberKey = QtWidgets.QCheckBox(self.frame)
self.rememberKey.setGeometry(QtCore.QRect(70, 230, 91, 31))
self.rememberKey.setObjectName("rememberKey")
self.Key = QtWidgets.QLineEdit(self.frame)
self.Key.setGeometry(QtCore.QRect(150, 145, 141, 31))
self.Key.setStyleSheet("QLineEdit\n"
"{\n"
" background:white;\n"
" padding-left:5px ;\n"
" padding-top:1px ;\n"
" border-bottom-left-radius:7px;\n"
" border-bottom-right-radius:7px;\n"
" border: 1px solid rgb(209 , 209 , 209);\n"
" border-top:transparent;\n"
"}\n"
"QLineEdit:hover\n"
"{\n"
" padding-top:0px ;\n"
" border: 1px solid rgb(21 , 131 , 221);\n"
"}")
self.Key.setText("")
self.Key.setEchoMode(QtWidgets.QLineEdit.Password)
self.Key.setObjectName("Key")
self.userID = QtWidgets.QLabel(self.frame)
self.userID.setGeometry(QtCore.QRect(60, 100, 70, 40))
self.userID.setObjectName("userID")
self.userKey = QtWidgets.QLabel(self.frame)
self.userKey.setGeometry(QtCore.QRect(60, 145, 61, 30))
self.userKey.setObjectName("userKey")
self.autLog = QtWidgets.QCheckBox(self.frame)
self.autLog.setGeometry(QtCore.QRect(210, 230, 121, 31))
self.autLog.setObjectName("autLog")
self.mallName = QtWidgets.QLabel(self.frame)
self.mallName.setGeometry(QtCore.QRect(110, 30, 221, 61))
self.mallName.setStyleSheet("QLabel\n"
"{\n"
" color:white;\n"
"font-size:35px;\n"
"}")
self.mallName.setAlignment(QtCore.Qt.AlignCenter)
self.mallName.setObjectName("mallName")
self.registAccount = QtWidgets.QPushButton(self.frame)
self.registAccount.setGeometry(QtCore.QRect(310, 110, 91, 31))
self.registAccount.setStyleSheet("QPushButton\n"
"{\n"
" color:rgb(222, 255, 253);font: 9pt \"黑体\";\n"
" background-color:transparent;\n"
"font-size:13px;\n"
"}\n"
"\n"
"QPushButton:hover\n"
"{\n"
" color:rgb(97 , 179 , 246);\n"
"}\n"
"")
self.registAccount.setObjectName("registAccount")
self.forgetPassword = QtWidgets.QPushButton(self.frame)
self.forgetPassword.setGeometry(QtCore.QRect(310, 150, 91, 31))
self.forgetPassword.setStyleSheet("QPushButton\n"
"{\n"
" color:rgb(222, 255, 253);font: 9pt \"黑体\";\n"
" background-color:transparent;\n"
"font-size:13px;\n"
"}\n"
"\n"
"QPushButton:hover\n"
"{\n"
" color:rgb(97 , 179 , 246);\n"
"}\n"
"\n"
"QPushButton:pressed\n"
"{\n"
" color:rgb(0 , 109 , 176);\n"
"}\n"
"")
self.forgetPassword.setObjectName("forgetPassword")
self.closeLogInWindow = QtWidgets.QPushButton(self.frame)
self.closeLogInWindow.setGeometry(QtCore.QRect(400, 0, 30, 30))
self.closeLogInWindow.setStyleSheet("QPushButton\n"
"{\n"
"background:transparent;\n"
"image: url(:/icon/close.png);\n"
"}\n"
"QPushButton:hover\n"
"{\n"
"image: url(:/icon/close_hover.png);\n"
"}\n"
"QPushButton:pressed\n"
"{\n"
"image: url(:/icon/close_press.png);\n"
"}")
self.closeLogInWindow.setText("")
self.closeLogInWindow.setObjectName("closeLogInWindow")
self.arrowcloseLogInWindow = QtWidgets.QPushButton(self.frame)
self.arrowcloseLogInWindow.setGeometry(QtCore.QRect(370, 0, 30, 30))
self.arrowcloseLogInWindow.setStyleSheet("QPushButton:hover\n"
"{\n"
"image: url(:/icon/min_hover.bmp);\n"
"}\n"
"QPushButton:pressed\n"
"{\n"
"image: url(:/icon/min_press.bmp);\n"
"}\n"
"QPushButton\n"
"{\n"
"background:transparent;\n"
"image: url(:/icon/min_.png);\n"
"}")
self.arrowcloseLogInWindow.setText("")
self.arrowcloseLogInWindow.setIconSize(QtCore.QSize(30, 30))
self.arrowcloseLogInWindow.setObjectName("arrowcloseLogInWindow")
self.userKey_2 = QtWidgets.QLabel(self.frame)
self.userKey_2.setGeometry(QtCore.QRect(60, 190, 61, 30))
self.userKey_2.setObjectName("userKey_2")
self.jobPosition = QtWidgets.QComboBox(self.frame)
self.jobPosition.setGeometry(QtCore.QRect(150, 190, 141, 31))
self.jobPosition.setStyleSheet("QComboBox\n"
"{\n"
" background:white;\n"
" padding-left:5px ;\n"
" border-top-left-radius:3px;\n"
" border-top-right-radius:3px;\n"
"}\n"
"QComboBox:hover\n"
"{\n"
" border: 1px solid rgb(21 , 131 , 221);\n"
"}\n"
"QComboBox QAbstractItemView::item\n"
"{\n"
" background:white;\n"
" height:40px;\n"
"}\n"
"")
self.jobPosition.setObjectName("jobPosition")
self.ID.raise_()
self.logIn.raise_()
self.rememberKey.raise_()
self.Key.raise_()
self.userID.raise_()
self.userKey.raise_()
self.autLog.raise_()
self.mallName.raise_()
self.registAccount.raise_()
self.forgetPassword.raise_()
self.userKey_2.raise_()
self.jobPosition.raise_()
self.arrowcloseLogInWindow.raise_()
self.closeLogInWindow.raise_()
self.retranslateUi(LoginInterface)
QtCore.QMetaObject.connectSlotsByName(LoginInterface)
def retranslateUi(self, LoginInterface):
_translate = QtCore.QCoreApplication.translate
LoginInterface.setWindowTitle(_translate("LoginInterface", "LoginInterface"))
self.ID.setPlaceholderText(_translate("LoginInterface", "账号/手机号"))
self.logIn.setText(_translate("LoginInterface", "登录"))
self.rememberKey.setText(_translate("LoginInterface", "记住密码"))
self.Key.setPlaceholderText(_translate("LoginInterface", "Password"))
self.userID.setText(_translate("LoginInterface", "用户名:"))
self.userKey.setText(_translate("LoginInterface", "密码:"))
self.autLog.setText(_translate("LoginInterface", "自动登录"))
self.mallName.setText(_translate("LoginInterface", "JOJO"))
self.registAccount.setText(_translate("LoginInterface", "注册账号"))
self.forgetPassword.setText(_translate("LoginInterface", "忘记密码"))
self.userKey_2.setText(_translate("LoginInterface", "职位:"))
from GUI.image.image import *
|
"""
Summary
-------
The purpose of this script is to test the reading and writing functionality of
the `calsim_toolkit` module on HEC5Q data.
"""
# %% Import libraries.
# Import standard libraries.
import os
import sys
# Import custom libraries.
cwd = os.getcwd()
this_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(this_dir)
CustDir = os.path.abspath('../..')
os.chdir(cwd)
if CustDir not in sys.path: sys.path.insert(1, CustDir)
import calsim_toolkit as cs
# %% Define functions.
def main():
fp = r'../../HEC5Q/NAA/AR_HEC5Q_Q5/Model/AR_WQ_Report.dss'
# fp = r'../../HEC5Q/NAA/AR_HEC5Q_Q5/Model/CALSIMII_HEC5Q.dss'
fp_out = r'../data/HEC5Q_test_out.dss'
part_b = 'AMERICAN'.split()
# part_b = 'GERBER_1920'.split()
# df = cs.read_dss(fp, b=part_b, end_date='2015-09-30')
# df = cs.read_dss(fp, b=part_b, c='SWRAD', end_date='2015-09-30')
df = cs.read_dss(fp, e='1DAY', end_date='2015-09-30')
df = df.cs.wide()
df.cs.to_dss(fp_out, a='CalSim3', f='JAS_dev')
return 0
# %% Execute script.
if __name__ == '__main__':
main()
|
import unittest
import solution
class TicTacHomeworkTest(unittest.TestCase):
def test_empty(self):
b = solution.TicTacToeBoard()
empty_board = '\n -------------\n' +\
'3 | | | |\n' +\
' -------------\n' +\
'2 | | | |\n' +\
' -------------\n' +\
'1 | | | |\n' +\
' -------------\n' +\
' A B C \n'
self.assertEqual(empty_board, b.__str__())
def test_full(self):
d = solution.TicTacToeBoard()
full_board = '\n -------------\n' +\
'3 | O | O | X |\n' +\
' -------------\n' +\
'2 | X | X | O |\n' +\
' -------------\n' +\
'1 | O | X | O |\n' +\
' -------------\n' +\
' A B C \n'
d["A1"] = 'O'
d["B1"] = 'X'
d["A3"] = 'O'
d["A2"] = 'X'
d["C2"] = 'O'
d["C3"] = 'X'
d["B3"] = 'O'
d["B2"] = 'X'
d["C1"] = 'O'
self.assertEqual(full_board, d.__str__())
def test_x_wins(self):
h = solution.TicTacToeBoard()
h["A1"] = 'X'
h["A2"] = 'O'
h["B1"] = 'X'
h["A3"] = 'O'
h["C1"] = 'X'
self.assertEqual('X wins!', h.game_status())
if __name__ == '__main__':
unittest.main()
|
import tkinter as tk
from lib import scraper
from lib import graphtransfer
from lib import yaml_editor
if __name__ == '__main__':
root = tk.Tk()
root.title('iDnes graph scraper')
# Create main containers
header = tk.Frame(root, width=450, height=50)
center = tk.Frame(root, width=50, height=40, padx=3, pady=3)
# layout all of the main containers
root.grid_rowconfigure(1, weight=1)
root.grid_columnconfigure(0, weight=1)
header.grid(row = 0, sticky='ew')
center.grid(row = 1, sticky='nsew')
# Widgets for header frame
header_label = tk.Label(header, text = 'iDnes Scraper to graph - dupm01')
header_label.pack(fill="both")
# create the center widgets
center.grid_rowconfigure(0, weight=1)
center.grid_columnconfigure(1, weight=1)
top_row = tk.Frame(center, width = 250)
bottom_row = tk.Frame(center, width = 250)
top_row.grid(row = 0, column = 0, sticky = "ew")
bottom_row.grid(row = 1, column = 0, sticky = "ew")
# Left column buttons
tk.Label(top_row, text = 'Neo4j server settings').grid(row = 0, column = 0, columnspan = 3, sticky = tk.W + tk.E)
tk.Label(top_row, text = 'Server:', width= 15).grid(row = 1, column = 0)
server_entry = tk.Entry(top_row)
server_entry.insert(0, 'bolt://localhost:7687')
server_entry.grid(row = 1, column = 1)
tk.Label(top_row, text = 'Username:').grid(row = 2, column = 0)
username_entry = tk.Entry(top_row)
username_entry.insert(0, 'neo4j')
username_entry.grid(row = 2, column = 1)
tk.Label(top_row, text = 'Password').grid(row = 3, column = 0)
password_entry = tk.Entry(top_row)
password_entry.insert(0, '123')
password_entry.grid(row = 3, column = 1)
tk.Button(top_row, text = "Clear DB", command = lambda: [yaml_editor.edit_conf_graph(server_entry.get(), username_entry.get(), password_entry.get()), graphtransfer.clear_db()], width= 10).grid(row = 1, column = 2, rowspan = 3, sticky = tk.N + tk.S)
# Right column buttons
tk.Label(bottom_row, text = 'Page settings').grid(row = 0, columnspan = 3, sticky = tk.W + tk.E)
tk.Label(bottom_row, text = 'Folder name:', width= 15).grid(row = 1, column = 0)
folder_entry = tk.Entry(bottom_row)
folder_entry.grid(row = 1, column = 1)
tk.Label(bottom_row, text = 'Article URL:').grid(row = 2, column = 0)
url_entry = tk.Entry(bottom_row)
url_entry.grid(row = 2, column = 1)
tk.Label(bottom_row, text = 'Pages to scrape:').grid(row = 3, column = 0)
scrapenumber_entry = tk.Entry(bottom_row)
scrapenumber_entry.grid(row = 3, column = 1)
tk.Button(bottom_row, text = "Scrape", command = lambda: [yaml_editor.edit_conf_files(folder_entry.get(), url_entry.get(), scrapenumber_entry.get(), server_entry.get(), username_entry.get(), password_entry.get()), scraper.main()], width= 10).grid(row = 1, column = 2)
tk.Button(bottom_row, text = "Send to graph", command = lambda: [yaml_editor.edit_conf_files(folder_entry.get(), url_entry.get(), scrapenumber_entry.get(), server_entry.get(), username_entry.get(), password_entry.get()), graphtransfer.main()], width= 10).grid(row = 2, column = 2)
tk.Button(bottom_row, text = "Both", command = lambda: [yaml_editor.edit_conf_files(folder_entry.get(), url_entry.get(), scrapenumber_entry.get(), server_entry.get(), username_entry.get(), password_entry.get()), scraper.main(), graphtransfer.main()], width= 10).grid(row = 3, column = 2)
root.mainloop()
|
from math import cos, sin, tan, tau
from reportlab.lib import pagesizes
from reportlab.lib.units import inch
from reportlab.pdfgen.canvas import Canvas
from .carddb import HSRarity
FONT_NAME = 'Times-Roman'
FONT_SIZE = 10
CAP_SIZE = 12
SC_SIZE = 10
CARDS_PER_CLASS = 15
LINE_HEIGHT = 12
BLOCK_HEIGHT = LINE_HEIGHT * (CARDS_PER_CLASS + 1)
GUTTER = LINE_HEIGHT / 2
BUBBLE_SIZE = 9
BUBBLE_RAD = 3
STAR_RAD = 4
LITTLE_RAD = 2 * STAR_RAD * cos(0.15 * tau) / tan(tau/5)
BUBBLE_START = -4 * BUBBLE_SIZE - GUTTER
RARITY_COLORS = {
None: (0, 0, 0),
HSRarity.FREE: (0, 0, 0),
HSRarity.COMMON: (0, 0, 0),
HSRarity.RARE: (0, 0, 1),
HSRarity.EPIC: (0.5, 0, 0.5),
HSRarity.LEGENDARY: (1, 0.5, 0),
}
def mkpdfcardlist(hs_set, cards, outpath):
def start_class(cls_name):
nonlocal alt_side, begun, y
if begun:
if y <= bottom_margin + BLOCK_HEIGHT:
if alt_side:
alt_side = False
c.showPage()
c.setFont(FONT_NAME, FONT_SIZE)
c.translate(left_margin, 0)
else:
alt_side = True
c.translate(block_shift, 0)
y = top_margin
else:
y -= LINE_HEIGHT
else:
begun = True
y -= LINE_HEIGHT
c.setStrokeColorRGB(0,0,0)
c.setFillColorRGB(0,0,0)
c.setFontSize(CAP_SIZE)
c.drawString(0, y, cls_name[0])
x = c.stringWidth(cls_name[0])
c.setFontSize(SC_SIZE)
c.drawString(x, y, cls_name[1:])
c.setFontSize(FONT_SIZE)
c.line(
BUBBLE_START,
y - LINE_HEIGHT + FONT_SIZE,
BUBBLE_START + block_width,
y - LINE_HEIGHT + FONT_SIZE,
)
def show_card(card):
nonlocal alt_side, y
if y <= bottom_margin + LINE_HEIGHT:
if alt_side:
alt_side = False
c.showPage()
c.setFont(FONT_NAME, FONT_SIZE)
c.translate(left_margin, 0)
else:
alt_side = True
c.translate(block_shift, 0)
y = top_margin
else:
y -= LINE_HEIGHT
c.setStrokeColorRGB(*RARITY_COLORS[card.rarity])
c.setFillColorRGB(*RARITY_COLORS[card.rarity])
x = BUBBLE_START
circle(c, x, y)
x += BUBBLE_SIZE
if card.rarity is not HSRarity.LEGENDARY:
circle(c, x, y)
x += BUBBLE_SIZE
star(c, x, y)
x += BUBBLE_SIZE
if card.rarity is not HSRarity.LEGENDARY:
star(c, x, y)
c.drawString(0, y, card.name)
c.drawRightString(cost_end, y, str(card.cost))
c.drawString(type_start, y, str(card.typeline))
if card.statline:
c.drawAlignedString(slash_point, y, card.statline, pivotChar='/')
c = Canvas(outpath, pagesizes.letter)
c.setFont(FONT_NAME, FONT_SIZE)
namelen = max(c.stringWidth(card.name) for _,cs in cards for card in cs)
typelen = max(c.stringWidth(card.typeline) for _,cs in cards for card in cs)
tens = c.stringWidth('88')
cost_end = namelen + GUTTER + tens
type_start = cost_end + GUTTER
slash_point = type_start + typelen + GUTTER + tens
block_width = slash_point + c.stringWidth('/') + tens - BUBBLE_START
block_shift = block_width + 2 * GUTTER
left_margin = (8.5 * inch - block_width - block_shift)/2 - BUBBLE_START
c.translate(left_margin, 0)
bottom_margin = (11 * inch - 3 * (BLOCK_HEIGHT + LINE_HEIGHT)) / 2
top_margin = 11 * inch - bottom_margin
y = top_margin
begun = False
alt_side = False
for cls, cs in cards:
start_class(str(cls).upper())
for card in cs:
show_card(card)
c.showPage()
c.save()
def circle(c, x, y):
c.saveState()
c.setLineWidth(0.5)
c.circle(x + BUBBLE_SIZE/2, y + LINE_HEIGHT/2 - FONT_SIZE/3, BUBBLE_RAD)
c.restoreState()
def star(c, x, y):
c.saveState()
c.setLineWidth(0.5)
c.translate(x + BUBBLE_SIZE/2, y + LINE_HEIGHT/2 - FONT_SIZE/3)
p = c.beginPath()
p.moveTo(0, STAR_RAD)
θ = tau/4
for r in [LITTLE_RAD, STAR_RAD] * 4 + [LITTLE_RAD]:
θ += tau/10
p.lineTo(r*cos(θ), r*sin(θ))
p.close()
c.drawPath(p, fill=0, stroke=1)
c.restoreState()
|
#!~/envs/udacity_python3_mongodb
"""
Your task is to explore the data a bit more.
The first task is a fun one - find out how many unique users
have contributed to the map in this particular area!
The function process_map should return a set of unique user IDs ("uid")
"""
import xml.etree.cElementTree as et
import pprint
import re
# Global variables
dataset_dir = 'datasets/'
dataset_file = dataset_dir + 'example.osm'
def get_user(element, users):
if element.tag == "node":
        # The docstring asks for unique user IDs, so read the 'uid' attribute.
        uid = element.get('uid')
        if uid is not None:
            users.add(uid)
return users
def process_map(filename):
users = set()
for _, element in et.iterparse(filename):
users = get_user(element, users)
return users
def test():
users = process_map(dataset_file)
pprint.pprint(users)
assert len(users) == 6
if __name__ == '__main__':
test()
|
from django.db import models
import uuid
# Create your models here.
class Voterdb(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
Voter_id= models.CharField(max_length=10)
phone = models.CharField(max_length=13)
firstname = models.CharField(max_length=100)
lastname = models.CharField(max_length=100)
voting_status = models.BooleanField(default=False)
voter_address = models.CharField(max_length=45, blank=True)
txn_hash = models.CharField(max_length=100, blank=True)
|
import json
from src.util import Util
# Testing inputs
if __name__ == "__main__":
# Testing Place order
vals = [
b'O',
1234,
b"Claire ", # This must be 10 characters. Fill the unused characters with spaces.
b'B',
345624,
1234,
b"DAY ", # This must be 4 characters.
500000.5, # Price = $500000.5
99999,
5834,
b"P",
b"P",
1234,
b'1',
b'1'
]
b = Util.package(vals)
msg_dict = Util.unpackage(b[0:1], b[1:]) # Separate header byte from rest of the bytes.
print(json.dumps( # Make the dictionary easy to read
msg_dict,
indent = 4,
separators = (',', ': ')
))
# Testing replace order
vals = [
b'U',
1234,
1235,
100000,
5000.5,
0,
b" ",
555
]
b = Util.package(vals)
msg_dict = Util.unpackage(b[0:1], b[1:])
print(json.dumps( # Make the dictionary easy to read
msg_dict,
indent = 4,
separators = (',', ': ')
))
# Testing Cancel Order
vals = [
b'X',
1234,
0
]
b = Util.package(vals)
msg_dict = Util.unpackage(b[0:1], b[1:])
print(json.dumps( # Make the dictionary easy to read
msg_dict,
indent = 4,
separators = (',', ': ')
))
|
from sympy.ntheory.generate import Sieve, sieve
from sympy.ntheory.primetest import (
mr,
is_lucas_prp,
is_square,
is_strong_lucas_prp,
is_extra_strong_lucas_prp,
isprime,
is_euler_pseudoprime,
)
from sympy.testing.pytest import slow
def test_euler_pseudoprimes():
assert is_euler_pseudoprime(9, 1) == True
assert is_euler_pseudoprime(341, 2) == False
assert is_euler_pseudoprime(121, 3) == True
assert is_euler_pseudoprime(341, 4) == True
assert is_euler_pseudoprime(217, 5) == False
assert is_euler_pseudoprime(185, 6) == False
assert is_euler_pseudoprime(55, 111) == True
assert is_euler_pseudoprime(115, 114) == True
assert is_euler_pseudoprime(49, 117) == True
assert is_euler_pseudoprime(85, 84) == True
assert is_euler_pseudoprime(87, 88) == True
assert is_euler_pseudoprime(49, 128) == True
assert is_euler_pseudoprime(39, 77) == True
assert is_euler_pseudoprime(9881, 30) == True
assert is_euler_pseudoprime(8841, 29) == False
assert is_euler_pseudoprime(8421, 29) == False
assert is_euler_pseudoprime(9997, 19) == True
@slow
def test_prps():
oddcomposites = [n for n in range(1, 10 ** 5) if n % 2 and not isprime(n)]
# A checksum would be better.
assert sum(oddcomposites) == 2045603465
assert [n for n in oddcomposites if mr(n, [2])] == [
2047,
3277,
4033,
4681,
8321,
15841,
29341,
42799,
49141,
52633,
65281,
74665,
80581,
85489,
88357,
90751,
]
assert [n for n in oddcomposites if mr(n, [3])] == [
121,
703,
1891,
3281,
8401,
8911,
10585,
12403,
16531,
18721,
19345,
23521,
31621,
44287,
47197,
55969,
63139,
74593,
79003,
82513,
87913,
88573,
97567,
]
assert [n for n in oddcomposites if mr(n, [325])] == [
9,
25,
27,
49,
65,
81,
325,
341,
343,
697,
1141,
2059,
2149,
3097,
3537,
4033,
4681,
4941,
5833,
6517,
7987,
8911,
12403,
12913,
15043,
16021,
20017,
22261,
23221,
24649,
24929,
31841,
35371,
38503,
43213,
44173,
47197,
50041,
55909,
56033,
58969,
59089,
61337,
65441,
68823,
72641,
76793,
78409,
85879,
]
assert not any(mr(n, [9345883071009581737]) for n in oddcomposites)
assert [n for n in oddcomposites if is_lucas_prp(n)] == [
323,
377,
1159,
1829,
3827,
5459,
5777,
9071,
9179,
10877,
11419,
11663,
13919,
14839,
16109,
16211,
18407,
18971,
19043,
22499,
23407,
24569,
25199,
25877,
26069,
27323,
32759,
34943,
35207,
39059,
39203,
39689,
40309,
44099,
46979,
47879,
50183,
51983,
53663,
56279,
58519,
60377,
63881,
69509,
72389,
73919,
75077,
77219,
79547,
79799,
82983,
84419,
86063,
90287,
94667,
97019,
97439,
]
assert [n for n in oddcomposites if is_strong_lucas_prp(n)] == [
5459,
5777,
10877,
16109,
18971,
22499,
24569,
25199,
40309,
58519,
75077,
97439,
]
assert [n for n in oddcomposites if is_extra_strong_lucas_prp(n)] == [
989,
3239,
5777,
10877,
27971,
29681,
30739,
31631,
39059,
72389,
73919,
75077,
]
def test_isprime():
s = Sieve()
s.extend(100000)
ps = set(s.primerange(2, 100001))
for n in range(100001):
# if (n in ps) != isprime(n): print n
assert (n in ps) == isprime(n)
assert isprime(179424673)
assert isprime(20678048681)
assert isprime(1968188556461)
assert isprime(2614941710599)
assert isprime(65635624165761929287)
assert isprime(1162566711635022452267983)
assert isprime(77123077103005189615466924501)
assert isprime(3991617775553178702574451996736229)
assert isprime(273952953553395851092382714516720001799)
assert isprime(
int(
"""
531137992816767098689588206552468627329593117727031923199444138200403\
559860852242739162502265229285668889329486246501015346579337652707239\
409519978766587351943831270835393219031728127"""
)
)
# Some Mersenne primes
assert isprime(2 ** 61 - 1)
assert isprime(2 ** 89 - 1)
assert isprime(2 ** 607 - 1)
    # (but not all Mersenne numbers are prime)
assert not isprime(2 ** 601 - 1)
# pseudoprimes
# -------------
# to some small bases
assert not isprime(2152302898747)
assert not isprime(3474749660383)
assert not isprime(341550071728321)
assert not isprime(3825123056546413051)
# passes the base set [2, 3, 7, 61, 24251]
assert not isprime(9188353522314541)
# large examples
assert not isprime(877777777777777777777777)
# conjectured psi_12 given at http://mathworld.wolfram.com/StrongPseudoprime.html
assert not isprime(318665857834031151167461)
# conjectured psi_17 given at http://mathworld.wolfram.com/StrongPseudoprime.html
assert not isprime(564132928021909221014087501701)
# Arnault's 1993 number; a factor of it is
# 400958216639499605418306452084546853005188166041132508774506\
# 204738003217070119624271622319159721973358216316508535816696\
# 9145233813917169287527980445796800452592031836601
assert not isprime(
int(
"""
803837457453639491257079614341942108138837688287558145837488917522297\
427376533365218650233616396004545791504202360320876656996676098728404\
396540823292873879185086916685732826776177102938969773947016708230428\
687109997439976544144845341155872450633409279022275296229414984230688\
1685404326457534018329786111298960644845216191652872597534901"""
)
)
# Arnault's 1995 number; can be factored as
# p1*(313*(p1 - 1) + 1)*(353*(p1 - 1) + 1) where p1 is
# 296744956686855105501541746429053327307719917998530433509950\
# 755312768387531717701995942385964281211880336647542183455624\
# 93168782883
assert not isprime(
int(
"""
288714823805077121267142959713039399197760945927972270092651602419743\
230379915273311632898314463922594197780311092934965557841894944174093\
380561511397999942154241693397290542371100275104208013496673175515285\
922696291677532547504444585610194940420003990443211677661994962953925\
045269871932907037356403227370127845389912612030924484149472897688540\
6024976768122077071687938121709811322297802059565867"""
)
)
sieve.extend(3000)
assert isprime(2819)
assert not isprime(2931)
assert not isprime(2.0)
def test_is_square():
assert [i for i in range(25) if is_square(i)] == [0, 1, 4, 9, 16]
# issue #17044
assert not is_square(60 ** 3)
assert not is_square(60 ** 5)
assert not is_square(84 ** 7)
assert not is_square(105 ** 9)
assert not is_square(120 ** 3)
|
import datetime
import re
import traceback
from typing import Optional, List, Any, Dict, Set, Union
import json
import os
import time
import sys
import requests
from dev_tools.github_repository import GithubRepository
GITHUB_REPO_NAME = 'cirq'
GITHUB_REPO_ORGANIZATION = 'quantumlib'
ACCESS_TOKEN_ENV_VARIABLE = 'CIRQ_BOT_GITHUB_ACCESS_TOKEN'
# This is needed for updating forks before merging them, because currently the
# github API has no equivalent to the 'Update Branch' button on the website.
# This env variable should be the 'user_session' cookie set by github.
UPDATE_BRANCH_COOKIE_ENV_VARIABLE = 'CIRQ_BOT_UPDATE_BRANCH_COOKIE'
POLLING_PERIOD = datetime.timedelta(seconds=10)
USER_AUTO_MERGE_LABEL = 'automerge'
HEAD_AUTO_MERGE_LABEL = 'front_of_queue_automerge'
AUTO_MERGE_LABELS = [USER_AUTO_MERGE_LABEL, HEAD_AUTO_MERGE_LABEL]
RECENTLY_MODIFIED_THRESHOLD = datetime.timedelta(seconds=30)
def is_recent_date(date: datetime.datetime) -> bool:
d = datetime.datetime.utcnow() - date
return d < RECENTLY_MODIFIED_THRESHOLD
class CannotAutomergeError(RuntimeError):
def __init__(self, *args, may_be_temporary: bool = False):
super().__init__(*args)
self.may_be_temporary = may_be_temporary
class PullRequestDetails:
def __init__(self, payload: Any, repo: GithubRepository) -> None:
self.payload = payload
self.repo = repo
@staticmethod
def from_github(repo: GithubRepository,
pull_id: int) -> 'PullRequestDetails':
"""
References:
https://developer.github.com/v3/pulls/#get-a-single-pull-request
"""
url = ("https://api.github.com/repos/{}/{}/pulls/{}"
"?access_token={}".format(repo.organization,
repo.name,
pull_id,
repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Pull check failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
payload = json.JSONDecoder().decode(response.content.decode())
return PullRequestDetails(payload, repo)
@property
def remote_repo(self) -> GithubRepository:
return GithubRepository(
organization=self.payload['head']['repo']['owner']['login'],
name=self.payload['head']['repo']['name'],
access_token=self.repo.access_token)
def is_on_fork(self) -> bool:
local = (
self.repo.organization.lower(),
self.repo.name.lower()
)
remote = (
self.remote_repo.organization.lower(),
self.remote_repo.name.lower()
)
return local != remote
def has_label(self, desired_label: str) -> bool:
return any(label['name'] == desired_label
for label in self.payload['labels'])
@property
def last_updated(self) -> datetime.datetime:
return datetime.datetime.strptime(
self.payload['updated_at'],
'%Y-%m-%dT%H:%M:%SZ')
@property
def modified_recently(self) -> bool:
return is_recent_date(self.last_updated)
@property
def marked_automergeable(self) -> bool:
return any(self.has_label(label) for label in AUTO_MERGE_LABELS)
@property
def pull_id(self) -> int:
return self.payload['number']
@property
def branch_name(self) -> str:
return self.payload['head']['ref']
@property
def base_branch_name(self) -> str:
return self.payload['base']['ref']
@property
def branch_sha(self) -> str:
return self.payload['head']['sha']
@property
def title(self) -> str:
return self.payload['title']
@property
def body(self) -> str:
return self.payload['body']
def check_collaborator_has_write(repo: GithubRepository, username: str
) -> Optional[CannotAutomergeError]:
"""
References:
https://developer.github.com/v3/issues/events/#list-events-for-an-issue
"""
url = ("https://api.github.com/repos/{}/{}/collaborators/{}/permission"
"?access_token={}".format(repo.organization,
repo.name,
username,
repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Collaborator check failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
payload = json.JSONDecoder().decode(response.content.decode())
if payload['permission'] not in ['admin', 'write']:
return CannotAutomergeError(
'Only collaborators with write permission can use automerge.')
return None
def check_auto_merge_labeler(repo: GithubRepository, pull_id: int
) -> Optional[CannotAutomergeError]:
"""
References:
https://developer.github.com/v3/issues/events/#list-events-for-an-issue
"""
url = ("https://api.github.com/repos/{}/{}/issues/{}/events"
"?access_token={}".format(repo.organization,
repo.name,
pull_id,
repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Event check failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
payload = json.JSONDecoder().decode(response.content.decode())
relevant = [event
for event in payload
if event['event'] == 'labeled' and
event['label']['name'] in AUTO_MERGE_LABELS]
if not relevant:
return CannotAutomergeError('"automerge" label was never added.')
return check_collaborator_has_write(repo, relevant[-1]['actor']['login'])
def add_comment(repo: GithubRepository, pull_id: int, text: str) -> None:
"""
References:
https://developer.github.com/v3/issues/comments/#create-a-comment
"""
url = ("https://api.github.com/repos/{}/{}/issues/{}/comments"
"?access_token={}".format(repo.organization,
repo.name,
pull_id,
repo.access_token))
data = {
'body': text
}
response = requests.post(url, json=data)
if response.status_code != 201:
raise RuntimeError('Add comment failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
def edit_comment(repo: GithubRepository, text: str, comment_id: int) -> None:
"""
References:
https://developer.github.com/v3/issues/comments/#edit-a-comment
"""
url = ("https://api.github.com/repos/{}/{}/issues/comments/{}"
"?access_token={}".format(repo.organization,
repo.name,
comment_id,
repo.access_token))
data = {
'body': text
}
response = requests.patch(url, json=data)
if response.status_code != 200:
raise RuntimeError('Edit comment failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
def get_branch_details(repo: GithubRepository, branch: str) -> Any:
"""
References:
https://developer.github.com/v3/repos/branches/#get-branch
"""
url = ("https://api.github.com/repos/{}/{}/branches/{}"
"?access_token={}".format(repo.organization,
repo.name,
branch,
repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Failed to get branch details. Code: {}. Content: {}.'.format(
response.status_code, response.content))
return json.JSONDecoder().decode(response.content.decode())
def get_pr_statuses(pr: PullRequestDetails) -> List[Dict[str, Any]]:
"""
References:
https://developer.github.com/v3/repos/statuses/#list-statuses-for-a-specific-ref
"""
url = ("https://api.github.com/repos/{}/{}/commits/{}/statuses"
"?access_token={}".format(pr.repo.organization,
pr.repo.name,
pr.branch_sha,
pr.repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Get statuses failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
return json.JSONDecoder().decode(response.content.decode())
def get_pr_check_status(pr: PullRequestDetails) -> Any:
"""
References:
https://developer.github.com/v3/repos/statuses/#get-the-combined-status-for-a-specific-ref
"""
url = ("https://api.github.com/repos/{}/{}/commits/{}/status"
"?access_token={}".format(pr.repo.organization,
pr.repo.name,
pr.branch_sha,
pr.repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Get status failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
return json.JSONDecoder().decode(response.content.decode())
def classify_pr_status_check_state(pr: PullRequestDetails) -> Optional[bool]:
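    # Returns True if every status/check succeeded, False if any failed, and
    # None if any are still pending.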
has_failed = False
has_pending = False
check_status = get_pr_check_status(pr)
state = check_status['state']
if state == 'failure':
has_failed = True
elif state == 'pending':
has_pending = True
elif state != 'success':
raise RuntimeError('Unrecognized status state: {!r}'.format(state))
check_data = get_pr_checks(pr)
for check in check_data['check_runs']:
if check['status'] != 'completed':
has_pending = True
elif check['conclusion'] != 'success':
has_failed = True
if has_failed:
return False
if has_pending:
return None
return True
def classify_pr_synced_state(pr: PullRequestDetails) -> Optional[bool]:
"""
References:
https://developer.github.com/v3/pulls/#get-a-single-pull-request
https://developer.github.com/v4/enum/mergestatestatus/
"""
state = pr.payload['mergeable_state'].lower()
classification = {
'behind': False,
'clean': True,
}
return classification.get(state, None)
def get_pr_review_status(pr: PullRequestDetails, per_page: int = 100) -> Any:
"""
References:
https://developer.github.com/v3/pulls/reviews/#list-reviews-on-a-pull-request
"""
url = (f"https://api.github.com/repos/{pr.repo.organization}/{pr.repo.name}"
f"/pulls/{pr.pull_id}/reviews"
f"?per_page={per_page};access_token={pr.repo.access_token}")
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Get review failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
return json.JSONDecoder().decode(response.content.decode())
def get_pr_checks(pr: PullRequestDetails) -> Dict[str, Any]:
"""
References:
https://developer.github.com/v3/checks/runs/#list-check-runs-for-a-specific-ref
"""
url = ("https://api.github.com/repos/{}/{}/commits/{}/check-runs"
"?access_token={}".format(pr.repo.organization,
pr.repo.name,
pr.branch_sha,
pr.repo.access_token))
response = requests.get(
url,
headers={'Accept': 'application/vnd.github.antiope-preview+json'})
if response.status_code != 200:
raise RuntimeError(
'Get check-runs failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
return json.JSONDecoder().decode(response.content.decode())
_last_print_was_tick = False
def log(*args):
global _last_print_was_tick
if _last_print_was_tick:
print()
_last_print_was_tick = False
print(*args)
def wait_for_polling_period():
global _last_print_was_tick
_last_print_was_tick = True
print('.', end='', flush=True)
time.sleep(POLLING_PERIOD.total_seconds())
def absent_status_checks(pr: PullRequestDetails,
master_data: Optional[Any] = None) -> Set[str]:
if pr.base_branch_name == 'master' and master_data is not None:
branch_data = master_data
else:
branch_data = get_branch_details(pr.repo, pr.base_branch_name)
status_data = get_pr_statuses(pr)
check_data = get_pr_checks(pr)
statuses_present = {status['context'] for status in status_data}
checks_present = {check['name'] for check in check_data['check_runs']}
reqs = branch_data['protection']['required_status_checks']['contexts']
return set(reqs) - statuses_present - checks_present
def get_repo_ref(repo: GithubRepository, ref: str) -> Dict[str, Any]:
"""
References:
https://developer.github.com/v3/git/refs/#get-a-reference
"""
url = ("https://api.github.com/repos/{}/{}/git/refs/{}"
"?access_token={}".format(repo.organization,
repo.name,
ref,
repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Refs get failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
payload = json.JSONDecoder().decode(response.content.decode())
return payload
def get_master_sha(repo: GithubRepository) -> str:
ref = get_repo_ref(repo, 'heads/master')
return ref['object']['sha']
def list_pr_comments(repo: GithubRepository, pull_id: int
) -> List[Dict[str, Any]]:
"""
References:
https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue
"""
url = ("https://api.github.com/repos/{}/{}/issues/{}/comments"
"?access_token={}".format(repo.organization,
repo.name,
pull_id,
repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Comments get failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
payload = json.JSONDecoder().decode(response.content.decode())
return payload
def delete_comment(repo: GithubRepository, comment_id: int) -> None:
"""
References:
https://developer.github.com/v3/issues/comments/#delete-a-comment
"""
url = ("https://api.github.com/repos/{}/{}/issues/comments/{}"
"?access_token={}".format(repo.organization,
repo.name,
comment_id,
repo.access_token))
response = requests.delete(url)
if response.status_code != 204:
raise RuntimeError(
'Comment delete failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
def attempt_update_branch_button(pr: PullRequestDetails
) -> Union[bool, CannotAutomergeError]:
session_cookie = os.getenv(UPDATE_BRANCH_COOKIE_ENV_VARIABLE)
if session_cookie is None:
return attempt_sync_with_master(pr)
# Get the pull request page.
pr_url = 'https://github.com/{}/{}/pull/{}'.format(
pr.repo.organization,
pr.repo.name,
pr.pull_id)
cookies = {'user_session': session_cookie}
response = requests.get(pr_url, cookies=cookies)
if response.status_code != 200:
raise RuntimeError(
'Failed to read PR page. Code: {}. Content: {}.'.format(
response.status_code, response.content))
# Find the update branch button and relevant tokens.
html = response.content.decode()
form_guts = re.match(
'.*<form class="branch-action-btn'
'.*action=".+/pull/.+/update_branch"'
'.*<input name="utf8" type="hidden" value="([^"]+)"'
'.*<input type="hidden" name="authenticity_token" value="([^"]+)"'
'.*<input type="hidden" name="expected_head_oid" value="([^"]+)"'
'.*</form>.*', html, re.DOTALL)
if form_guts is None:
if '(Logged out)' in html:
return CannotAutomergeError('Need a fresh :cookie:.')
raise RuntimeError(
'Failed to find update branch button. Html: {}.'.format(
html))
# Press the update branch button.
data = {
'utf8': '✓',
'authenticity_token': form_guts.group(2),
'expected_head_oid': form_guts.group(3),
}
update_url = 'https://github.com/{}/{}/pull/{}/update_branch'.format(
pr.repo.organization,
pr.repo.name,
pr.pull_id)
update_response = requests.post(update_url,
cookies=dict(response.cookies),
data=data)
if update_response.status_code != 200:
raise RuntimeError(
'Failed to hit update branch button. Code: {}. Content: {}.'.format(
update_response.status_code, update_response.content))
return True
def attempt_sync_with_master(pr: PullRequestDetails
) -> Union[bool, CannotAutomergeError]:
"""
References:
https://developer.github.com/v3/repos/merging/#perform-a-merge
"""
master_sha = get_master_sha(pr.repo)
remote = pr.remote_repo
url = ("https://api.github.com/repos/{}/{}/merges"
"?access_token={}".format(remote.organization,
remote.name,
remote.access_token))
data = {
'base': pr.branch_name,
'head': master_sha,
        'commit_message': 'Update branch {!r} (automerge)'.format(pr.branch_name)
}
response = requests.post(url, json=data)
if response.status_code == 201:
# Merge succeeded.
log('Synced #{} ({!r}) with master.'.format(pr.pull_id, pr.title))
return True
if response.status_code == 204:
# Already merged.
return False
if response.status_code == 409:
# Merge conflict.
return CannotAutomergeError("There's a merge conflict.")
if response.status_code == 403:
# Permission denied.
return CannotAutomergeError(
"Spurious failure. Github API requires me to be an admin on the "
"fork repository to merge master into the PR branch. Hit "
"'Update Branch' for me before trying again.")
raise RuntimeError('Sync with master failed for unknown reason. '
'Code: {}. Content: {}.'.format(response.status_code,
response.content))
def attempt_squash_merge(pr: PullRequestDetails
) -> Union[bool, CannotAutomergeError]:
"""
References:
https://developer.github.com/v3/pulls/#merge-a-pull-request-merge-button
"""
url = ("https://api.github.com/repos/{}/{}/pulls/{}/merge"
"?access_token={}".format(pr.repo.organization,
pr.repo.name,
pr.pull_id,
pr.repo.access_token))
data = {
'commit_title': '{} (#{})'.format(pr.title, pr.pull_id),
'commit_message': pr.body,
'sha': pr.branch_sha,
'merge_method': 'squash'
}
response = requests.put(url, json=data)
if response.status_code == 200:
# Merge succeeded.
log('Merged PR#{} ({!r}):\n{}\n'.format(
pr.pull_id,
pr.title,
indent(pr.body)))
return True
if response.status_code == 405:
return CannotAutomergeError("Pull Request is not mergeable.")
if response.status_code == 409:
# Need to sync.
return False
raise RuntimeError('Merge failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
def auto_delete_pr_branch(pr: PullRequestDetails) -> bool:
"""
References:
https://developer.github.com/v3/git/refs/#delete-a-reference
"""
open_pulls = list_open_pull_requests(pr.repo, base_branch=pr.branch_name)
if any(open_pulls):
log('Not deleting branch {!r}. It is used elsewhere.'.format(
pr.branch_name))
return False
remote = pr.remote_repo
if pr.is_on_fork():
log('Not deleting branch {!r}. It belongs to a fork ({}/{}).'.format(
pr.branch_name,
pr.remote_repo.organization,
pr.remote_repo.name))
return False
url = ("https://api.github.com/repos/{}/{}/git/refs/heads/{}"
"?access_token={}".format(remote.organization,
remote.name,
pr.branch_name,
remote.access_token))
response = requests.delete(url)
if response.status_code == 204:
# Delete succeeded.
log('Deleted branch {!r}.'.format(pr.branch_name))
return True
log('Delete failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
return False
def branch_data_modified_recently(payload: Any) -> bool:
modified_date = datetime.datetime.strptime(
payload['commit']['commit']['committer']['date'],
'%Y-%m-%dT%H:%M:%SZ')
return is_recent_date(modified_date)
def add_labels_to_pr(repo: GithubRepository,
pull_id: int,
*labels: str,
override_token: str = None) -> None:
"""
References:
https://developer.github.com/v3/issues/labels/#add-labels-to-an-issue
"""
url = ("https://api.github.com/repos/{}/{}/issues/{}/labels"
"?access_token={}".format(repo.organization,
repo.name,
pull_id,
override_token or repo.access_token))
response = requests.post(url, json=list(labels))
if response.status_code != 200:
raise RuntimeError(
'Add labels failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
def remove_label_from_pr(repo: GithubRepository,
pull_id: int,
label: str) -> bool:
"""
References:
https://developer.github.com/v3/issues/labels/#remove-a-label-from-an-issue
"""
url = ("https://api.github.com/repos/{}/{}/issues/{}/labels/{}"
"?access_token={}".format(repo.organization,
repo.name,
pull_id,
label,
repo.access_token))
response = requests.delete(url)
if response.status_code == 404:
payload = json.JSONDecoder().decode(response.content.decode())
if payload['message'] == 'Label does not exist':
return False
if response.status_code == 200:
# Removed the label.
return True
raise RuntimeError(
'Label remove failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
def list_open_pull_requests(repo: GithubRepository,
base_branch: Optional[str] = None
) -> List[PullRequestDetails]:
url = ("https://api.github.com/repos/{}/{}/pulls"
"?access_token={}".format(repo.organization,
repo.name,
repo.access_token))
data = {
'state': 'open',
}
if base_branch is not None:
data['base'] = base_branch
response = requests.get(url, json=data)
if response.status_code != 200:
raise RuntimeError(
'List pulls failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
pulls = json.JSONDecoder().decode(response.content.decode())
results = [PullRequestDetails(pull, repo) for pull in pulls]
# Filtering via the API doesn't seem to work, so we do it ourselves.
if base_branch is not None:
results = [result for result in results
if result.base_branch_name == base_branch]
return results
def find_auto_mergeable_prs(repo: GithubRepository) -> List[int]:
open_prs = list_open_pull_requests(repo)
auto_mergeable_prs = [pr for pr in open_prs if pr.marked_automergeable]
return [pr.payload['number'] for pr in auto_mergeable_prs]
def find_problem_with_automergeability_of_pr(
pr: PullRequestDetails,
master_branch_data: Any) -> Optional[CannotAutomergeError]:
# Sanity.
if pr.payload['state'] != 'open':
return CannotAutomergeError('Not an open pull request.')
if pr.base_branch_name != 'master':
return CannotAutomergeError('Can only automerge into master.')
if pr.payload['mergeable_state'] == 'dirty':
return CannotAutomergeError('There are merge conflicts.')
# If a user removes the automerge label, remove the head label for them.
if (pr.has_label(HEAD_AUTO_MERGE_LABEL) and
not pr.has_label(USER_AUTO_MERGE_LABEL)):
return CannotAutomergeError(
f'The {USER_AUTO_MERGE_LABEL} label was removed.',
may_be_temporary=True)
# Only collaborators with write access can use the automerge labels.
label_problem = check_auto_merge_labeler(pr.repo, pr.pull_id)
if label_problem is not None:
return label_problem
# Check review status.
review_status = get_pr_review_status(pr)
if not any(review['state'] == 'APPROVED' for review in review_status):
return CannotAutomergeError('No approved review.')
if any(review['state'] == 'CHANGES_REQUESTED'
for review in review_status):
return CannotAutomergeError('A review is requesting changes.')
# Any failing status checks?
status_check_state = classify_pr_status_check_state(pr)
if status_check_state is False:
return CannotAutomergeError('A status check is failing.')
# Some issues can only be detected after waiting a bit.
if not pr.modified_recently:
# Nothing is setting a required status check.
missing_statuses = absent_status_checks(pr, master_branch_data)
if missing_statuses:
return CannotAutomergeError(
'A required status check is not present.\n\n'
'Missing statuses: {!r}'.format(sorted(missing_statuses)))
# Can't figure out how to make it merge.
if pr.payload['mergeable_state'] == 'blocked':
if status_check_state is True:
return CannotAutomergeError(
"Merging is blocked (I don't understand why).",
may_be_temporary=True)
if pr.payload['mergeable'] is False:
return CannotAutomergeError(
"PR isn't classified as mergeable (I don't understand why).",
may_be_temporary=True)
return None
def cannot_merge_pr(pr: PullRequestDetails, reason: CannotAutomergeError):
log('Cancelled automerge of PR#{} ({!r}): {}'.format(
pr.pull_id,
pr.title,
reason.args[0]))
add_comment(pr.repo,
pr.pull_id,
'Automerge cancelled: {}'.format(reason))
for label in AUTO_MERGE_LABELS:
if pr.has_label(label):
remove_label_from_pr(pr.repo, pr.pull_id, label)
def drop_temporary(pr: PullRequestDetails,
problem: Optional[CannotAutomergeError],
prev_seen_times: Dict[int, datetime.datetime],
next_seen_times: Dict[int, datetime.datetime],
) -> Optional[CannotAutomergeError]:
"""Filters out problems that may be temporary."""
if problem is not None and problem.may_be_temporary:
since = prev_seen_times.get(pr.pull_id, datetime.datetime.utcnow())
if is_recent_date(since):
next_seen_times[pr.pull_id] = since
return None
return problem
def gather_auto_mergeable_prs(
repo: GithubRepository,
problem_seen_times: Dict[int, datetime.datetime]
) -> List[PullRequestDetails]:
result = []
raw_prs = list_open_pull_requests(repo)
master_branch_data = get_branch_details(repo, 'master')
if branch_data_modified_recently(master_branch_data):
return []
prev_seen_times = dict(problem_seen_times)
problem_seen_times.clear()
for raw_pr in raw_prs:
if not raw_pr.marked_automergeable:
continue
# Looking up a single PR gives more data, e.g. the 'mergeable' entry.
pr = PullRequestDetails.from_github(repo, raw_pr.pull_id)
problem = find_problem_with_automergeability_of_pr(pr,
master_branch_data)
if problem is None:
result.append(pr)
persistent_problem = drop_temporary(pr,
problem,
prev_seen_times=prev_seen_times,
next_seen_times=problem_seen_times)
if persistent_problem is not None:
cannot_merge_pr(pr, persistent_problem)
return result
def merge_desirability(pr: PullRequestDetails) -> Any:
synced = classify_pr_synced_state(pr) is True
tested = synced and (classify_pr_status_check_state(pr) is True)
forked = pr.is_on_fork()
# 1. Prefer to merge already-synced PRs. This minimizes the number of builds
# performed by travis.
# 2. Prefer to merge synced PRs from forks. This minimizes manual labor;
# currently the bot can't resync these PRs. Secondarily, avoid unsynced
# PRs from forks until necessary because they will fail when hit.
# 3. Prefer to merge PRs where the status checks have already completed.
# This is just faster, because the next build can be started sooner.
# 4. Use seniority as a tie breaker.
# Desired order is:
# TF
# SF
# T_
# S_
# __
# _F
# (S = synced, T = tested, F = forked.)
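# For example (hypothetical PRs): a tested fork PR gets rank 5 while a synced
# non-fork PR gets rank 2, and sorting by (rank, -pull_id) in descending order
# also prefers older (lower-numbered) PRs when ranks tie.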
if forked:
if tested:
rank = 5
elif synced:
rank = 4
else:
rank = 0
else:
if tested:
rank = 3
elif synced:
rank = 2
else:
rank = 1
return rank, -pr.pull_id
def pick_head_pr(active_prs: List[PullRequestDetails]
) -> Optional[PullRequestDetails]:
if not active_prs:
return None
for pr in sorted(active_prs, key=merge_desirability, reverse=True):
if pr.has_label(HEAD_AUTO_MERGE_LABEL):
return pr
promoted = max(active_prs, key=merge_desirability)
log('Front of queue: PR#{} ({!r})'.format(promoted.pull_id, promoted.title))
add_labels_to_pr(promoted.repo, promoted.pull_id, HEAD_AUTO_MERGE_LABEL)
return promoted
def duty_cycle(repo: GithubRepository,
persistent_temporary_problems: Dict[int, datetime.datetime]):
active_prs = gather_auto_mergeable_prs(repo,
persistent_temporary_problems)
head_pr = pick_head_pr(active_prs)
if head_pr is None:
return
state = classify_pr_synced_state(head_pr)
if state is False:
result = attempt_update_branch_button(head_pr)
elif state is True:
result = attempt_squash_merge(head_pr)
if result is True:
auto_delete_pr_branch(head_pr)
for label in AUTO_MERGE_LABELS:
remove_label_from_pr(repo, head_pr.pull_id, label)
else:
# `gather_auto_mergeable_prs` is responsible for this case.
result = False
if isinstance(result, CannotAutomergeError):
cannot_merge_pr(head_pr, result)
def indent(text: str) -> str:
return ' ' + text.replace('\n', '\n ')
def main():
access_token = os.getenv(ACCESS_TOKEN_ENV_VARIABLE)
if not access_token:
print('{} not set.'.format(ACCESS_TOKEN_ENV_VARIABLE), file=sys.stderr)
sys.exit(1)
repo = GithubRepository(
organization=GITHUB_REPO_ORGANIZATION,
name=GITHUB_REPO_NAME,
access_token=access_token)
log('Watching for automergeable PRs.')
problem_seen_times = {} # type: Dict[int, datetime.datetime]
while True:
try:
duty_cycle(repo, problem_seen_times)
except Exception: # Anything but a keyboard interrupt / system exit.
traceback.print_exc()
wait_for_polling_period()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: __init__.py
Description :
Author : cat
date: 2018/1/22
-------------------------------------------------
Change Activity:
2018/1/22:
-------------------------------------------------
"""
|
from tkinter import *
import math, random, os
from tkinter import messagebox
class Bill_App:
def __init__(self, root):
self.root = root
self.root.geometry("1350x700+0+0")
self.root.title("AIMS BILL-SOFTWARE")
bg_color = "#074463"
#======= Please Enter Your Shop name.
title = Label(self.root,text=" ARYA JAN SEVA KENDRA BILLING-SOFTWARE", bd=10, relief=GROOVE, bg=bg_color, fg="Yellow",font=("time new roman", 30, "bold"), pady=0).pack(fill=X)
#=============Varibale==========
#=============B/W Copy==========
#======= Please Enter Your Items Name.
self.Copy=IntVar()
self.PhotoGrapy=IntVar()
self.Color=IntVar()
self.Online=IntVar()
self.Offline=IntVar()
self.Scan=IntVar()
#=============Online Works ===========
self.Ration_Card=IntVar()
self.Adhaar_Card=IntVar()
self.Pan_Card=IntVar()
self.onprnt_lbl=IntVar()
self.Adhaar_Money=IntVar()
self.Mail_lbl=IntVar()
#==========Total Product & Tax Variable=========
self.Copy_price=StringVar()
self.Online_price=StringVar()
self.Money_Transfer=StringVar()
self.Copys_Tax=StringVar()
self.Online_Tax=StringVar()
self.Money_Transfer_Tax=StringVar()
#============Customer================
self.c_name=StringVar()
self.c_phone=StringVar()
self.bill_no=StringVar()
x=random.randint(1000,9999)
self.bill_no.set(str(x))
self.bill_search=StringVar()
#===============Customer Detail Frame
F1=LabelFrame(self.root,text="Customer Detail",font=("time new roman",15,"bold"),fg="gold",bg=bg_color)
F1.place(x=0,y=80,relwidth=1)
cname_lbl=Label(F1,text="Customer Name",bg=bg_color, fg="white",font=("time new roman",18,"bold")).grid(row=0,column=0,padx=1,pady=5)
cname_txt=Entry(F1,width=10,textvariable=self.c_name,font="arial 15",bd=7,relief=SUNKEN).grid(row=0,column=1,pady=5,padx=10)
cphn_lbl=Label(F1,text="Mob.No",bg=bg_color, fg="white",font=("time new roman",18,"bold")).grid(row=0,column=2,padx=1,pady=5)
cphn_txt=Entry(F1,width=10,textvariable=self.c_phone,font="arial 15",bd=7,relief=SUNKEN).grid(row=0,column=3,pady=5,padx=10)
c_bill_lbl=Label(F1,text="Bill_Number",bg=bg_color, fg="white",font=("time new roman",18,"bold")).grid(row=0,column=4,padx=1,pady=5)
c_bill_txt=Entry(F1,width=10,textvariable=self.bill_no,font="arial 15",bd=7,relief=SUNKEN).grid(row=0,column=5,pady=5,padx=10)
bill_btn=Button(F1,text="Search",command=self.find_bill,width=10,bd=7,font="arial 12 bold").grid(row=0,column=6,padx=5,pady=10)
#==============All-Photocopy============
F2=LabelFrame(self.root,bd=10,relief=GROOVE,text="Black-White Copy",font=("time new roman",15,"bold"),fg="gold",bg=bg_color)
F2.place(x=5 ,y=180,width=325,height=380)
#======= Please Enter Your Items Name.
copy_lbl=Label(F2,text="B/W copy",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=0,column=0,padx=10,pady=10,sticky="w")
copy_txt=Entry(F2,width=8,textvariable=self.Copy,font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=0,column=1,padx=10,pady=10)
Hcopy_lbl=Label(F2,text="PhotoGrapy",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=1,column=0,padx=10,pady=10,sticky="w")
Hcopy_txt=Entry(F2,width=8,textvariable=self.PhotoGrapy,font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=1,column=1,padx=10,pady=10)
copy_lbl=Label(F2,text="Color-copy",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=2,column=0,padx=10,pady=10,sticky="w")
bath_txt=Entry(F2,width=8,textvariable=self.Color,font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=2,column=1,padx=10,pady=10)
onprnt_lbl=Label(F2,text="Online-print",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=3,column=0,padx=10,pady=10,sticky="w")
onprnt_txt=Entry(F2,width=8,textvariable=self.Online,font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=3,column=1,padx=10,pady=10)
ofprnt_lbl=Label(F2,text="offline-print",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=4,column=0,padx=10,pady=10,sticky="w")
ofprnt_txt=Entry(F2,width=8,textvariable=self.Offline,font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=4,column=1,padx=10,pady=10)
BooK_lbl=Label(F2,text="Scan Doc",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=5,column=0,padx=10,pady=10,sticky="w")
BooK_txt=Entry(F2,width=8,textvariable=self.Scan , font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=5,column=1,padx=10,pady=10)
#=============Online Works==============
F3=LabelFrame(self.root,bd=10,relief=GROOVE,text="Online Works",font=("time new roman",15,"bold"),fg="gold",bg=bg_color)
F3.place(x=340 ,y=180,width=325,height=380)
copy_lbl=Label(F3,text="Ration Card",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=0,column=0,padx=10,pady=10,sticky="w")
copy_txt=Entry(F3,width=8,textvariable=self.Ration_Card,font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=0,column=1,padx=10,pady=10)
Hcopy_lbl=Label(F3,text="Adhaar Card",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=1,column=0,padx=10,pady=10,sticky="w")
Hcopy_txt=Entry(F3,width=8, textvariable=self.Adhaar_Card,font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=1,column=1,padx=10,pady=10)
copy_lbl=Label(F3,text="Pan Card",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=2,column=0,padx=10,pady=10,sticky="w")
bath_txt=Entry(F3,width=8,textvariable=self.Pan_Card,font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=2,column=1,padx=10,pady=10)
onprnt_lbl=Label(F3,text="I/C/D Certificate",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=3,column=0,padx=10,pady=10,sticky="w")
onprnt_txt=Entry(F3,width=8,textvariable=self.onprnt_lbl,font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=3,column=1,padx=10,pady=10)
Adhr_lbl=Label(F3,text="Adhaar Money",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=4,column=0,padx=10,pady=10,sticky="w")
Adhr_txt=Entry(F3,width=8,textvariable=self.Adhaar_Money,font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=4,column=1,padx=10,pady=10)
Mail_lbl=Label(F3,text="Mail Documents",font=("time new roman",16,"bold"),bg=bg_color ,fg="lightgreen").grid(row=5,column=0,padx=10,pady=10,sticky="w")
Mail_txt=Entry(F3,width=8, textvariable=self.Mail_lbl, font=("time new roman",16,"bold"),bd=5,relief=SUNKEN).grid(row=5,column=1,padx=10,pady=10)
#=============BIll Area================
F5=LabelFrame(self.root,bd=10,relief=GROOVE)
F5.place(x=670,y=180,width=350,height=380)
bill_title=Label(F5,text="Bill Area ",font="arial 15 bold",bd=7,relief=GROOVE).pack(fill=X)
scrol_y=Scrollbar(F5,orient=VERTICAL)
self.txtarea=Text(F5,yscrollcommand=scrol_y.set)
scrol_y.pack(side=RIGHT,fill=Y)
scrol_y.config(command=self.txtarea.yview)
self.txtarea.pack(fill=BOTH,expand=1)
#==========ButtonFrame============
F6=LabelFrame(self.root,bd=10,relief=GROOVE,text="Bill Menu",font=("time new roman",15,"bold"),fg="gold",bg=bg_color)
F6.place(x=0,y=560,relwidth=1,height=130)
m1=Label(F6,text="Total Copy Price",bg=bg_color,fg="red",font=("times new roman ",10,"bold")).grid(row=0,column=0,padx=10,pady=1,sticky="w")
m1_txt=Entry(F6,width=12, textvariable=self.Copy_price,font="arial 10 bold",bd=7,relief=SUNKEN).grid(row=0,column=1,padx=10,pady=1)
m2=Label(F6,text="Total Online Work Price",bg=bg_color,fg="red",font=("times new roman ",10,"bold")).grid(row=1,column=0,padx=10,pady=1,sticky="w")
m2_txt=Entry(F6,width=12,textvariable=self.Online_price,font="arial 10 bold",bd=7,relief=SUNKEN).grid(row=1,column=1,padx=10,pady=1)
m6=Label(F6,text="Created By:Anshu Gurjar",bg=bg_color,width=22,fg="yellow",font=("times new roman ",10,"bold")).grid(row=2,column=0,padx=4,pady=1,sticky="w")
# m6_Lable=Entry(F6,width=10,textvariable=self.Money_Transfer,font="arial 10 bold",bd=7,relief=SUNKEN).grid(row=2,column=1,padx=10,pady=1)
#=================Tax======================
m3=Label(F6,text="Copys Tax:",bg=bg_color,fg="red",font=("times new roman ",10,"bold")).grid(row=0,column=2,padx=10,pady=1,sticky="w")
m3_txt=Entry(F6,width=10,textvariable=self.Copys_Tax,font="arial 10 bold",bd=7,relief=SUNKEN).grid(row=0,column=3,padx=8,pady=1)
m4=Label(F6,text="Online Work Tax:",bg=bg_color,fg="red",font=("times new roman ",10,"bold")).grid(row=1,column=2,padx=10,pady=1,sticky="w")
m4_txt=Entry(F6,width=10,textvariable=self.Online_Tax,font="arial 10 bold",bd=7,relief=SUNKEN).grid(row=1,column=3,padx=5,pady=1)
m7=Label(F6,text="AIMS Industries ",width=15,bg=bg_color,fg="yellow",font=("times new roman ",10,"bold")).grid(row=2,column=2,padx=10,pady=1,sticky="w")
# ====================== Button Frame================
btn_F=Frame(F6,bd=7,relief=GROOVE)
btn_F.place(x=570,width=430,height=85)
total_btn=Button(btn_F,command=self.total,text="Total",bg="cadetblue",fg="Black",pady=15,bd=5,width=11, font="arial 9 bold").grid(row=0, column=0,padx=5,pady=5)
Bill_btn=Button(btn_F,command=self.bill_area,text="Generate Bill",bg="cadetblue",fg="Black",pady=15,bd=5,width=11, font="arial 9 bold").grid(row=0, column=1,padx=5,pady=5)
Clear_btn=Button(btn_F,command=self.clear_data,text="Clear",bg="cadetblue",fg="Black",pady=15,bd=5,width=11, font="arial 9 bold").grid(row=0, column=2,padx=5,pady=5)
Exit_btn=Button(btn_F,command=self.Exit_app, text="Exit",bg="cadetblue",fg="Black",pady=15,bd=5 ,width=11, font="arial 9 bold").grid(row=0, column=3,padx=5,pady=5)
self.welcome_bill()
def total(self):
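# Multiply each item count by its unit price (in Rs.), total the copy and
# online sections separately, then apply 3% tax to copy work and 5% tax to
# online work; the final bill sums both section totals plus both taxes.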
self.c_p_p=self.Copy.get()*2
self.p_g_p=self.PhotoGrapy.get()*30
self.c_l_p=self.Color.get()*10
self.o_l_p=self.Online.get()*10
self.o_f_p=self.Offline.get()*10
self.s_g_p=self.Scan.get()*10
self.total_Copy_price=float(
self.c_p_p+
self.p_g_p+
self.c_l_p+
self.o_l_p+
self.o_f_p+
self.s_g_p
)
self.Copy_price.set("Rs."+str(self.total_Copy_price))
self.C_Tax=round((self.total_Copy_price*0.03),2)
self.Copys_Tax.set("Rs."+str(self.C_Tax))
# use distinct attribute names so one item's price does not overwrite another's
self.o_r_p=self.Ration_Card.get()*100
self.o_a_p=self.Adhaar_Card.get()*60
self.o_p_p=self.Pan_Card.get()*200
self.o_c_p=self.onprnt_lbl.get()*100
self.o_am_p=self.Adhaar_Money.get()*30
self.o_m_p=self.Mail_lbl.get()*10
self.total_Online_price=float(
self.o_r_p+
self.o_a_p+
self.o_p_p+
self.o_c_p+
self.o_am_p+
self.o_m_p
)
self.Online_price.set("Rs."+str(self.total_Online_price))
self.o_tax=round((self.total_Online_price*0.05),2)
self.Online_Tax.set("Rs."+str(self.o_tax))
self.Total_bill=float(self.total_Copy_price+
self.total_Online_price+
self.C_Tax+
self.o_tax
)
def welcome_bill(self):
self.txtarea.delete('1.0',END) # clear the previous bill text before writing the header
self.txtarea.insert(END," **Welcome To Arya Jan Seva Kendra**")
self.txtarea.insert(END,f"\n Bill Number: {self.bill_no.get()}")
self.txtarea.insert(END,f"\n Shop Number: 9891960810,9911614006")
self.txtarea.insert(END,f"\n Gmail:cscsakipur@gmail.com")
self.txtarea.insert(END,f"\n ===================================")
self.txtarea.insert(END,f"\n Customer Name : {self.c_name.get()}")
self.txtarea.insert(END,f"\n Phone Number : {self.c_phone.get()}")
self.txtarea.insert(END,f"\n=====================================")
self.txtarea.insert(END,f"\n Products\t\tQTY\t\tPrice")
self.txtarea.insert(END,f"\n=====================================")
def bill_area(self):
if self.c_name.get()=="" or self.c_phone.get()=="":
messagebox.showerror("Error","Please Enter The Customer Details !")
elif self.Copy_price.get()=="Rs. 0.0" and self.Online_price.get()=="Rs. 0.0":
messagebox.showerror("Error","No Products Purcheased")
else:
self.welcome_bill()
if self.Copy.get()!=0:
self.txtarea.insert(END,f"\n Copy \t\t{self.Copy.get()}\t\t{self.c_p_p}")
if self.PhotoGrapy.get()!=0:
self.txtarea.insert(END,f"\n PhotoGrapy \t\t{self.PhotoGrapy.get()}\t\t{self.p_g_p}")
if self.Color.get()!=0:
self.txtarea.insert(END,f"\n Color Copy \t\t{self.Color.get()}\t\t{self.c_l_p}")
if self.Online.get()!=0:
self.txtarea.insert(END,f"\n Online Print \t\t{self.Online.get()}\t\t{self.o_l_p}")
if self.Offline.get()!=0:
self.txtarea.insert(END,f"\n Offline Print \t\t{self.Offline.get()}\t\t{self.o_f_p}")
if self.Scan.get()!=0:
self.txtarea.insert(END,f"\n Scan Docs \t\t{self.Scan.get()}\t\t{self.s_g_p}")
#=====================Online Works=================
if self.Ration_Card.get()!=0:
self.txtarea.insert(END,f"\n Ration Card \t\t{self.Ration_Card.get()}\t\t{self.o_r_p}")
if self.Adhaar_Card.get()!=0:
self.txtarea.insert(END,f"\n Adhaar Card \t\t{self.Adhaar_Card.get()}\t\t{self.o_a_p}")
if self.Pan_Card.get()!=0:
self.txtarea.insert(END,f"\n Pan Card \t\t{self.Pan_Card.get()}\t\t{self.o_p_p}")
if self.onprnt_lbl.get()!=0:
self.txtarea.insert(END,f"\n I/C/D Certif \t\t{self.onprnt_lbl.get()}\t\t{self.o_l_p}")
if self.Adhaar_Money.get()!=0:
self.txtarea.insert(END,f"\n Adhaar Money \t\t{self.Adhaar_Money.get()}\t\t{self.o_a_p}")
if self.Mail_lbl.get()!=0:
self.txtarea.insert(END,f"\n Share Docs\t\t{self.Mail_lbl.get()}\t\t{self.o_m_p}")
self.txtarea.insert(END,f"\n======================================")
if self.Copys_Tax.get()!="Rs.0.0":
self.txtarea.insert(END,f"\n Copy Tax:\t\t\t{self.Copys_Tax.get()}")
if self.Online_Tax.get()!="Rs. 0.0":
self.txtarea.insert(END,f"\n Online Works Tax:\t\t\t{self.Online_Tax.get()}")
self.txtarea.insert(END,f"\n======================================")
self.txtarea.insert(END,f"\n Total Bill:\t\t\t{str(self.Total_bill)}")
self.txtarea.insert(END,f"\n======================================")
self.save_Bill()
def save_Bill(self):
op=messagebox.askyesno("Save Bill","Do you save The Bill ?")
if op>0:
self.bill_data=self.txtarea.get('1.0',END)
f1=open("Bills/"+str(self.bill_no.get())+".txt","w")
f1.write(self.bill_data)
f1.close()
messagebox.showinfo("Saved Bill",f"Bill.No :{self.bill_no.get()} Saved Successfully")
else:
return
def find_bill(self):
present="no"
for i in os.listdir("bills/"):
if i.split('.')[0]==self.bill_search.get():
f1=open(f"bills/{i}","r")
self.txtarea.delete('1.0',END)
for d in f1:
self.txtarea.insert(END,d)
f1.close()
present="yes"
if present=="no":
messagebox.showerror("Error","Invalid Bill No.")
def clear_data(self):
op=messagebox.askyesno("Clear","Do you really want to Clear?")
if op>0:
#=============B/W Copy==========
self.Copy.set(0)
self.PhotoGrapy.set(0)
self.Color.set(0)
self.Online.set(0)
self.Offline.set(0)
self.Scan.set(0)
#=============Online Works ===========
self.Ration_Card.set(0)
self.Adhaar_Card.set(0)
self.Pan_Card.set(0)
self.onprnt_lbl.set(0)
self.Adhaar_Money.set(0)
self.Mail_lbl.set(0)
#==========Total Product & Tax Variable=========
self.Copy_price.set("")
self.Online_price.set("")
self.Money_Transfer.set("")
self.Copys_Tax.set("")
self.Online_Tax.set("")
self.Money_Transfer_Tax.set("")
#============Customer================
self.c_name.set("")
self.c_phone.set("")
self.bill_no.set("")
x=random.randint(1000,9999)
self.bill_no.set(str(x))
self.bill_search.set("")
self.welcome_bill()
def Exit_app(self):
op=messagebox.askyesno("Exit","Do you really want to exit")
if op>0:
self.root.destroy()
root = Tk()
obj = Bill_App(root)
root.mainloop()
|
# coding=utf-8
from __future__ import unicode_literals
from ..address import Provider as AddressProvider
class Provider(AddressProvider):
city_formats = ('{{city_name}}', )
street_name_formats = (
'{{first_name}}-{{last_name}}-{{street_suffix_long}}',
'{{last_name}}{{street_suffix_short}}'
)
street_address_formats = ('{{street_name}} {{building_number}}', )
address_formats = ('{{street_address}}\n{{postcode}} {{city}}', )
building_number_formats = ('###', '##', '#', '#/#')
street_suffixes_long = (
'Gasse', 'Platz', 'Ring', 'Straße', 'Weg', 'Allee'
)
street_suffixes_short = (
'gasse', 'platz', 'ring', 'straße', 'str.', 'weg', 'allee'
)
postcode_formats = ('#####', )
cities = (
'Augsburg', 'Aschaffenburg', 'Aachen', 'Auerbach', 'Ahaus',
'Badibling', 'Amberg', 'Ansbach', 'Angermünde', 'Anklam',
'Altötting', 'Apolda', 'Arnstadt', 'Artern', 'Altentreptow', 'Aue',
'Aurich', 'Berlin', 'Bamberg', 'Böblingen', 'Bernburg', 'Brand',
'Erbisdorf', 'Beilngries', 'Belzig', 'Berchtesgaden', 'Biedenkopf',
'Bischofswerda', 'Backnang', 'Borna', 'Bogen', 'Borken', 'Bruchsal',
'Brandenburg', 'Burg', 'Brilon', 'Bad Brückenau', 'Bremervörde',
'Bersenbrück', 'Beeskow', 'Bayreuth', 'Bitterfeld', 'Burgdorf',
'Burglengenfeld', 'Büsingenm Hochrhein', 'Bützow', 'Bautzen',
'Bergzabern', 'Chemnitz', 'Calau', 'Cottbus', 'Celle', 'Cloppenburg',
'Coburg', 'Crailsheim', 'Cuxhaven', 'Calw', 'Darmstadt', 'Dachau',
'Badoberan', 'Dresden', 'Dessau', 'Deggendorf', 'Diepholz',
'Dieburg', 'Dinslaken', 'Dinkelsbühl', 'Döbeln', 'Demmin', 'Düren',
'Donaueschingen', 'Duderstadt', 'Delitzsch', 'Eilenburg',
'Ebersberg', 'Ebern', 'Ebermannstadt', 'Eckernförde', 'Erding',
'Erfurt', 'Eggenfelden', 'Eisenhüttenstadt', 'Eichstätt',
'Eichstätt', 'Eisleben', 'Einbeck', 'Eisenberg', 'Emmendingen',
'Erkelenz', 'Eisenach', 'Euskirchen', 'Eutin', 'Eberswalde', 'Soltau',
'Fallingbostel', 'Fulda', 'Freudenstadt', 'Feuchtwangen',
'Fürstenfeldbruck', 'Main-Höchst)', 'Finsterwalde', 'Flöha',
'Forchheim', 'Forst', 'Bad Freienwalde', 'Freising', 'Freital',
'Füssen', 'Fürstenwalde', 'Gera', 'Gardelegen', 'Garmisch-Partenkirchen',
'Schwäbisch Gmünd', 'Gadebusch', 'Geldern',
'Gerolzhofen', 'Germersheim', 'Gifhorn', 'Groß-Gerau', 'Geithain',
'Gräfenhainichen', 'Gießen', 'Grimmen', 'Gelnhausen', 'Genthin',
'Sankt Goar', 'Sankt Goarshausen', 'Göttingen', 'Göppingen',
'Görlitz', 'Grafenau', 'Großenhain', 'Griesbach Rottal', 'Grimma',
'Gransee', 'Greiz', 'Goslar', 'Gütersloh', 'Gotha', 'Guben',
'Gunzenhausen', 'Güstrow', 'Grevenbroich', 'Grevesmühlen',
'Günzburg', 'Hannover', 'Hammelburg', 'Bremen', 'Hildburghausen',
'Halberstadt', 'Hainichen', 'Hechingen', 'Haldensleben', 'Helmstedt',
'Hersbruck', 'Hettstedt', 'Herford', 'Hagenow', 'Greifswald',
'Hamburg', 'Hohenmölsen', 'Hildesheim', 'Heiligenstadt', 'Lübeck',
'Hannoversch Münden', 'Hofgeismar', 'Holzminden', 'Hohenstein-Ernstthal',
'Rostock', 'Heinsberg', 'Hansestadttralsund', 'Hünfeld',
'Husum', 'Havelberg', 'Wismar', 'Höxter', 'Hoyerswerda', 'Herzberg',
'Ilmenau', 'Illertissen', 'Ingolstadt', 'Iserlohn', 'Jena',
'Jüterbog', 'Jessen', 'Jülich', 'Karlsruhe', 'Kronach', 'Kelheim',
'Kehl', 'Kemnath', 'Bad Kissingen', 'Bad Kreuznach',
'Kaiserslautern', 'Kleve', 'Klötze', 'Kamenz', 'Konstanz', 'Koblenz',
'Kötzting', 'Kassel', 'Kitzingen', 'Kulmbach', 'Kusel', 'Königs Wusterhausen',
'Kyritz', 'Leipziger Land', 'Ludwigsburg',
'Lobenstein', 'Lübz', 'Luckau', 'Lemgo', 'Lüneburg', 'Lüdinghausen',
'Bad Liebenwerda', 'Lichtenfels', 'Lübben', 'Lörrach', 'Lippstadt',
'Bad Langensalza', 'Lüdenscheid', 'Luckenwalde', 'Ludwigslust',
'München', 'Marienberg', 'Mainburg', 'Mallersdorf',
'Marktheidenfeld', 'Miesbach', 'Malchin', 'Magdeburg', 'Mettmann',
'Melsungen', 'Meißen', 'Melle', 'Meppen', 'Merseburg',
'Mellrichstadt', 'Bad Mergentheim', 'Meiningen', 'Mühlhausen',
'Miltenberg', 'Moers', 'Monschau', 'Mühldorfm Inn', 'Mittweida',
'Mayen', 'Nabburg', 'Naila', 'Nauen', 'Neubrandenburg', 'Nordhausen',
'Neuss', 'Neunburg vorm Wald', 'Neustadtner Waldnaab', 'Northeim',
'Norden', 'Nördlingen', 'Neuruppin', 'Neustadtm Rübenberge',
'Nürtingen', 'Neu-Ulm', 'Niesky', 'Neustrelitz', 'Osterburg',
'Ochsenfurt', 'Olpe', 'Osterodem Harz', 'Oranienburg',
'Oberviechtach', 'Oschatz', 'Potsdam', 'Passau', 'Pfaffenhofenner Ilm',
'Parsberg', 'Paderborn', 'Parchim', 'Peine', 'Pegnitz',
'Perleberg', 'Pinneberg', 'Pritzwalk', 'Plauen', 'Pößneck',
'Pirmasens', 'Pasewalk', 'Prenzlau', 'Querfurt', 'Quedlinburg',
'Regensburg', 'Rastatt', 'Ribnitz-Damgarten', 'Recklinghausen',
'Regen', 'Rehau', 'Roth', 'Riesa', 'Rochlitz', 'Rathenow',
'Rosenheim', 'Roding', 'Rockenhausen', 'Rothenburg oberauber',
'Reutlingen', 'Rudolstadt', 'Ravensburg', 'Rottweil', 'Stuttgart',
'Schwandorf', 'Säckingen', 'Stadtsteinach', 'Saarbrücken',
'Strasburg', 'Schleiz', 'Stade', 'Sondershausen', 'Stendal',
'Sebnitz', 'Seelow', 'Scheinfeld', 'Senftenberg', 'Staßfurt',
'Sangerhausen', 'Schwäbisch Hall', 'Suhl', 'Siegen', 'Sigmaringen',
'Saulgau', 'Schmölln', 'Saarlouis', 'Schlüchtern', 'Badalzungen',
'Schwabmünchen', 'Schwerin', 'Soest', 'Schrobenhausen', 'Schongau',
'Soltau', 'Sömmerda', 'Sonneberg', 'Spremberg', 'Strausberg',
'Stadtroda', 'Steinfurt', 'Starnberg', 'Sternberg', 'Stade',
'Staffelstein', 'Stollberg', 'Sulzbach-Rosenberg', 'Schweinfurt',
'Schwarzenberg', 'Tecklenburg', 'Teterow', 'Torgau', 'Tirschenreuth',
'Tuttlingen', 'Tübingen', 'Uelzen', 'Ueckermünde', 'Uffenheim',
'Vechta', 'Vilsbiburg', 'Viersen', 'Viechtach', 'Vohenstrauß',
'Warendorf', 'Wittenberg', 'Worbis', 'Wiedenbrück', 'Werdau',
'Weimar', 'Wertingen', 'Wesel', 'Wolfenbüttel', 'Witzenhausen',
'Wittstock', 'Wolgast', 'Wolmirstedt', 'Wolfach', 'Wolfratshausen',
'Wernigerode', 'Waren', 'Weißenfels', 'Weißwasser', 'Wittmund',
'Waldmünchen', 'Wunsiedel', 'Wurzen', 'Wetzlar', 'Wanzleben',
'Zerbst', 'Zschopau', 'Zeulenroda', 'Zossen'
)
states = (
'Baden-Württemberg', 'Bayern', 'Berlin', 'Brandenburg', 'Bremen',
'Hamburg', 'Hessen', 'Mecklenburg-Vorpommern', 'Niedersachsen',
'Nordrhein-Westfalen', 'Rheinland-Pfalz', 'Saarland', 'Sachsen',
'Sachsen-Anhalt', 'Schleswig-Holstein', 'Thüringen'
)
countries = (
'Afghanistan', 'Alandinseln', 'Albanien', 'Algerien', 'Amerikanisch-Ozeanien',
'Amerikanisch-Samoa', 'Amerikanische Jungferninseln',
'Andorra', 'Angola', 'Anguilla', 'Antarktis', 'Antigua und Barbuda',
'Argentinien', 'Armenien', 'Aruba', 'Aserbaidschan', 'Australien',
'Ägypten', 'Äquatorialguinea', 'Äthiopien', 'Äußeres Ozeanien',
'Bahamas', 'Bahrain', 'Bangladesch', 'Barbados', 'Belarus', 'Belgien',
'Belize', 'Benin', 'Bermuda', 'Bhutan', 'Bolivien', 'Bosnien und Herzegowina',
'Botsuana', 'Bouvetinsel', 'Brasilien', 'Britische Jungferninseln',
'Britisches Territorium im Indischen Ozean', 'Brunei Darussalam',
'Bulgarien', 'Burkina Faso', 'Burundi', 'Chile', 'China',
'Cookinseln', 'Costa Rica', 'Côte d’Ivoire', 'Demokratische Republik Kongo',
'Demokratische Volksrepublik Korea', 'Deutschland',
'Dominica', 'Dominikanische Republik', 'Dschibuti', 'Dänemark',
'Ecuador', 'El Salvador', 'Eritrea', 'Estland',
'Falklandinseln', 'Fidschi', 'Finnland', 'Frankreich', 'Französisch-Guayana',
'Französisch-Polynesien',
'Färöer', 'Gabun', 'Gambia', 'Georgien', 'Ghana',
'Gibraltar', 'Grenada', 'Griechenland', 'Grönland', 'Guadeloupe',
'Guam', 'Guatemala', 'Guernsey', 'Guinea', 'Guinea-Bissau', 'Guyana',
'Haiti', 'Heard- und McDonald-Inseln', 'Honduras', 'Indien',
'Indonesien', 'Irak', 'Iran', 'Irland', 'Island', 'Isle of Man',
'Israel', 'Italien', 'Jamaika', 'Japan', 'Jemen', 'Jersey',
'Jordanien', 'Kaimaninseln', 'Kambodscha', 'Kamerun', 'Kanada', 'Kap Verde',
'Kasachstan', 'Katar', 'Kenia', 'Kirgisistan', 'Kiribati',
'Kokosinseln', 'Kolumbien', 'Komoren', 'Kongo', 'Kroatien', 'Kuba',
'Kuwait', 'Laos', 'Lesotho', 'Lettland', 'Libanon', 'Liberia',
'Libyen', 'Liechtenstein', 'Litauen', 'Luxemburg', 'Madagaskar',
'Malawi', 'Malaysia', 'Malediven', 'Mali', 'Malta', 'Marokko',
'Marshallinseln', 'Martinique', 'Mauretanien', 'Mauritius',
'Mayotte', 'Mazedonien', 'Mexiko', 'Mikronesien', 'Monaco',
'Mongolei', 'Montenegro', 'Montserrat', 'Mosambik', 'Myanmar',
'Namibia', 'Nauru', 'Nepal', 'Neukaledonien', 'Neuseeland',
'Nicaragua', 'Niederlande', 'Niederländische Antillen', 'Niger',
'Nigeria', 'Niue', 'Norfolkinsel', 'Norwegen', 'Nördliche Marianen',
'Oman', 'Osttimor', 'Österreich', 'Pakistan', 'Palau',
'Palästinensische Gebiete', 'Panama', 'Papua-Neuguinea', 'Paraguay',
'Peru', 'Philippinen', 'Pitcairn', 'Polen', 'Portugal', 'Puerto Rico',
'Republik Korea', 'Republik Moldau', 'Ruanda', 'Rumänien',
'Russische Föderation', 'Réunion', 'Salomonen', 'Sambia', 'Samoa',
'San Marino', 'Saudi-Arabien', 'Schweden', 'Schweiz', 'Senegal',
'Serbien', 'Serbien und Montenegro', 'Seychellen', 'Sierra Leone',
'Simbabwe', 'Singapur', 'Slowakei', 'Slowenien', 'Somalia',
'Sonderverwaltungszone Hongkong', 'Sonderverwaltungszone Macao',
'Spanien', 'Sri Lanka', 'St. Barthélemy', 'St. Helena', 'St. Kitts und Nevis',
'St. Lucia', 'St. Martin', 'St. Pierre und Miquelon',
'St. Vincent und die Grenadinen', 'Sudan', 'Suriname', 'Svalbard und Jan Mayen',
'Swasiland', 'Syrien', 'São Tomé und Príncipe',
'Südafrika', 'Südgeorgien und die Südlichen Sandwichinseln',
'Tadschikistan', 'Taiwan', 'Tansania', 'Thailand', 'Togo', 'Tokelau',
'Tonga', 'Trinidad und Tobago', 'Tschad', 'Tschechische Republik',
'Tunesien', 'Turkmenistan', 'Turks- und Caicosinseln', 'Tuvalu',
'Türkei', 'Uganda', 'Ukraine', 'Ungarn', 'Uruguay', 'Usbekistan',
'Vanuatu', 'Vatikanstadt', 'Venezuela', 'Vereinigte Arabische Emirate',
'Vereinigte Staaten', 'Vereinigtes Königreich', 'Vietnam', 'Wallis und Futuna',
'Weihnachtsinsel', 'Westsahara', 'Zentralafrikanische Republik',
'Zypern'
)
@classmethod
def street_suffix_short(cls):
return cls.random_element(cls.street_suffixes_short)
@classmethod
def street_suffix_long(cls):
return cls.random_element(cls.street_suffixes_long)
@classmethod
def city_name(cls):
return cls.random_element(cls.cities)
@classmethod
def state(cls):
return cls.random_element(cls.states)
|
from decimal import Decimal
from .formed import Formed
from .formationmixin import FormationMixin
from .formationfactory import FormationFactory
class Squad(FormationFactory, Formed, FormationMixin):
def __init__(self):
FormationMixin.__init__(self)
self._attack_success = self.to_attack()
"""
Formed(ABC) class realisation
####################################################################################################################
Beginning:
"""
def to_attack(self) -> Decimal:
"""
Calculate the success rate of a squad’s attack.
:return: (Decimal)
"""
return round(Decimal(self.gmean([unit.to_attack() for unit in self._units])), 4)
def to_damage(self) -> float:
"""
Calculate the amount of damage the squad deals to the enemy.
:return: (float)
"""
self._last_to_damage_val = sum([unit.to_damage() for unit in self._units])
return self._last_to_damage_val
def get_damage(self, dmg_val: float) -> None:
"""
Get squad damage.
:return: (None)
"""
unit_dmg = dmg_val / len(self._units)
for unit in self._units:
unit.get_damage(unit_dmg)
def incrs_unit_experience(self) -> None:
"""
Increase the experience of each unit in squad.
:return: (None)
"""
for unit in self.units:
unit.incrs_experience()
"""
End
####################################################################################################################
"""
"""
FormationFactory(ABC) class realisation
####################################################################################################################
Beginning:
"""
@classmethod
def new_formation(cls):
"""
Factory method.
:return:
"""
keys = [subclass.__qualname__ for subclass in FormationFactory.__subclasses__()]
values = [subclass for subclass in FormationFactory.__subclasses__()]
subclasses = dict(zip(keys, values))
for key, value in subclasses.items():
if cls.__qualname__ == key:
return cls.__call__()
return False
"""
End
####################################################################################################################
"""
|
from .breed import Breed
from .dog import Dog
from .parenthood import Parenthood
from .role import Role
from .user import User
from .person import Person
|
import csv
import requests
import json
str_articlesdata_csv = 'ArticlesData.csv'
str_launchesdata_csv = 'LaunchesData.csv'
str_eventsdata_csv = 'EventsData.csv'
def write_row_launches(csvWriter, article_my_id, article_api_id, data):
for d in data:
csvWriter.writerow(
[article_my_id, article_api_id, d["id"], d["provider"]])
def write_row_events(csvWriter, article_my_id, article_api_id, data):
for d in data:
csvWriter.writerow(
[article_my_id, article_api_id, d["id"], d["provider"]])
# Write data to a CSV file
def writerSample(python_data):
with open(str_articlesdata_csv, mode="w", encoding='utf-8', newline='') as csvfile1, open(str_launchesdata_csv, mode="w", encoding='utf-8', newline='') as csvfile2, open(str_eventsdata_csv, mode="w", encoding='utf-8', newline='') as csvfile3:
# create a csv writers
csvWriter = csv.writer(csvfile1)
csvWriterLaunches = csv.writer(csvfile2)
csvWriterEvents = csv.writer(csvfile3)
# write the header
csvWriter.writerow(["my_id", "api_id", "title", "url", "imageUrl", "newsSite",
"summary", "updatedAt", "publishedAt", "featured", "launches", "events"])
csvWriterLaunches.writerow(
["article_my_id", "article_api_id", "launche_id", "provider"])
csvWriterEvents.writerow(
["article_my_id", "article_api_id", "event_id", "provider"])
# write rows
my_id = 0
for data in python_data:
if data['launches'] != []:
write_row_launches(csvWriterLaunches, my_id,
data["id"], data['launches'])
data['launches'] = f"NOT_EMPTY"
if data['events'] != []:
write_row_events(csvWriterEvents, my_id,
data["id"], data['events'])
data['events'] = f"NOT_EMPTY"
csvWriter.writerow([my_id, data["id"], data["title"], data["url"], data["imageUrl"], data["newsSite"],
data["summary"], data["updatedAt"], data["publishedAt"], data["featured"], data["launches"], data["events"]])
my_id += 1
if my_id >= 9_999: # hobby dev plan: 10_000
break
def get_len(url):
response = requests.get(url=url)
return int(response.content, 10)
def get_data(url):
response = requests.get(url=url)
if response.status_code in range(200, 300):
r = json.loads(response.content)
return r
def main():
total_articles = list()
url = 'https://api.spaceflightnewsapi.net/v3'
# count_total_articles = get_len(url + 'articles/count')
rote = '/articles'
param_limit = 10
param_start = 0 # 12_003
data = True
print(
f"Retrieving Spaceflight News API '{rote}' data ... _limit {param_limit} _start {param_start}")
while data:
url_limit = f'{rote}?_limit={param_limit}'
url_start = f'&_start={param_start}'
data = get_data(url + url_limit + url_start)
if data:
for info in data:
print('.', end=" ")
total_articles.append(info)
param_start += param_limit
print("Creating csv files...")
writerSample(total_articles)
print("Done!")
if __name__ == '__main__':
main()
|
import copy
import pytest
from saleor.plugins.base_plugin import ConfigurationTypeField
from saleor.plugins.error_codes import PluginErrorCode
from saleor.plugins.manager import get_plugins_manager
from saleor.plugins.models import PluginConfiguration
from tests.api.utils import assert_no_permission, get_graphql_content
from tests.plugins.sample_plugins import PluginSample
from tests.plugins.utils import get_config_value
@pytest.fixture
def staff_api_client_can_manage_plugins(staff_api_client, permission_manage_plugins):
staff_api_client.user.user_permissions.add(permission_manage_plugins)
return staff_api_client
PLUGINS_QUERY = """
{
plugins(first:1){
edges{
node{
name
description
active
id
configuration{
name
type
value
helpText
label
}
}
}
}
}
"""
def test_query_plugin_configurations(staff_api_client_can_manage_plugins, settings):
# Enable test plugin
settings.PLUGINS = ["tests.plugins.sample_plugins.PluginSample"]
response = staff_api_client_can_manage_plugins.post_graphql(PLUGINS_QUERY)
content = get_graphql_content(response)
plugins = content["data"]["plugins"]["edges"]
assert len(plugins) == 1
plugin = plugins[0]["node"]
manager = get_plugins_manager()
sample_plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
configuration_structure = PluginSample.CONFIG_STRUCTURE
assert plugin["id"] == sample_plugin.PLUGIN_ID
assert plugin["name"] == sample_plugin.PLUGIN_NAME
assert plugin["active"] == sample_plugin.DEFAULT_ACTIVE
assert plugin["description"] == sample_plugin.PLUGIN_DESCRIPTION
for index, configuration_item in enumerate(plugin["configuration"]):
assert configuration_item["name"] == sample_plugin.configuration[index]["name"]
if (
confiugration_structure[configuration_item["name"]]["type"]
== ConfigurationTypeField.STRING
):
assert (
configuration_item["value"]
== sample_plugin.configuration[index]["value"]
)
elif configuration_item["value"] is None:
assert not sample_plugin.configuration[index]["value"]
else:
assert (
configuration_item["value"]
== str(sample_plugin.configuration[index]["value"]).lower()
)
@pytest.mark.parametrize(
"password, expected_password, api_key, expected_api_key",
[
(None, None, None, None),
("ABCDEFGHIJ", "", "123456789", "6789"),
("", None, "", None),
(None, None, "1234", "4"),
],
)
def test_query_plugins_hides_secret_fields(
password,
expected_password,
api_key,
expected_api_key,
staff_api_client,
permission_manage_plugins,
settings,
):
settings.PLUGINS = ["tests.plugins.sample_plugins.PluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
configuration = copy.deepcopy(plugin.configuration)
for conf_field in configuration:
if conf_field["name"] == "Password":
conf_field["value"] = password
if conf_field["name"] == "API private key":
conf_field["value"] = api_key
manager.save_plugin_configuration(
PluginSample.PLUGIN_ID,
PluginSample.PLUGIN_NAME,
{"active": True, "configuration": configuration},
)
staff_api_client.user.user_permissions.add(permission_manage_plugins)
response = staff_api_client.post_graphql(PLUGINS_QUERY)
content = get_graphql_content(response)
plugins = content["data"]["plugins"]["edges"]
assert len(plugins) == 1
plugin = plugins[0]["node"]
for conf_field in plugin["configuration"]:
if conf_field["name"] == "Password":
assert conf_field["value"] == expected_password
if conf_field["name"] == "API private key":
assert conf_field["value"] == expected_api_key
def test_query_plugin_configurations_as_customer_user(user_api_client, settings):
settings.PLUGINS = ["tests.plugins.sample_plugins.PluginSample"]
response = user_api_client.post_graphql(PLUGINS_QUERY)
assert_no_permission(response)
PLUGIN_QUERY = """
query plugin($id: ID!){
plugin(id:$id){
id
name
description
active
configuration{
name
value
type
helpText
label
}
}
}
"""
@pytest.mark.parametrize(
"password, expected_password, api_key, expected_api_key",
[
(None, None, None, None),
("ABCDEFGHIJ", "", "123456789", "6789"),
("", None, "", None),
(None, None, "1234", "4"),
],
)
def test_query_plugin_hides_secret_fields(
password,
expected_password,
api_key,
expected_api_key,
staff_api_client,
permission_manage_plugins,
settings,
):
settings.PLUGINS = ["tests.api.test_plugins.PluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
configuration = copy.deepcopy(plugin.configuration)
for conf_field in configuration:
if conf_field["name"] == "Password":
conf_field["value"] = password
if conf_field["name"] == "API private key":
conf_field["value"] = api_key
manager.save_plugin_configuration(
PluginSample.PLUGIN_ID,
PluginSample.PLUGIN_NAME,
{"active": True, "configuration": configuration},
)
variables = {"id": plugin.PLUGIN_ID}
staff_api_client.user.user_permissions.add(permission_manage_plugins)
response = staff_api_client.post_graphql(PLUGIN_QUERY, variables)
content = get_graphql_content(response)
plugin = content["data"]["plugin"]
for conf_field in plugin["configuration"]:
if conf_field["name"] == "Password":
assert conf_field["value"] == expected_password
if conf_field["name"] == "API private key":
assert conf_field["value"] == expected_api_key
def test_query_plugin_configuration(
staff_api_client, permission_manage_plugins, settings
):
settings.PLUGINS = ["tests.api.test_plugins.PluginSample"]
manager = get_plugins_manager()
sample_plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
variables = {"id": sample_plugin.PLUGIN_ID}
staff_api_client.user.user_permissions.add(permission_manage_plugins)
response = staff_api_client.post_graphql(PLUGIN_QUERY, variables)
content = get_graphql_content(response)
plugin = content["data"]["plugin"]
assert plugin["name"] == sample_plugin.PLUGIN_NAME
assert plugin["id"] == sample_plugin.PLUGIN_ID
assert plugin["active"] == sample_plugin.active
assert plugin["description"] == sample_plugin.PLUGIN_DESCRIPTION
configuration_item = plugin["configuration"][0]
assert configuration_item["name"] == sample_plugin.configuration[0]["name"]
assert configuration_item["value"] == sample_plugin.configuration[0]["value"]
def test_query_plugin_configuration_for_invalid_plugin_name(
staff_api_client, permission_manage_plugins
):
variables = {"id": "fake-name"}
staff_api_client.user.user_permissions.add(permission_manage_plugins)
response = staff_api_client.post_graphql(PLUGIN_QUERY, variables)
content = get_graphql_content(response)
assert content["data"]["plugin"] is None
def test_query_plugin_configuration_as_customer_user(user_api_client, settings):
settings.PLUGINS = ["tests.api.test_plugins.PluginSample"]
manager = get_plugins_manager()
sample_plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
variables = {"id": sample_plugin.PLUGIN_ID}
response = user_api_client.post_graphql(PLUGIN_QUERY, variables)
assert_no_permission(response)
PLUGIN_UPDATE_MUTATION = """
mutation pluginUpdate(
$id: ID!, $active: Boolean, $configuration: [ConfigurationItemInput]){
pluginUpdate(
id:$id,
input:{active: $active, configuration: $configuration}
){
plugin{
name
active
configuration{
name
value
type
helpText
label
}
}
errors{
field
message
}
pluginsErrors {
field
code
}
}
}
"""
@pytest.mark.parametrize(
"active, updated_configuration_item",
[
(True, {"name": "Username", "value": "user"}),
(False, {"name": "Username", "value": "admin@example.com"}),
],
)
def test_plugin_configuration_update(
staff_api_client_can_manage_plugins, settings, active, updated_configuration_item
):
settings.PLUGINS = ["tests.plugins.sample_plugins.PluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
old_configuration = copy.deepcopy(plugin.configuration)
variables = {
"id": plugin.PLUGIN_ID,
"active": active,
"configuration": [updated_configuration_item],
}
response = staff_api_client_can_manage_plugins.post_graphql(
PLUGIN_UPDATE_MUTATION, variables
)
get_graphql_content(response)
plugin = PluginConfiguration.objects.get(identifier=PluginSample.PLUGIN_ID)
assert plugin.active == active
first_configuration_item = plugin.configuration[0]
assert first_configuration_item["name"] == updated_configuration_item["name"]
assert first_configuration_item["value"] == updated_configuration_item["value"]
second_configuration_item = plugin.configuration[1]
assert second_configuration_item["name"] == old_configuration[1]["name"]
assert second_configuration_item["value"] == old_configuration[1]["value"]
def test_plugin_configuration_update_containing_invalid_plugin_name(
staff_api_client_can_manage_plugins,
):
variables = {
"id": "fake-name",
"active": True,
"configuration": [{"name": "Username", "value": "user"}],
}
response = staff_api_client_can_manage_plugins.post_graphql(
PLUGIN_UPDATE_MUTATION, variables
)
content = get_graphql_content(response)
assert content["data"]["pluginUpdate"]["pluginsErrors"][0] == {
"field": "id",
"code": PluginErrorCode.NOT_FOUND.name,
}
def test_plugin_update_saves_boolean_as_boolean(
staff_api_client_can_manage_plugins, settings
):
settings.PLUGINS = ["tests.plugins.sample_plugins.PluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
use_sandbox = get_config_value("Use sandbox", plugin.configuration)
variables = {
"id": plugin.PLUGIN_ID,
"active": plugin.active,
"configuration": [{"name": "Use sandbox", "value": True}],
}
response = staff_api_client_can_manage_plugins.post_graphql(
PLUGIN_UPDATE_MUTATION, variables
)
content = get_graphql_content(response)
assert len(content["data"]["pluginUpdate"]["errors"]) == 0
use_sandbox_new_value = get_config_value("Use sandbox", plugin.configuration)
assert type(use_sandbox) == type(use_sandbox_new_value)
@pytest.mark.parametrize(
"plugin_filter, count",
[
({"search": "PluginSample"}, 1),
({"search": "description"}, 2),
({"active": True}, 2),
({"search": "Plugin"}, 2),
({"active": "False", "search": "Plugin"}, 1),
],
)
def test_plugins_query_with_filter(
plugin_filter, count, staff_api_client_can_manage_plugins, settings
):
settings.PLUGINS = [
"tests.plugins.sample_plugins.PluginSample",
"tests.plugins.sample_plugins.PluginInactive",
"tests.plugins.sample_plugins.ActivePlugin",
]
query = """
query ($filter: PluginFilterInput) {
plugins(first: 5, filter:$filter) {
totalCount
edges {
node {
id
}
}
}
}
"""
variables = {"filter": plugin_filter}
response = staff_api_client_can_manage_plugins.post_graphql(query, variables)
content = get_graphql_content(response)
assert content["data"]["plugins"]["totalCount"] == count
def test_plugin_configuration_update_as_customer_user(user_api_client, settings):
settings.PLUGINS = ["tests.plugins.sample_plugins.PluginSample"]
manager = get_plugins_manager()
plugin = manager.get_plugin(PluginSample.PLUGIN_ID)
variables = {
"id": plugin.PLUGIN_ID,
"active": True,
"configuration": [{"name": "Username", "value": "user"}],
}
response = user_api_client.post_graphql(PLUGIN_UPDATE_MUTATION, variables)
assert_no_permission(response)
QUERY_PLUGIN_WITH_SORT = """
query ($sort_by: PluginSortingInput!) {
plugins(first:5, sortBy: $sort_by) {
edges{
node{
name
}
}
}
}
"""
@pytest.mark.parametrize(
"plugin_sort, result_order",
[
(
{"field": "NAME", "direction": "ASC"},
["Active", "PluginInactive", "PluginSample"],
),
(
{"field": "NAME", "direction": "DESC"},
["PluginSample", "PluginInactive", "Active"],
),
(
{"field": "IS_ACTIVE", "direction": "ASC"},
["PluginInactive", "Active", "PluginSample"],
),
(
{"field": "IS_ACTIVE", "direction": "DESC"},
["Active", "PluginSample", "PluginInactive"],
),
],
)
def test_query_plugins_with_sort(
plugin_sort, result_order, staff_api_client_can_manage_plugins, settings
):
settings.PLUGINS = [
"tests.plugins.sample_plugins.PluginSample",
"tests.plugins.sample_plugins.PluginInactive",
"tests.plugins.sample_plugins.ActivePlugin",
]
variables = {"sort_by": plugin_sort}
response = staff_api_client_can_manage_plugins.post_graphql(
QUERY_PLUGIN_WITH_SORT, variables
)
content = get_graphql_content(response)
plugins = content["data"]["plugins"]["edges"]
for order, plugin_name in enumerate(result_order):
assert plugins[order]["node"]["name"] == plugin_name
|
import numpy as np
import pandas as pd
from sklearn.metrics import auc
def AUSUC(Acc_tr, Acc_ts):
""" Calc area under seen-unseen curve """
# Sort by X axis
X_sorted_arg = np.argsort(Acc_tr)
sorted_X = np.array(Acc_tr)[X_sorted_arg]
sorted_Y = np.array(Acc_ts)[X_sorted_arg]
# zero pad
leftmost_X, leftmost_Y = 0, sorted_Y[0]
rightmost_X, rightmost_Y = sorted_X[-1], 0
sorted_X = np.block([np.array([leftmost_X]), sorted_X, np.array([rightmost_X])])
sorted_Y = np.block([np.array([leftmost_Y]), sorted_Y, np.array([rightmost_Y])])
# eval AUC
AUSUC = auc(sorted_X, sorted_Y)
return AUSUC
def calc_cs_ausuc(pred, y_gt, seen_classses, unseen_classses, gamma_range=None, verbose=True):
if gamma_range is None:
mx = pred.max()
delta = mx / 100
gamma_range = np.arange(-mx, mx, delta)
zs_metrics = ZSL_Metrics(seen_classses, unseen_classses)
Acc_tr_values = []
Acc_ts_values = []
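# Calibrated stacking: subtracting gamma from the seen-class scores trades seen
# accuracy (Acc_tr) against unseen accuracy (Acc_ts); sweeping gamma over the
# range traces the seen-unseen curve whose area is reported.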
for gamma in gamma_range:
cs_pred = pred.copy()
cs_pred[:, seen_classses] -= gamma
Acc_tr, Acc_ts, H = zs_metrics.generalized_scores(y_gt, cs_pred)
Acc_tr_values.append(Acc_tr)
Acc_ts_values.append(Acc_ts)
cs_ausuc = AUSUC(Acc_tr=Acc_tr_values, Acc_ts=Acc_ts_values)
if min(Acc_tr_values) > 0.01:
raise RuntimeError(f'CS AUSUC: Increase gamma range (add low values), because min(Acc_tr_values) equals {min(Acc_tr_values)}')
if min(Acc_ts_values) > 0.01:
raise RuntimeError(f'CS AUSUC: Increase gamma range (add high values), because min(Acc_ts_values) equals {min(Acc_ts_values)}')
if verbose:
print(f'AUSUC (by Calibrated Stacking) = {100*cs_ausuc:.1f}')
return cs_ausuc
class ZSL_Metrics():
def __init__(self, seen_classes, unseen_classes, report_entropy=False):
self._seen_classes = np.sort(seen_classes)
self._unseen_classes = np.sort(unseen_classes)
self._n_seen = len(seen_classes)
self._n_unseen = len(unseen_classes)
self._report_entropy = report_entropy
assert(self._n_seen == len(np.unique(seen_classes))) # sanity check
assert(self._n_unseen == len(np.unique(unseen_classes))) # sanity check
def unseen_balanced_accuracy(self, y_true, pred_softmax):
Acc_zs, Ent_zs = self._subset_classes_balanced_accuracy(y_true, pred_softmax,
self._unseen_classes)
if self._report_entropy:
return Acc_zs, Ent_zs
else:
return Acc_zs
def seen_balanced_accuracy(self, y_true, pred_softmax):
Acc_seen, Ent_seen = self._subset_classes_balanced_accuracy(y_true,
pred_softmax,
self._seen_classes)
if self._report_entropy:
return Acc_seen, Ent_seen
else:
return Acc_seen
def generalized_scores(self, y_true, pred_softmax):
Acc_ts, Ent_ts = self._generalized_unseen_balanced_accuracy(y_true,
pred_softmax)
Acc_tr, Ent_tr = self._generalized_seen_balanced_accuracy(y_true, pred_softmax)
H = 2*Acc_tr*Acc_ts/(Acc_tr + Acc_ts)
Ent_H = 2*Ent_tr*Ent_ts/(Ent_tr + Ent_ts)
if self._report_entropy:
return Acc_ts, Acc_tr, H, Ent_ts, Ent_tr, Ent_H
else:
return Acc_ts, Acc_tr, H
def _generalized_unseen_balanced_accuracy(self, y_true, pred_softmax):
return self._generalized_subset_balanced_accuracy(y_true, pred_softmax,
self._unseen_classes)
def _generalized_seen_balanced_accuracy(self, y_true, pred_softmax):
return self._generalized_subset_balanced_accuracy(y_true, pred_softmax,
self._seen_classes)
def _generalized_subset_balanced_accuracy(self, y_true, pred_softmax, subset_classes):
is_member = np.in1d # np.in1d is like MATLAB's ismember
ix_subset_samples = is_member(y_true, subset_classes)
y_true_subset = y_true[ix_subset_samples]
all_classes = np.sort(np.block([self._seen_classes, self._unseen_classes]))
y_pred = all_classes[(pred_softmax[:, all_classes]).argmax(axis=1)]
y_pred_subset = y_pred[ix_subset_samples]
Acc = float(xian_per_class_accuracy(y_true_subset, y_pred_subset,
len(subset_classes)))
# Ent = float(entropy2(pred_softmax[ix_subset_samples, :][:, all_classes]).mean())
Ent = 0*Acc + 1e-3 # disabled because it's too slow
return Acc, Ent
def _subset_classes_balanced_accuracy(self, y_true, pred_softmax, subset_classes):
is_member = np.in1d # np.in1d is like MATLAB's ismember
ix_subset_samples = is_member(y_true, subset_classes)
y_true_zs = y_true[ix_subset_samples]
y_pred = subset_classes[(pred_softmax[:, subset_classes]).argmax(axis=1)]
y_pred_zs = y_pred[ix_subset_samples]
Acc = float(xian_per_class_accuracy(y_true_zs, y_pred_zs, len(subset_classes)))
# Ent = float(entropy2(pred_softmax[:, subset_classes]).mean())
Ent = 0*Acc + 1e-3 # disabled because it's too slow
return Acc, Ent
def xian_per_class_accuracy(y_true, y_pred, num_class=None):
""" A balanced accuracy metric as in Xian (CVPR 2017). Accuracy is
evaluated individually per class, and then uniformly averaged between
classes.
"""
y_true = y_true.flatten().astype('int32')
# # if num_class is None:
# # num_class = len(np.unique(np.block([y_true, y_pred])))
# print(num_class)
## my method is faster
# return balanced_accuracy_score(y_true, y_pred, num_class=num_class)
if num_class is None:
num_class = len(np.unique(np.block([y_true, y_pred])))
# num_class = len(counts_per_class) # e.g. @CUB: 50, 100, 150
# num_class = len(np.unique(y_true))
max_class_id = 1+max([num_class, y_true.max(), y_pred.max()])
counts_per_class_s = pd.Series(y_true).value_counts()
counts_per_class = np.zeros((max_class_id,))
counts_per_class[counts_per_class_s.index] = counts_per_class_s.values
# accuracy = ((y_pred == y_true) / np.array(
# [counts_per_class[y] for y in y_true])).sum() / num_class
accuracy = (1.*(y_pred == y_true) / counts_per_class[y_true]).sum() / num_class
return accuracy.astype('float32')
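
# Hedged usage sketch (not part of the original module), assuming numpy and
# pandas are imported at module level as the function above already requires.
if __name__ == "__main__":
    import numpy as np  # local import keeps the sketch self-contained
    # Class 0 has 4 samples (3 predicted correctly), class 1 has 1 sample (correct).
    y_true_demo = np.array([0, 0, 0, 0, 1])
    y_pred_demo = np.array([0, 0, 0, 1, 1])
    # Plain accuracy would be 4/5 = 0.8; the balanced score averages the
    # per-class accuracies instead: (3/4 + 1/1) / 2 = 0.875.
    print(xian_per_class_accuracy(y_true_demo, y_pred_demo, num_class=2))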
|
# Generated by Django 3.1.3 on 2020-11-11 09:33
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Hospital',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hospital_id', models.IntegerField()),
('name', models.CharField(max_length=250)),
('type', models.CharField(max_length=50)),
('map_url', models.CharField(max_length=250)),
('pincode', models.IntegerField()),
('phone_number', models.IntegerField()),
('test_blood', models.BooleanField()),
('donate_blood', models.BooleanField()),
],
),
]
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from google.cloud.ndb import _datastore_api
from google.cloud.ndb import _options
from google.cloud.ndb import utils
class MyOptions(_options.Options):
__slots__ = ["foo", "bar"]
class TestOptions:
@staticmethod
def test_constructor_w_bad_arg():
with pytest.raises(TypeError):
MyOptions(kind="test")
@staticmethod
def test_constructor_w_deadline():
options = MyOptions(deadline=20)
assert options.timeout == 20
@staticmethod
def test_constructor_w_deadline_and_timeout():
with pytest.raises(TypeError):
MyOptions(timeout=20, deadline=10)
@staticmethod
def test_constructor_w_use_memcache():
options = MyOptions(use_memcache=True)
assert options.use_global_cache is True
@staticmethod
def test_constructor_w_use_global_cache():
options = MyOptions(use_global_cache=True)
assert options.use_global_cache is True
@staticmethod
def test_constructor_w_use_memcache_and_global_cache():
with pytest.raises(TypeError):
MyOptions(use_global_cache=True, use_memcache=False)
@staticmethod
def test_constructor_w_use_datastore():
options = MyOptions(use_datastore=False)
assert options.use_datastore is False
@staticmethod
def test_constructor_w_use_cache():
options = MyOptions(use_cache=20)
assert options.use_cache == 20
@staticmethod
def test_constructor_w_memcache_timeout():
options = MyOptions(memcache_timeout=20)
assert options.global_cache_timeout == 20
@staticmethod
def test_constructor_w_global_cache_timeout():
options = MyOptions(global_cache_timeout=20)
assert options.global_cache_timeout == 20
@staticmethod
def test_constructor_w_memcache_and_global_cache_timeout():
with pytest.raises(TypeError):
MyOptions(memcache_timeout=20, global_cache_timeout=20)
@staticmethod
def test_constructor_w_max_memcache_items():
with pytest.raises(NotImplementedError):
MyOptions(max_memcache_items=20)
@staticmethod
def test_constructor_w_force_writes():
with pytest.raises(NotImplementedError):
MyOptions(force_writes=20)
@staticmethod
def test_constructor_w_propagation():
with pytest.raises(NotImplementedError):
MyOptions(propagation=20)
@staticmethod
def test_constructor_w_xg():
options = MyOptions(xg=True)
assert options == MyOptions()
@staticmethod
def test_constructor_with_config():
config = MyOptions(retries=5, foo="config_test")
options = MyOptions(config=config, retries=8, bar="app")
assert options.retries == 8
assert options.bar == "app"
assert options.foo == "config_test"
@staticmethod
def test_constructor_with_bad_config():
with pytest.raises(TypeError):
MyOptions(config="bad")
@staticmethod
def test___repr__():
representation = "MyOptions(foo='test', bar='app')"
options = MyOptions(foo="test", bar="app")
assert options.__repr__() == representation
@staticmethod
def test__eq__():
options = MyOptions(foo="test", bar="app")
other = MyOptions(foo="test", bar="app")
otherother = MyOptions(foo="nope", bar="noway")
assert options == other
assert options != otherother
assert options != "foo"
@staticmethod
def test_copy():
options = MyOptions(retries=8, bar="app")
options = options.copy(bar="app2", foo="foo")
assert options.retries == 8
assert options.bar == "app2"
assert options.foo == "foo"
@staticmethod
def test_items():
options = MyOptions(retries=8, bar="app")
items = [
(key, value) for key, value in options.items() if value is not None
]
assert items == [("bar", "app"), ("retries", 8)]
@staticmethod
def test_options():
@MyOptions.options
@utils.positional(4)
def hi(mom, foo=None, retries=None, timeout=None, _options=None):
return mom, _options
assert hi("mom", "bar", 23, timeout=42) == (
"mom",
MyOptions(foo="bar", retries=23, timeout=42),
)
@staticmethod
def test_options_bad_signature():
@utils.positional(2)
def hi(foo, mom):
pass
with pytest.raises(TypeError):
MyOptions.options(hi)
hi("mom", "!") # coverage
@staticmethod
def test_options_delegated():
@MyOptions.options
@utils.positional(4)
def hi(mom, foo=None, retries=None, timeout=None, _options=None):
return mom, _options
options = MyOptions(foo="bar", retries=23, timeout=42)
assert hi("mom", "baz", 24, timeout=43, _options=options) == (
"mom",
options,
)
class TestReadOptions:
@staticmethod
def test_constructor_w_read_policy():
options = _options.ReadOptions(
read_policy=_datastore_api.EVENTUAL_CONSISTENCY
)
assert options == _options.ReadOptions(
read_consistency=_datastore_api.EVENTUAL
)
@staticmethod
def test_constructor_w_read_policy_and_read_consistency():
with pytest.raises(TypeError):
_options.ReadOptions(
read_policy=_datastore_api.EVENTUAL_CONSISTENCY,
read_consistency=_datastore_api.EVENTUAL,
)
|
#!/usr/bin/python
#credits to http://www.devshed.com/c/a/python/ssh-with-twisted/
from twisted.internet import reactor
from twisted.web import server, resource
from twisted.cred import portal, checkers
from twisted.conch import manhole, manhole_ssh
class LinksPage(resource.Resource):
isLeaf = 1
def __init__(self, links) :
resource.Resource.__init__(self)
self.links = links
def render(self, request):
return "<ul>" + "".join([
"<li><a href=’%s’>%s</a></li>" % (link, title)
for title, link in self.links.items()]) + "</ul>"
links = {'Twisted': 'http://twistedmatrix.com/',
         'Python': 'http://python.org'}
site = server.Site(LinksPage(links))
reactor.listenTCP(8000, site)
def getManholeFactory(namespace, **passwords):
realm = manhole_ssh.TerminalRealm()
def getManhole(_): return manhole.Manhole(namespace)
realm.chainedProtocolFactory.protocolFactory = getManhole
p = portal.Portal(realm)
p.registerChecker(
        checkers.InMemoryUsernamePasswordDatabaseDontUse(**passwords))
f = manhole_ssh.ConchFactory(p)
return f
reactor.listenTCP(2222, getManholeFactory(globals(), admin='aaa'))
reactor.run()
|
import datetime
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
try:
from django.utils.timezone import now
except ImportError:
now = datetime.datetime.now
STATUS_CHOICES = ((0, "Placed"), (1, "Charged"), (2, "Shipped"), (3, "Cancelled"))
class Order(models.Model):
name = models.CharField(max_length=255)
customer = models.CharField(max_length=255, default="", blank=True)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
action_on_save = models.BooleanField(default=False)
def get_absolute_url(self):
return "/inlines/%i/" % self.pk
def __str__(self):
return self.name
class Item(models.Model):
name = models.CharField(max_length=255)
sku = models.CharField(max_length=13)
price = models.DecimalField(decimal_places=2, max_digits=12, db_index=True)
order = models.ForeignKey(Order, related_name="items", on_delete=models.CASCADE)
status = models.SmallIntegerField(default=0, choices=STATUS_CHOICES, db_index=True)
date_placed = models.DateField(default=now, null=True, blank=True)
def __str__(self):
return "%s (%s)" % (self.name, self.sku)
class Contact(models.Model):
name = models.CharField(max_length=255)
email = models.CharField(max_length=255)
order = models.ForeignKey(Order, related_name="contacts", on_delete=models.CASCADE)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=255)
content_type = models.ForeignKey(ContentType, null=True, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey("content_type", "object_id")
def __str__(self):
return self.name
class Event(models.Model):
name = models.CharField(max_length=255)
date = models.DateField()
def __str__(self):
return self.name
|
import os.path
import subprocess
def FlagsForFile(filename, **kwargs):
flags = ['-std=c++14', '-I/usr/local/include', '-I.']
# Call the config script to get the directories for the Python3 interpreter
# in PATH.
py_includes = subprocess.check_output(
['python3-config', '--includes']
).decode('utf-8').split()
flags.extend(py_includes)
proj_dir = os.path.dirname(os.path.realpath(__file__))
proj_include = os.path.join(proj_dir, 'include')
flags.append('-I' + proj_include)
return {'flags': flags}
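
# Hedged sketch (not part of the original file): running this module directly
# prints the flags returned for an arbitrary file name, assuming python3-config
# is on PATH as the function above already requires.
if __name__ == '__main__':
    print(FlagsForFile('example.cpp'))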
|
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch import nn
from torch import Tensor
from torchvision.transforms import Compose, Resize, ToTensor
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from torchsummary import summary
# PatchEmbedding
class PatchEmbedding(nn.Module):
def __init__(self, in_channels: int = 3, patch_size: int = 16, emb_size: int = 768, img_size: int = 224):
self.patch_size = patch_size
super().__init__()
self.projection = nn.Sequential(
# using a conv layer instead of a linear one -> performance gains
nn.Conv2d(in_channels, emb_size, kernel_size=patch_size, stride=patch_size),
Rearrange('b e (h) (w) -> b (h w) e'),
)
self.cls_token = nn.Parameter(torch.randn(1, 1, emb_size))
self.positions = nn.Parameter(torch.randn((img_size // patch_size) ** 2 + 1, emb_size))
def forward(self, x: Tensor) -> Tensor:
b, _, _, _ = x.shape
x = self.projection(x)
cls_tokens = repeat(self.cls_token, '() n e -> b n e', b=b)
# prepend the cls token to the input
x = torch.cat([cls_tokens, x], dim=1)
# add position embedding
x += self.positions
return x
# MultiHeadAttention
class MultiHeadAttention(nn.Module):
def __init__(self, emb_size: int = 768, num_heads: int = 8, dropout: float = 0):
super().__init__()
self.emb_size = emb_size
self.num_heads = num_heads
# fuse the queries, keys and values in one matrix
self.qkv = nn.Linear(emb_size, emb_size * 3)
self.att_drop = nn.Dropout(dropout)
self.projection = nn.Linear(emb_size, emb_size)
def forward(self, x: Tensor, mask: Tensor = None) -> Tensor:
# split keys, queries and values in num_heads
qkv = rearrange(self.qkv(x), "b n (h d qkv) -> (qkv) b h n d", h=self.num_heads, qkv=3)
queries, keys, values = qkv[0], qkv[1], qkv[2]
# sum up over the last axis
energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys) # batch, num_heads, query_len, key_len
if mask is not None:
fill_value = torch.finfo(torch.float32).min
            energy = energy.masked_fill(~mask, fill_value)
scaling = self.emb_size ** (1 / 2)
        att = F.softmax(energy / scaling, dim=-1)
att = self.att_drop(att)
# sum up over the third axis
out = torch.einsum('bhal, bhlv -> bhav ', att, values)
out = rearrange(out, "b h n d -> b n (h d)")
out = self.projection(out)
return out
# MLP
class FeedForwardBlock(nn.Sequential):
def __init__(self, emb_size: int, expansion: int = 4, drop_p: float = 0.):
super().__init__(
nn.Linear(emb_size, expansion * emb_size),
nn.GELU(),
nn.Dropout(drop_p),
nn.Linear(expansion * emb_size, emb_size),
)
class ResidualAdd(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
res = x
x = self.fn(x, **kwargs)
x += res
return x
class TransformerEncoderBlock(nn.Sequential):
def __init__(self,
emb_size: int = 768,
drop_p: float = 0.,
forward_expansion: int = 4,
forward_drop_p: float = 0.,
** kwargs):
super().__init__(
ResidualAdd(nn.Sequential(
nn.LayerNorm(emb_size),
MultiHeadAttention(emb_size, **kwargs),
nn.Dropout(drop_p)
)),
ResidualAdd(nn.Sequential(
nn.LayerNorm(emb_size),
FeedForwardBlock(
emb_size, expansion=forward_expansion, drop_p=forward_drop_p),
nn.Dropout(drop_p)
)
))
class TransformerEncoder(nn.Sequential):
def __init__(self, depth: int = 12, **kwargs):
super().__init__(*[TransformerEncoderBlock(**kwargs) for _ in range(depth)])
class ClassificationHead(nn.Sequential):
def __init__(self, emb_size: int = 768, n_classes: int = 1000):
super().__init__(
Reduce('b n e -> b e', reduction='mean'),
nn.LayerNorm(emb_size),
nn.Linear(emb_size, n_classes))
class ViT(nn.Sequential):
def __init__(self,
in_channels: int = 3,
patch_size: int = 16,
emb_size: int = 768,
img_size: int = 224,
depth: int = 12,
n_classes: int = 1000,
**kwargs):
super().__init__(
PatchEmbedding(in_channels, patch_size, emb_size, img_size),
TransformerEncoder(depth, emb_size=emb_size, **kwargs),
ClassificationHead(emb_size, n_classes)
)
summary(ViT(), (3, 224, 224), device='cpu')
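
# Hedged sketch (not part of the original): a single forward pass on a random
# batch to confirm that the classification head yields one logit per class.
x_demo = torch.randn(2, 3, 224, 224)
print(ViT()(x_demo).shape)  # expected: torch.Size([2, 1000])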
|
"""
MTG Card Quiz
(c) 2022 Matthew E Poush
"""
from collections import defaultdict
import json
import gzip
from sanitize_text import sanitize_text
class CardGenerator:
colors = {
'B': 'Black',
'G': 'Green',
'R': 'Red',
'W': 'White',
'U': 'Blue',
}
def __init__(self, input_file, approved_types):
with gzip.open(input_file) as a:
self.contents = json.load(a)
self.approved_types = approved_types
self.data = None
def all_cards_in(self, approved_types, type, cards, name, **_):
colors = self.colors
if type not in approved_types:
return
for card in cards:
name = card['name']
card_color = ' / '.join(colors[c] for c in sorted(card['colors'])) or 'Colorless'
card_type = ' / '.join(card['types'])
if '//' in name:
a, b = name.split('//')
if card['side'] == 'a':
name = a.strip()
else:
name = b.strip()
sanitized_name = sanitize_text(name)
if ' ' in sanitized_name:
yield sanitized_name, (name, card_color, card_type)
def cards_from_approved_sets(self):
for collection_name, collection in self.contents['data'].items():
yield from self.all_cards_in(self.approved_types, **collection)
def possible_starts(self, card_name):
words = [x.upper() for x in card_name.split() if len(x) > 2 and x[0]]
while len(words) > 1:
first, *words = words
for second in words:
if len(first) == 3 == len(second):
continue
yield first[:3] + ' ' + second[:3]
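    # Hedged illustration (not from the source): for the card name
    # "Lightning Bolt" the generator above yields a single hint, "LIG BOL"
    # (the first three letters of each qualifying word).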
def generate(self):
card_names = set(self.cards_from_approved_sets())
possibles = defaultdict(list)
for name, value in card_names:
for hint in self.possible_starts(name):
possibles[hint].append(value)
singles = {k: v[0] for k, v in possibles.items() if len(v) == 1}
_, colors, types = zip(*singles.values())
colors = sorted(set(colors))
types = sorted(set(types))
in_order = sorted(singles.items())
compacted = {k: (n, colors.index(c), types.index(t)) for k, (n, c, t) in in_order}
self.data = {
'clues': compacted,
'colors': colors,
'types': types
}
def save_to(self, output_file):
with open(output_file, 'w') as f:
json.dump(self.data, f, separators=(',', ':'))
def main():
input_file = 'AllPrintings.json.gz'
output_file = 'cards.json'
approved_types = {'commander', 'core', 'draft_innovation', 'expansion'}
generator = CardGenerator(input_file, approved_types)
generator.generate()
generator.save_to(output_file)
# create_cards(input_file, output_file, approved_types)
if __name__ == '__main__':
main()
|
from .base_buffer import *
from .replay_buffer import *
|
# /*******************************************************************************
# Copyright Intel Corporation.
# This software and the related documents are Intel copyrighted materials, and your use of them
# is governed by the express license under which they were provided to you (License).
# Unless the License provides otherwise, you may not use, modify, copy, publish, distribute, disclose
# or transmit this software or the related documents without Intel's prior written permission.
# This software and the related documents are provided as is, with no express or implied warranties,
# other than those that are expressly stated in the License.
#
# *******************************************************************************/
import json
import logging
import faulthandler
from functools import wraps
from multiprocessing import Process, Queue
from typing import Callable, Dict, Optional
def _result_summary_recursive(summary_value: Dict, is_root: bool = False) -> int:
result_error_code = 0
for _, data in summary_value.items():
subcheck_error_code = 0
if "RetVal" in data:
if data["RetVal"] not in ["PASS", "WARNING", "FAIL", "ERROR", "INFO"]:
raise ValueError(
f"Error in subtree: {data}. RetVal value can be only PASS, WARNING, FAIL, ERROR, INFO")
elif data["RetVal"] == "WARNING":
subcheck_error_code = 1
elif data["RetVal"] == "FAIL":
subcheck_error_code = 2
elif data["RetVal"] == "ERROR":
subcheck_error_code = 3
else:
raise ValueError(f"Error in subtree: {data}. RetVal is required")
if "Verbosity" in data:
if not isinstance(data["Verbosity"], int):
raise ValueError(
f"Error in subtree: {data}. Verbosity must be a integer")
if is_root:
if 0 < data["Verbosity"]:
raise ValueError(
f"Error in subtree: {data}. Root verbosity level must to be zero")
if "Message" in data:
if not isinstance(data["Message"], str):
raise ValueError(
f"Error in subtree: {data}. Message must be a string")
if "Command" in data:
if not isinstance(data["Command"], str):
raise ValueError(
f"Error in subtree: {data}. Command must be a string")
if "Value" in data:
if isinstance(data["Value"], dict):
subcheck_error_code = max(
subcheck_error_code,
_result_summary_recursive(data["Value"])
)
else:
raise ValueError(f"Error in subtree: {data}. Value is required")
result_error_code = max(
result_error_code,
subcheck_error_code
)
return result_error_code
def _result_summary_is_correct(
summary: Dict) -> int:
if len(summary) == 0:
raise ValueError("Value dictionary cannot be empty")
if "Value" not in summary:
raise ValueError("Result summary is not correct: Top level should contain Value")
return _result_summary_recursive(summary_value=summary["Value"], is_root=True)
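# Hedged illustration (not from the original file): a minimal summary the
# validator above accepts looks like
#   {"Value": {"some_check": {"RetVal": "PASS", "Verbosity": 0,
#                             "Message": "", "Value": "ok"}}}
# and the returned error code is the maximum over all sub-checks, with
# PASS/INFO mapping to 0, WARNING to 1, FAIL to 2 and ERROR to 3.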
def _metadata_is_correct(metadata):
try:
json.loads(metadata.dataReq)
except Exception:
raise ValueError(
f"Metadata: {metadata} contains wrong 'dataReq' value. Isn't valid json")
if " " in metadata.name:
raise ValueError(
f"Metadata: {metadata} contains wrong 'name' value. Name have to be without spaces")
tags = [elem.strip() for elem in metadata.tags.split(",")]
for tag in tags:
if " " in tag:
raise ValueError(
f"Metadata: {metadata} contains wrong 'tag' value. "
f"Tag have to be without spaces. Tag '{tag}' have a space.")
correct_rights = ["user", "admin"]
if metadata.rights not in correct_rights:
raise ValueError(
f"Metadata: {metadata} contains wrong 'rights' value. "
f"Rights can be 'user' and 'admin' only")
class CheckMetadataPy:
name: str
type: str
tags: str
descr: str
dataReq: str
rights: str
timeout: int
version: str
run: str
def __init__(
self,
name: str,
type: str,
tags: str,
descr: str,
dataReq: str,
rights: str,
timeout: int,
version: str,
run: str) -> None:
self.name = name
self.type = type
self.tags = tags
self.descr = descr
self.dataReq = dataReq
self.rights = rights
self.timeout = timeout
self.version = version
self.run = run
self.__post_init__()
def __post_init__(self) -> None:
_metadata_is_correct(self)
sorted_tags = [elem.strip() for elem in self.tags.split(",")]
sorted_tags.sort()
self.tags = ",".join(sorted_tags)
def __str__(self) -> str:
result = f"{type(self).__name__}("
for key, value in self.__dict__.items():
result += f"'{key}'='{value}',"
result = f"{result[:-1]})"
return result
def __repr__(self) -> str:
return str(self)
class CheckSummary:
error_code: int
result: str
def __init__(self, result: str) -> None:
self.error_code = _result_summary_is_correct(json.loads(result))
self.result = result
def __str__(self) -> str:
result = f"{type(self).__name__}("
for key, value in self.__dict__.items():
result += f"'{key}'='{value}',"
result = f"{result[:-1]})"
return result
def __repr__(self) -> str:
return str(self)
def check_correct_metadata(function: Callable) -> Callable:
@wraps(function)
def wrapper(self, *args, **kwargs):
ret = function(self, *args, **kwargs)
if hasattr(self, "metadata"):
_metadata_is_correct(self.metadata)
else:
logging.warning("Can't wraps function, because object have not metadata attribute")
return ret
return wrapper
def check_correct_summary(function: Callable) -> Callable:
@wraps(function)
def wrapper(self, *args, **kwargs):
ret = function(self, *args, **kwargs)
_result_summary_is_correct(json.loads(ret.result))
return ret
return wrapper
class BaseCheck:
metadata: Optional[CheckMetadataPy] = None
summary: Optional[CheckSummary] = None
def __init__(
self,
metadata: Optional[CheckMetadataPy] = None,
summary: Optional[CheckSummary] = None) -> None:
self.metadata = metadata
self.summary = summary
    # It is necessary to return summary to ensure interprocess communication
def run(self, data: Dict) -> CheckSummary:
raise NotImplementedError()
# It is necessary to return api version
def get_api_version(self) -> str:
raise NotImplementedError()
def get_metadata(self) -> Optional[CheckMetadataPy]:
return self.metadata
@check_correct_metadata
def update_metadata(self, update: Dict) -> None:
for key, value in update.items():
if key in self.metadata.__dict__.keys():
setattr(self.metadata, key, value)
def set_summary(self, summary: CheckSummary) -> None:
self.summary = summary
def get_summary(self) -> Optional[CheckSummary]:
return self.summary
def __str__(self) -> str:
result = f"{type(self).__name__}("
for key, value in self.__dict__.items():
result += f"'{key}'='{value}',"
result = f"{result[:-1]})"
return result
def __repr__(self) -> str:
return str(self)
def timeout_exit(function: Callable[[BaseCheck, Dict], CheckSummary]) -> \
Callable[[BaseCheck, Dict], CheckSummary]:
@wraps(function)
def wrapper(instance: BaseCheck, dataReqDict: Dict) -> CheckSummary:
faulthandler.enable()
queue = Queue()
def queue_wrapper(dataReqDict):
            result = function(instance, dataReqDict)
queue.put(result)
process = Process(target=queue_wrapper, args=(dataReqDict,))
process.start()
try:
result = queue.get(block=True, timeout=instance.get_metadata().timeout)
except Exception:
if process.exitcode is None:
process.terminate()
json_dict = {
"RetVal": "ERROR",
"Verbosity": 0,
"Message": "",
"Value": {
f"{instance.get_metadata().name}": {
"Value": "Timeout was exceeded",
"Verbosity": 0,
"Message": "",
"RetVal": "ERROR"
}
}
}
json_str = json.dumps(json_dict)
result = CheckSummary(result=json_str)
else:
json_dict = {
"RetVal": "ERROR",
"Verbosity": 0,
"Message": "",
"Value": {
f"{instance.get_metadata().name}": {
"Value": "",
"Verbosity": 0,
"Message": "The check crushed at runtime. No data was received. "
"See call stack above.",
"RetVal": "ERROR"
}
}
}
json_str = json.dumps(json_dict)
result = CheckSummary(result=json_str)
faulthandler.disable()
return result
return wrapper
|
'''Meeus: Astronomical Algorithms (2nd ed.), chapter 13'''
import numpy as np
from constants import *
def local_sid(sid,lon):
'''calculation of local sidereal time from Greenwich sidereal time (in degrees)'''
return sid+lon
def ra2ha(ra,sid):
'''calculation of hour angle from RA'''
return sid-ra
def ha2ra(ha,sid):
'''calculation of RA from hour angle'''
return sid-ha
def eq2azm(ha,dec,lat,sid=None,ra=False):
    '''transformation of equatorial coordinates (Hour angle, DEC) to azimuthal (Azm - from South!, Alt);
or from (RA, DEC) - give RA as 1st arg. instead of ha, set ra=True and give sidereal time (sid)'''
#type of output (same as input - number, list, numpy.array)
if sid is None: sid=0
out_type='lst'
if (isinstance(ha,int) or isinstance(ha,float)) and (isinstance(dec,int) or isinstance(dec,float)) and (isinstance(sid,int) or isinstance(sid,float)) and (isinstance(lat,int) or isinstance(lat,float)):
#all input args are numbers
out_type='num'
if isinstance(ha,np.ndarray) or isinstance(dec,np.ndarray) or isinstance(sid,np.ndarray) or isinstance(lat,np.ndarray):
#numpy.array
out_type='np'
if isinstance(ha,list): ha=np.array(ha)
if isinstance(dec,list): dec=np.array(dec)
if isinstance(sid,list): sid=np.array(sid)
if isinstance(lat,list): lat=np.array(lat)
if ra: t=sid-ha
else: t=ha #HA given
if out_type=='num': t=np.array([t])
t=np.deg2rad(t)
dec=np.deg2rad(dec)
lat=np.deg2rad(lat)
alt=np.arcsin(np.sin(dec)*np.sin(lat)+np.cos(dec)*np.cos(lat)*np.cos(t))
sinA=np.cos(dec)*np.sin(t)/np.cos(alt)
cosA=(-np.cos(lat)*np.sin(dec)+np.sin(lat)*np.cos(dec)*np.cos(t))/np.cos(alt)
azm=np.arctan2(sinA,cosA)
azm[np.where(azm<0)]+=2*np.pi
azm=np.rad2deg(azm)
alt=np.rad2deg(alt)
if out_type=='num':
alt=alt[0]
azm=azm[0]
elif out_type=='lst':
alt=alt.tolist()
azm=azm.tolist()
return azm,alt
def azm2eq(azm,alt,lat,sid=None,ra=False):
    '''transformation of azimuthal coordinates (Azm - from South!, Alt) to equatorial (Hour angle, DEC);
or to (RA, DEC) - set ra=True and give sidereal time (sid)'''
#type of output (same as input - number, list, numpy.array)
if sid is None: sid=0
out_type='lst'
if (isinstance(azm,int) or isinstance(azm,float)) and (isinstance(alt,int) or isinstance(alt,float)) and (isinstance(sid,int) or isinstance(sid,float)) and (isinstance(lat,int) or isinstance(lat,float)):
#all input args are numbers
out_type='num'
if isinstance(azm,np.ndarray) or isinstance(alt,np.ndarray) or isinstance(sid,np.ndarray) or isinstance(lat,np.ndarray):
#numpy.array
out_type='np'
if isinstance(azm,list): azm=np.array(azm)
if isinstance(alt,list): alt=np.array(alt)
if isinstance(sid,list): sid=np.array(sid)
if isinstance(lat,list): lat=np.array(lat)
if out_type=='num': azm=np.array([azm])
azm=np.deg2rad(azm)
alt=np.deg2rad(alt)
lat=np.deg2rad(lat)
dec=np.arcsin(np.sin(lat)*np.sin(alt)-np.cos(lat)*np.cos(alt)*np.cos(azm))
sinH=np.cos(alt)*np.sin(azm)/np.cos(dec)
cosH=(np.sin(alt)*np.cos(lat)+np.cos(alt)*np.sin(lat)*np.cos(azm))/np.cos(dec)
ha=np.arctan2(sinH,cosH)
ha[np.where(ha<0)]+=2*np.pi
ha=np.rad2deg(ha)
dec=np.rad2deg(dec)
if ra: ha=sid-ha
if out_type=='num':
dec=dec[0]
ha=ha[0]
elif out_type=='lst':
dec=dec.tolist()
ha=ha.tolist()
return ha,dec
def eq2ecl(ra,dec):
    '''transformation of equatorial coordinates (RA, DEC) to ecliptical (lambda, beta)'''
#type of output (same as input - number, list, numpy.array)
out_type='lst'
if (isinstance(ra,int) or isinstance(ra,float)) and (isinstance(dec,int) or isinstance(dec,float)):
#all input args are numbers
out_type='num'
if isinstance(ra,np.ndarray) or isinstance(dec,np.ndarray):
#numpy.array
out_type='np'
if isinstance(ra,list): ra=np.array(ra)
if isinstance(dec,list): dec=np.array(dec)
if out_type=='num': ra=np.array([ra])
ra=np.deg2rad(ra)
dec=np.deg2rad(dec)
beta=np.arcsin(np.sin(dec)*np.cos(eps)-np.cos(dec)*np.sin(eps)*np.sin(ra))
sinL=(np.sin(ra)*np.cos(dec)*np.cos(eps)+np.sin(dec)*np.sin(eps))/np.cos(beta)
cosL=np.cos(ra)*np.cos(dec)/np.cos(beta)
lam=np.arctan2(sinL,cosL)
lam[np.where(lam<0)]+=2*np.pi
lam=np.rad2deg(lam)
beta=np.rad2deg(beta)
if out_type=='num':
beta=beta[0]
lam=lam[0]
elif out_type=='lst':
beta=beta.tolist()
lam=lam.tolist()
return lam,beta
def ecl2eq(lam,beta):
    '''transformation of ecliptical coordinates (lambda, beta) to equatorial (RA, DEC)'''
#type of output (same as input - number, list, numpy.array)
out_type='lst'
if (isinstance(lam,int) or isinstance(lam,float)) and (isinstance(beta,int) or isinstance(beta,float)):
#all input args are numbers
out_type='num'
if isinstance(lam,np.ndarray) or isinstance(beta,np.ndarray):
#numpy.array
out_type='np'
if isinstance(lam,list): lam=np.array(lam)
if isinstance(beta,list): beta=np.array(beta)
if out_type=='num': lam=np.array([lam])
lam=np.deg2rad(lam)
beta=np.deg2rad(beta)
dec=np.arcsin(np.sin(beta)*np.cos(eps)+np.cos(beta)*np.sin(eps)*np.sin(lam))
sinR=(np.sin(lam)*np.cos(beta)*np.cos(eps)-np.sin(beta)*np.sin(eps))/np.cos(dec)
cosR=np.cos(lam)*np.cos(beta)/np.cos(dec)
ra=np.arctan2(sinR,cosR)
ra[np.where(ra<0)]+=2*np.pi
ra=np.rad2deg(ra)
dec=np.rad2deg(dec)
if out_type=='num':
dec=dec[0]
ra=ra[0]
elif out_type=='lst':
dec=dec.tolist()
ra=ra.tolist()
return ra,dec
def eq2gal(ra,dec):
'''transformation of equatorial coordinates (RA, DEC) to galactic (l, b)'''
#type of output (same as input - number, list, numpy.array)
out_type='lst'
if (isinstance(ra,int) or isinstance(ra,float)) and (isinstance(dec,int) or isinstance(dec,float)):
#all input args are numbers
out_type='num'
if isinstance(ra,np.ndarray) or isinstance(dec,np.ndarray):
#numpy.array
out_type='np'
if isinstance(ra,list): ra=np.array(ra)
if isinstance(dec,list): dec=np.array(dec)
if out_type=='num': ra=np.array([ra])
ra=np.deg2rad(ra)-raG
dec=np.deg2rad(dec)
b=np.arcsin(np.sin(dec)*np.sin(decG)+np.cos(dec)*np.cos(decG)*np.cos(ra))
sinL=np.cos(dec)*np.sin(ra)/np.cos(b)
cosL=(np.sin(dec)*np.cos(decG)-np.cos(dec)*np.sin(decG)*np.cos(ra))/np.cos(b)
l=np.arctan2(sinL,cosL)
l=lNP-l
l[np.where(l>=2*np.pi)]-=2*np.pi
l[np.where(l<0)]+=2*np.pi
l=np.rad2deg(l)
b=np.rad2deg(b)
if out_type=='num':
b=b[0]
l=l[0]
elif out_type=='lst':
b=b.tolist()
l=l.tolist()
return l,b
def gal2eq(l,b):
'''transformation of galactic coordinates (l, b) to equatorial (RA, DEC)'''
#type of output (same as input - number, list, numpy.array)
out_type='lst'
if (isinstance(l,int) or isinstance(l,float)) and (isinstance(b,int) or isinstance(b,float)):
#all input args are numbers
out_type='num'
if isinstance(l,np.ndarray) or isinstance(b,np.ndarray):
#numpy.array
out_type='np'
if isinstance(l,list): l=np.array(l)
if isinstance(b,list): b=np.array(b)
if out_type=='num': b=np.array([b])
l=lNP-np.deg2rad(l)
b=np.deg2rad(b)
dec=np.arcsin(np.sin(decG)*np.sin(b)+np.cos(decG)*np.cos(b)*np.cos(l))
sinR=np.cos(b)*np.sin(l)/np.cos(dec)
cosR=(np.cos(decG)*np.sin(b)-np.sin(decG)*np.cos(b)*np.cos(l))/np.cos(dec)
ra=np.arctan2(sinR,cosR)
ra+=raG
ra[np.where(ra>=2*np.pi)]-=2*np.pi
ra[np.where(ra<0)]+=2*np.pi
ra=np.rad2deg(ra)
dec=np.rad2deg(dec)
if out_type=='num':
ra=ra[0]
dec=dec[0]
elif out_type=='lst':
ra=ra.tolist()
dec=dec.tolist()
return ra,dec
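
# Hedged usage sketch (not part of the original module): converting a single
# equatorial position to azimuth/altitude with the helpers above; inputs and
# outputs are in degrees and the numbers here are arbitrary.
if __name__ == '__main__':
    azm, alt = eq2azm(ha=30.0, dec=45.0, lat=48.0)
    print(azm, alt)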
|
from mdgen.constants import MARKDOWN_BOLD
class MarkdownBoldGenerator:
""" This class creates markdown bold text using input `text`."""
def new_bold_text(self, text: str = None):
return f"{MARKDOWN_BOLD}{text.strip()}{MARKDOWN_BOLD}"
|
# -*- coding: utf-8 -*-
from flask_restful import Api
from resources.person import PersonResource
from resources.company import CompanyResource
#Define app end points
def get_endpoints(app):
api = Api(app)
api.add_resource(PersonResource,'/people','/people/<string:username>')
api.add_resource(CompanyResource,'/company/<string:name>')
return api
|
from html2md.commands.Command import Command
from html2md.commands.Table import Table
class CellFeed(Command):
def __init__(self, args):
super().__init__()
self._rowspan = []
def __copy__(self):
return CellFeed(None)
def execute(self) -> str:
cell_content = super().execute()
table = self.ancestor
while table.tag != "table":
table = table.ancestor
cell_content = cell_content.replace("\\n", " ")
table._content_buffer.cell_feed(cell_content)
for attr in self._attrs:
if attr[0] == "colspan":
for i in range(1, int(attr[1])):
table._content_buffer.cell_feed("")
return ""
|
#coding:utf-8
import json
import os
def get_Frends_list():
k = 0
file_list=[i for i in os.listdir('./frends/') if i.endswith('json')]
frends_list=[]
for f in file_list:
try:
with open('./frends/{}'.format(f),'r',encoding='utf-8') as w:
data=w.read()[75:-4]
js=json.loads(data)
print(js)
for i in js["items_list"]:
k+=1
frends_list.append(i)
except :
print ("???")
print ("好友数:"+str(k))
return frends_list
frends_list=get_Frends_list()
print(frends_list)
for x in frends_list:
print(x)
print(len(frends_list))
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
opts = [
cfg.StrOpt('configuration_strategies',
help='configuration strategies, format is class-ids separated '
'by spaces. For example, '
'nova.compute.ibm.configuration_strategy_ovf.'
'ConfigurationStrategy'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
CONF.import_opt('dhcp_domain', 'nova.network.manager')
LOG = logging.getLogger(__name__)
class ConfigurationStrategy:
"""Base class for configuration strategies. To implement a custom
configuration strategy, the developer must subclass this class and override
the get_name and calc_configuration_data methods. The get_name
implementation is expected to return the text matching the type of the
configuration_strategy property added to the glance image being deployed.
The calc_configuration_data method is expected to use cs_data parameter
together with cs_properties parameter to replace the metada_template in
cs_data with openstack and user provided values in cs_properties.
The metadata_template text is extracted from the configuration_strategy
property in glance image. See ConfigurationStrategies class below and
methods in nova.compute.configuration_strategy_common that are handling
the logic to get configuration_strategy property from glance image as well
as the population of cs_data and cs_properties parameters.
The calc_configuration_data method must return a configuration_data
dictionary which contains one key/value pair with key being string
'present_disk' and value being a present_disk dictionary. Here is an
example:
config_data = {'present_disk': present_disk_info}
return config_data
See below for an example of the present_disk dictionary that shows the
required keys.
present_disk_info = {
'name': 'diskname.extension',
'volume_label': 'my_custom_label',
'files': files }
The present_disk_info dictionary can also have an optional key/value pair
where key is the string 'device_type' and value could be 'cdrom', 'disk',
'floppy' or any other custom type. The idea here is that it will be
something the compute drivers will understand when they transport
configuration_data to the instance.
Notice 'files' is another dictionary containing a list of key/value pairs
where key is the file name and value is the text content of the file.
Here is an example:
files = {'ovf-env.xml': 'ovf_env_text_here',
'kickstart.xml': 'kickstart_text_here',
'custom_script.sh': 'script_content_here' }
In most cases 'files' will be used to store the modified metadata_template
as a file.
In addition to implementing the ConfigurationStrategy class, make sure you
add your fully qualified implementation to the configuration_strategies
config option in your nova.conf file. This is how you register your unique
implementation.
There is another set of classes that may need to be changed to get your
custom configuration_strategy working depending on your custom transport
needs. Each independent hypervisor driver implements this transport. We
currently support creating and transporting ISO file with all the content
from present_disk in configuration_data. The configuration_data gets passed
in as a property in the instance dictionary and each driver is in charge
of creating the ISO and transporting it to the instance being booted. For
convenience we have created a helper method to deal with ISO creation found
in
nova.virt.ibm.config_filesystem_image_utils.build_config_filesystem_image.
If ISO and media transport is not the desired transport for your custom
implementation you may need to make changes to either the utility method,
the drivers themselves or both.
"""
def get_name(self):
"""Override to return a unique name, matching the 'type' in the image
metadata configuration strategy.
"""
return
def calc_configuration_data(self, instance, injected_files, admin_password,
cs_data, cs_properties):
"""Generates a configuration data dictionary with information to
transport to instance during boot.
:param instance: Instance object as returned by DB layer.
:param injected_files: dictionary with file names and their content.
:param admin_password: generated by openstack.
:param cs_data: configuration_strategy metadata in glance image.
:param cs_properties: dictionary containing configuration data
provided by openstack plus server metadata passed on boot
:returns: dictionary containing data to transport to the running
instance
"""
pass
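# Hedged sketch (not part of the original module): a minimal strategy following
# the contract documented above. The 'example' type name, file name and
# template handling are illustrative only.
class _ExampleConfigurationStrategy(ConfigurationStrategy):
    def get_name(self):
        return 'example'

    def calc_configuration_data(self, instance, injected_files, admin_password,
                                cs_data, cs_properties):
        # one small file built from an OpenStack-provided property
        files = {'config.properties': 'hostname=%s' %
                 cs_properties.get('server.hostname', '')}
        present_disk_info = {'name': 'config.iso',
                             'volume_label': 'config',
                             'device_type': 'cdrom',
                             'files': files}
        return {'present_disk': present_disk_info}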
class ConfigurationStrategies:
"""Registry of known configuration strategies."""
def __init__(self):
LOG.debug(_("Initializing configuration strategies..."))
self._strategies = {}
from nova.compute.ibm import configuration_strategy_ovf
from nova.compute.ibm import configuration_strategy_sysprep
config_strategy_objs = [
configuration_strategy_ovf.ConfigurationStrategy(),
configuration_strategy_sysprep.ConfigurationStrategy(),
]
class_names_str = CONF.configuration_strategies
if class_names_str:
            class_names = re.split(r'\s+', class_names_str)
for class_name in class_names:
obj = importutils.import_object(class_name)
config_strategy_objs.append(obj)
for config_strategy_obj in config_strategy_objs:
name = config_strategy_obj.get_name()
LOG.debug('Strategy %s is %s' % (name, config_strategy_obj))
self._strategies[name] = config_strategy_obj
def calc_configuration_data(self, instance, image_meta, network_info,
injected_files, admin_password):
cs_data = self._extract_strategy(image_meta)
if not cs_data:
LOG.debug(_('No configuration strategy data in image metadata,'
' skipping.'))
return
cs_type = cs_data.get('type')
if not cs_type:
LOG.warning(_("Config strategy data doesn't specify a type,"
" skipping"))
return
cs_obj = self._strategies.get(cs_type)
if not cs_obj:
LOG.warning(_("Config strategy type doesn't match a known type,"
" ignoring. The type is '%s'") % cs_type)
return
cs_properties = self._build_property_map(instance, network_info,
admin_password)
config_data = cs_obj.calc_configuration_data(instance, injected_files,
admin_password, cs_data, cs_properties)
return config_data
@staticmethod
def _extract_strategy(image_meta):
if not image_meta:
return
image_properties = image_meta.get('properties')
if not image_properties:
return
configuration_strategy_str = (
image_properties.get('configuration_strategy'))
if not configuration_strategy_str:
LOG.debug(_('No configuration strategy in image metadata.'))
return
LOG.debug('configuration_strategy_str=%s' % configuration_strategy_str)
configuration_strategy = json.loads(configuration_strategy_str)
return configuration_strategy
@staticmethod
def _calc_dns_list_str(dns_list):
return ' '.join([d['address'] for d in dns_list])
@staticmethod
def _build_property_map(instance, network_info, admin_password):
all_property_values = {}
# These properties we calculate from OpenStack environment.
all_property_values['server.admin_password'] = admin_password
all_property_values['server.hostname'] = instance['hostname']
all_property_values['server.domainname'] = CONF.dhcp_domain
if len(network_info):
subnets = network_info[0]['network']['subnets']
if subnets:
dns = subnets[0]['dns']
if dns:
all_property_values['server.dns-client.pri_dns'] = (
dns[0]['address'])
if len(dns) > 1:
sec_dns_ip = dns[1]['address']
else:
# If there's only 1 DNS, default sec_dns to pri_dns
sec_dns_ip = dns[0]['address']
all_property_values['server.dns-client.sec_dns'] = (
sec_dns_ip)
all_property_values['server.dns-client.dns_list'] = (
ConfigurationStrategies._calc_dns_list_str(dns))
for nw_i, nw_all in enumerate(network_info):
nw_i = nw_i + 1
nw_data = nw_all['network']
mac = nw_all['address']
all_property_values['server.network.%d.mac' % nw_i] = (
mac)
all_property_values['server.network.%d.mac_alt' % nw_i] = (
mac.replace(':', '-'))
slot_id = int(mac[-2:], 16)
all_property_values['server.network.%d.slotnumber' % nw_i] = (
str(slot_id))
subnets = nw_data['subnets']
if not subnets:
# There's no subnet defined, so indicate use dhcp for this
# network.
addr_type = 'v4'
all_property_values[
'server.network.%d.%s.use_dhcp' % (nw_i, addr_type)] = (
'true')
continue
# Iterating through subnets to get ipv4 as well as ipv6 addresses
for subnet in subnets:
ips = subnet['ips']
if len(ips) == 0:
continue
# Getting first IP of each subnet
ip = ips[0]
gateway = subnet['gateway']['address']
# FIXME(blk-u): This bit is copied from nova.network.model.
if ip['version'] == 4:
addr_type = 'v4'
netmask = str(subnet.as_netaddr().netmask)
else:
addr_type = 'v6'
netmask = str(subnet.as_netaddr().prefixlen)
all_property_values[
'server.network.%d.%s.address' % (nw_i, addr_type)] = (
ip['address'])
all_property_values[
'server.network.%d.%s.netmask' % (nw_i, addr_type)] = (
netmask)
all_property_values[
'server.network.%d.%s.cidr' % (nw_i, addr_type)] = (
'%s/%s' % (ip['address'], subnet.as_netaddr().prefixlen))
all_property_values[
'server.network.%d.%s.gateway' % (nw_i, addr_type)] = (
gateway)
all_property_values[
'server.network.%d.%s.use_dhcp' % (nw_i, addr_type)] = (
'false')
# These properties are from the metadata passed in on the boot.
md = utils.instance_meta(instance)
for i in md:
key = 'server.metadata.' + i
LOG.debug('from metadata, setting key %s to %s' %
(key, md[i]))
all_property_values[key] = md[i]
return all_property_values
|
# limitation: each pattern character may appear at most once
text = "912873129"
pat = "123"
window_found = False
prev_min = 0
prev_max = 0
prev_pos = -1
lookup_tab = dict()
# detect the first window once every pattern character has been seen
# shift the window whenever the newly seen character was at the previous minimum position
# if the new window is smaller than the previous smallest, update the current minimum window
for pos, c in enumerate(text):
if c in pat:
prev_pos = lookup_tab.get(c, -1)
lookup_tab[c] = pos
if window_found == True:
cur_max = pos
if prev_pos == cur_min: # new min needed
cur_min = min(list([lookup_tab[k] for k in lookup_tab.keys()]))
print('new min on update of ', c, ' is ', cur_min)
if cur_max - cur_min < prev_max - prev_min:
prev_min = cur_min
prev_max = cur_max
elif len(lookup_tab) == len(pat):
cur_min = prev_min = min(list([lookup_tab[k] for k in lookup_tab.keys()]))
cur_max = prev_max = max(list([lookup_tab[k] for k in lookup_tab.keys()]))
window_found = True
print(prev_min, prev_max)
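# Hedged walk-through for the inputs above: the first complete window spans
# positions 1..5 ("12873"); when '2' reappears at position 7 the window
# shrinks to positions 5..7 ("312"), so the script prints "5 7".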
|
class Transect:
def __init__(self, name="Transect", files=[]):
self.name = name
self.files = files
@property
def numFiles(self):
return len(self.files)
@property
def firstLastText(self):
first = self.files[0]
last = self.files[-1]
return f"{first.name} . . . {last.name}"
def addFile(self, fp):
self.files.append(fp)
def clearFiles(self):
self.files.clear()
|
import torch.nn as nn
from .transformer import TransformerBlock, TransformerBlock_AL
from .embedding import BERTEmbedding, BERTEmbedding_AL
class BERT(nn.Module):
"""
BERT model : Bidirectional Encoder Representations from Transformers.
"""
def __init__(self, vocab_size, hidden=768, n_layers=12, attn_heads=12, dropout=0.1):
"""
:param vocab_size: vocab_size of total words
:param hidden: BERT model hidden size
:param n_layers: numbers of Transformer blocks(layers)
:param attn_heads: number of attention heads
:param dropout: dropout rate
"""
super().__init__()
self.hidden = hidden
self.n_layers = n_layers
self.attn_heads = attn_heads
# paper noted they used 4*hidden_size for ff_network_hidden_size
self.feed_forward_hidden = hidden * 4
# embedding for BERT, sum of positional, segment, token embeddings
self.embedding = BERTEmbedding(vocab_size=vocab_size, embed_size=hidden)
# multi-layers transformer blocks, deep network
self.transformer_blocks = nn.ModuleList(
[TransformerBlock(hidden, attn_heads, hidden * 4, dropout) for _ in range(n_layers)])
def forward(self, x, segment_info):
# attention masking for padded token
        # torch.ByteTensor([batch_size, 1, seq_len, seq_len])
mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)
# embedding the indexed sequence to sequence of vectors
x = self.embedding(x, segment_info)
# running over multiple transformer blocks
for transformer in self.transformer_blocks:
x = transformer.forward(x, mask)
return x
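
# Hedged illustration (not from the source): for a padded batch such as
# x = torch.tensor([[5, 7, 0]]), the masking in forward() above produces a
# (1, 1, 3, 3) mask whose column for the padding id 0 is False, so attention
# to that position is suppressed in every layer.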
class BERTAL(nn.Module):
"""
BERT AL model : Bidirectional Encoder Representations from Transformers.
"""
def __init__(self, vocab_size, hidden=768, n_layers=12, attn_heads=12, dropout=0.1, config=None):
"""
:param vocab_size: vocab_size of total words
:param hidden: BERT model hidden size
:param n_layers: numbers of Transformer blocks(layers)
:param attn_heads: number of attention heads
:param dropout: dropout rate
"""
super().__init__()
self.hidden = hidden
self.n_layers = n_layers
self.attn_heads = attn_heads
self.detach = config["detach"]
self.g_dim = config["g_dim"]
self.h_dim = config["h_dim"]
self.act = config["act"]
# paper noted they used 4*hidden_size for ff_network_hidden_size
self.feed_forward_hidden = hidden * 4
# embedding for BERT, sum of positional, segment, token embeddings
self.embedding = BERTEmbedding_AL(vocab_size=vocab_size, g_dim=self.g_dim, embed_size=hidden)
# TODO: switch to ALBlock, and remove ModuleList if it doesn't work.
# multi-layers transformer blocks, deep network
self.transformer_blocks = nn.ModuleList(
[TransformerBlock_AL(hidden, attn_heads, hidden * 4, dropout, self.g_dim, self.h_dim, self.act, self.detach) for _ in range(n_layers)])
def forward(self, x, segment_info, y):
# attention masking for padded token
        # torch.ByteTensor([batch_size, 1, seq_len, seq_len])
loss_data = {}
mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)
# embedding the indexed sequence to sequence of vectors
x, y, loss, y_mask = self.embedding(x, segment_info, y)
loss_data["emb associated loss"] = self.embedding.al
loss_data["emb AE loss"] = self.embedding.ael
# running over multiple transformer blocks
for idx, transformer in enumerate(self.transformer_blocks):
x, y, l = transformer.forward(x, mask, y, y_mask)
loss += l
loss_data[f"transformer layer{idx} associated loss"] = transformer.ass_loss
loss_data[f"transformer layer{idx} AE loss"] = transformer.ae_loss
return loss, loss_data
def inference(self, x):
mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)
# embedding the indexed sequence to sequence of vectors
x = self.embedding.inference(x)
# running over multiple transformer blocks
for transformer in self.transformer_blocks:
x = transformer.inference(x, mask)
y = self.transformer_blocks[-1].b(x)
# print(y.shape)
for transformer in reversed(self.transformer_blocks):
y = transformer.h(y)
# print(y.shape)
y_pred = self.embedding.h(y)
return y_pred
def encode(self, x, mode='tran'):
mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)
# embedding the indexed sequence to sequence of vectors
x = self.embedding.inference(x)
# running over multiple transformer blocks
for transformer in self.transformer_blocks:
x = transformer.inference(x, mask)
y = self.transformer_blocks[-1].b(x)
for transformer in reversed(self.transformer_blocks):
y = transformer.h(y)
# y_pred = self.embedding.h(y)
if mode == 'tran':
return x # (batch, seq_len, hidden)
if mode == 'small':
return y #(batch, seq_len, h_dim)
|
# -*- coding: utf-8 -*-
""" Tests for the configuration file/command-line arguments. """
# This test should be run from its directory.
# TODO A configmanager object cannot parse a config file and/or the command
# line more than once, which prevents 'reloading' a configuration.
import os
from . import config
config_file_00 = os.path.join(os.path.dirname(__file__),'test-config-values-00.conf')
# 1. No config file, no command-line arguments (a.k.a. default values)
conf = config.configmanager()
conf.parse_config()
assert conf['osv_memory_age_limit'] == 1.0
assert os.path.join(conf['root_path'], 'addons') == conf['addons_path']
# 2. No config file, some command-line arguments
conf = config.configmanager()
# mess with the optparse.Option definition to allow an invalid path
conf.casts['addons_path'].action = 'store'
conf.parse_config(['--addons-path=/xyz/dont-exist', '--osv-memory-age-limit=2.3'])
assert conf['osv_memory_age_limit'] == 2.3
assert conf['addons_path'] == '/xyz/dont-exist'
# 3. Config file, no command-line arguments
conf = config.configmanager()
conf.parse_config(['-c', config_file_00])
assert conf['osv_memory_age_limit'] == 3.4
# 4. Config file, and command-line arguments
conf = config.configmanager()
conf.parse_config(['-c', config_file_00, '--osv-memory-age-limit=2.3'])
assert conf['osv_memory_age_limit'] == 2.3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple download tool for scraping LabelPlanet.co.uk pages for label
template specifications. Can output the descriptions for use in an INX file
enum, as well as the definitions for use in the label_guides.py extension's
database.
Licensed under the GNU General Public License v2.0
"""
from lxml.html import fromstring
import requests
import requests_cache
import re
import logging
from pprint import pformat
import argparse
class FormatFinder(object):
"""
Gets a list of known formats from a template list page
"""
def _nth_cell_text(self, row, nth):
selector = "td:nth-child({})".format(nth)
return row.cssselect(selector)[0].text_content()
def _get_xy_size_from_celltext(self, txt):
parts = re.findall(r"[\d.,]+", txt)
return parts
def _get_codes_from_celltext(self, txt):
lpcode = re.search("LP[^\s]+", txt)
avery = re.search("(?<=Avery )[A-Z\d]+", txt)
return (lpcode.group(), avery.group() if avery else None)
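    # Hedged illustration (not from the source): for cell text such as
    # "LP65/38 Avery L7651" the regexes above would return
    # ("LP65/38", "L7651"); when no Avery code is present the second
    # element is None.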
def _get_link_from_row(self, row):
link = row.cssselect("a")[0].attrib['href']
return link
def _get_item_from_row(self, row):
num_per_sheet = int(self._nth_cell_text(row, 1))
lab_size = self._nth_cell_text(row, 2)
        # some label sizes aren't supported
if any(s in lab_size for s in ['/']):
return None
lab_size = self._get_xy_size_from_celltext(lab_size)
codes = self._get_codes_from_celltext(self._nth_cell_text(row, 3))
link = self._get_link_from_row(row)
item = {
'size': lab_size,
'avery': codes[1],
'lpcode': codes[0],
'persheet': num_per_sheet,
'prodlink': link
}
return item
def get_list(self, list_page):
url = ("https://www.labelplanet.co.uk/label-templates/" +
list_page + ".php")
shape = {
"rectangular-rounded-corners": "rrect",
"rectangular-square-corners": "rect",
"square": "rrect",
"round": "circle",
"oval": "circle"
}[list_page]
r = requests.get(url)
doc = fromstring(r.text)
items = []
for prod_row in doc.cssselect(".templatetable tbody tr"):
# product rows have 3 cells
if (len(prod_row.getchildren()) == 3):
item = self._get_item_from_row(prod_row)
if (item):
item['shape'] = shape
items.append(item)
return items
class SpecRipper(object):
"""
Gets the full spec for a label from the template page
Updates the given item with description and label spec
"""
def __init__(self, item):
self.item = item
def _get_desc_text(self, doc):
e = doc.xpath('.//td'
'/strong[starts-with(text(), "Notes")]'
'/../..'
'/following-sibling::tr[1]/td'
'//li[1]')
parts = e[0].text_content().split("–")
if len(parts) > 1:
return parts[0].strip()
return None
def _get_cell_by_xy(self, table, x, y):
nxt = table.xpath(".//tr[2]/following-sibling::*[1]")[0]
# handle broken table formatting
# (missing <tr> on third row)
if nxt.tag == "td" and y > 3:
y += 3
selector = "tr:nth-child({y}) > td:nth-child({x})".format(x=x, y=y)
return table.cssselect(selector)[0]
def _get_dim_from_text(self, txt):
return txt.replace("mm", "").replace("(diameter)", "").strip()
def _get_xy_template_spec(self, doc):
table = doc.cssselect('.templatetable')[0]
# cell x, cell y, data
data_cells = [
(2, 3, 'size_x', 'dim'),
(3, 3, 'size_y', 'dim'),
(4, 3, 'count_x', 'int'),
(5, 3, 'count_y', 'int'),
(1, 5, 'margin_t', 'dim'),
(3, 5, 'margin_l', 'dim'),
(2, 7, 'pitch_x', 'dim'),
(1, 7, 'pitch_y', 'dim'),
]
spec = {}
for c in data_cells:
txt = self._get_cell_by_xy(table, c[0], c[1]).text_content()
if c[3] == 'dim':
txt = self._get_dim_from_text(txt)
spec[c[2]] = txt
return spec
def scrape(self):
logging.debug("Scraping template: %s", self.item['lpcode'])
url = self.item['prodlink']
r = requests.get(url)
doc = fromstring(r.text)
self.item['desc'] = self._get_desc_text(doc)
spec = self._get_xy_template_spec(doc)
logging.debug(pformat(spec))
self.item['layout'] = spec
class InxFormatter(object):
def format_inx(self, item):
idcode = item['avery'] if item['avery'] else item['lpcode'].replace("/", "_")
size = " x ".join(item['size'])
sheet = "A4"
codes = []
if item['avery']:
codes.append(item['avery'])
codes.append(item['lpcode'])
codes = ", ".join(codes)
desc = "Labels" if not item['desc'] else item['desc']
s = "<_item value=\"{code}\">{size}mm {desc} ({per}/sheet, {sheet}) [{allcodes}]</_item>".format(
code=idcode,
size=size,
per=item['persheet'],
sheet=sheet,
allcodes=codes,
desc=desc
)
return s
class SpecFormatter(object):
def format_spec(self, item):
idcode = item['avery'] if item['avery'] else item['lpcode'].replace("/", "_")
sheet = 'a4'
layout = item['layout']
s = "{indent}{idcode:16}['reg', 'mm', '{sheet}', {ml}, {mt}, {sx}, {sy}, {px}, {py}, {nx}, {ny}, '{shape}'],".format(
indent=" " * 4,
idcode="'{}':".format(idcode),
sheet=sheet,
ml=layout['margin_l'],
mt=layout['margin_t'],
sx=layout['size_x'],
sy=layout['size_y'],
px=layout['pitch_x'],
py=layout['pitch_y'],
nx=layout['count_x'],
ny=layout['count_y'],
shape=item['shape'],
)
return s
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Download label template specifications from '
'LabelPlanet.co.uk')
parser.add_argument('-v', '--verbose', action='store_true',
help='verbose mode')
parser.add_argument('-t', '--type', action='store', required=True,
choices=['rrect', 'rect', 'circ', 'oval', 'square'],
help='label type')
parser.add_argument('--inx', action='store_true',
help='print INX items')
parser.add_argument('--spec', action='store_true',
help='print specification items')
args = parser.parse_args()
# avoid re-downloading pages
requests_cache.install_cache('demo_cache')
# convert type
label_type = {
'rrect': 'rectangular-rounded-corners',
'rect': 'rectangular-square-corners',
'circ': 'round',
'oval': 'oval',
'square': 'square'
}[args.type]
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
ff = FormatFinder()
spec_list = ff.get_list(label_type)
logging.debug("Got list of specs: ")
logging.debug(pformat(spec_list))
# get spec layouts + descs etc
for spec in spec_list:
spec_ripper = SpecRipper(spec)
spec_ripper.scrape()
if args.inx:
inx_f = InxFormatter()
for s in spec_list:
inx = inx_f.format_inx(s)
print(inx)
if args.spec:
spec_f = SpecFormatter()
for s in spec_list:
spec = spec_f.format_spec(s)
print(spec)
|
import unittest
from openbadges.verifier.actions.tasks import add_task
from openbadges.verifier.tasks import task_named
from openbadges.verifier.tasks.task_types import VALIDATE_EXPECTED_NODE_CLASS
from openbadges.verifier.tasks.validation import OBClasses
class ValidateLanguagePropertyTests(unittest.TestCase):
def test_validate_language_prop_basic(self):
options = {'max_validation_depth': 3}
badgeclass = {
'id': 'http://example.org/badgeclass',
'@language': 'en-US'
}
state = {'graph': [badgeclass]}
task = add_task(VALIDATE_EXPECTED_NODE_CLASS, node_id=badgeclass['id'],
expected_class=OBClasses.BadgeClass, depth=0)
result, message, actions = task_named(VALIDATE_EXPECTED_NODE_CLASS)(state, task, **options)
self.assertTrue(result)
l_actions = [a for a in actions if a.get('prop_name') == '@language']
self.assertEqual(len(l_actions), 1)
result, message, actions = task_named(l_actions[0]['name'])(state, l_actions[0], **options)
self.assertTrue(result)
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tabular offline (C)MDP methods."""
import copy
import time
from absl import logging
import cvxopt
import jax
import jax.config
import jax.numpy as jnp
import numpy as np
import scipy
import scipy.optimize
from constrained_optidice.tabular import mdp_util as util
cvxopt.solvers.options['show_progress'] = False
jax.config.update('jax_enable_x64', True)
def _compute_marginal_distribution(mdp, pi, regularizer=0):
"""Compute marginal distribution for the given policy pi, d^pi(s,a)."""
p0_s = np.zeros(mdp.num_states)
p0_s[mdp.initial_state] = 1
p0 = (p0_s[:, None] * pi).reshape(mdp.num_states * mdp.num_actions)
p_pi = (mdp.transition.reshape(mdp.num_states * mdp.num_actions,
mdp.num_states)[:, :, None] * pi).reshape(
mdp.num_states * mdp.num_actions,
mdp.num_states * mdp.num_actions)
d = np.ones(mdp.num_states * mdp.num_actions)
d /= np.sum(d)
d_diag = np.diag(d)
e = np.sqrt(d_diag) @ (
np.eye(mdp.num_states * mdp.num_actions) - mdp.gamma * p_pi)
q = np.linalg.solve(
e.T @ e + regularizer * np.eye(mdp.num_states * mdp.num_actions),
(1 - mdp.gamma) * p0)
w = q - mdp.gamma * p_pi @ q
assert np.all(w > -1e-6), w
d_pi = w * d
d_pi[w < 0] = 0
d_pi /= np.sum(d_pi)
return d_pi.reshape(mdp.num_states, mdp.num_actions)
def generate_baseline_policy(cmdp: util.CMDP,
behavior_cost_thresholds: np.ndarray,
optimality: float) -> np.ndarray:
"""Generate a baseline policy for the CMDP.
Args:
cmdp: a CMDP instance.
behavior_cost_thresholds: cost threshold for behavior policy. [num_costs]
optimality: optimality of behavior policy.
(0: uniform policy, 1: optimal policy)
Returns:
behavior policy. [num_states, num_actions]
"""
cmdp = copy.copy(cmdp)
cmdp.cost_thresholds = behavior_cost_thresholds
cmdp_no_reward = copy.copy(cmdp)
cmdp_no_reward.reward *= 0
pi_opt = util.solve_cmdp(cmdp)
pi_unif = np.ones((cmdp.num_states, cmdp.num_actions)) / cmdp.num_actions
v_opt = util.policy_evaluation(cmdp, pi_opt)[0][0]
q_opt = util.policy_evaluation(cmdp, pi_opt)[1]
v_unif = util.policy_evaluation(cmdp, pi_unif)[0][0]
v_final_target = v_opt * optimality + (1 - optimality) * v_unif
softmax_reduction_factor = 0.9
temperature = 1e-6
pi_soft = scipy.special.softmax(q_opt / temperature, axis=1)
while util.policy_evaluation(cmdp, pi_soft)[0][0] > v_final_target:
temperature /= softmax_reduction_factor
pi_soft = scipy.special.softmax(q_opt / temperature, axis=1)
pi_soft /= np.sum(pi_soft, axis=1, keepdims=True)
pi_soft = constrained_optidice(cmdp_no_reward, pi_soft, alpha=1)
r, _, c, _ = util.policy_evaluation(cmdp, pi_soft)
logging.info('temp=%.6f, R=%.3f, C=%.3f / v_opt=%.3f, f_target=%.3f',
temperature, r[0], c[0][0], v_opt, v_final_target)
assert np.all(pi_soft >= -1e-4)
pi_b = pi_soft.copy()
return pi_b
def optidice(mdp: util.MDP, pi_b: np.ndarray, alpha: float):
"""f-divergence regularized RL.
max_{d} E_d[R(s,a)] - alpha * E_{d_b}[f(d(s,a)/d_b(s,a))]
We assume that f(x) = 0.5 (x-1)^2.
Args:
    mdp: an MDP instance.
pi_b: behavior policy. [num_states, num_actions]
alpha: regularization hyperparameter for f-divergence.
Returns:
the resulting policy. [num_states, num_actions]
"""
d_b = _compute_marginal_distribution(mdp, pi_b).reshape(
mdp.num_states * mdp.num_actions) + 1e-6 # |S||A|
d_b /= np.sum(d_b)
p0 = np.eye(mdp.num_states)[mdp.initial_state] # |S|
r = np.array(mdp.reward.reshape(mdp.num_states * mdp.num_actions))
p = np.array(
mdp.transition.reshape(mdp.num_states * mdp.num_actions, mdp.num_states))
p = p / np.sum(p, axis=1, keepdims=True)
b = np.repeat(np.eye(mdp.num_states), mdp.num_actions, axis=0) # |S||A| x |S|
# Solve:
# minimize (1/2)*x^T P x + q^T x
# subject to G x <= h
# A x = b.
d_diag = np.diag(d_b)
qp_p = alpha * (d_diag)
qp_q = -d_diag @ r - alpha * d_b
qp_g = -np.eye(mdp.num_states * mdp.num_actions)
qp_h = np.zeros(mdp.num_states * mdp.num_actions)
qp_a = (b.T - mdp.gamma * p.T) @ d_diag
qp_b = (1 - mdp.gamma) * p0
cvxopt.solvers.options['show_progress'] = False
res = cvxopt.solvers.qp(
cvxopt.matrix(qp_p), cvxopt.matrix(qp_q), cvxopt.matrix(qp_g),
cvxopt.matrix(qp_h), cvxopt.matrix(qp_a), cvxopt.matrix(qp_b))
w = np.array(res['x'])[:, 0] # [num_states * num_actions]
assert np.all(w >= -1e-4), w
w = np.clip(w, 1e-10, np.inf)
pi = (w * d_b).reshape(mdp.num_states, mdp.num_actions) + 1e-10
pi /= np.sum(pi, axis=1, keepdims=True)
return w, d_b, pi
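# Illustrative sketch (hypothetical helper, not called anywhere): the generic
# cvxopt QP call that optidice() above and constrained_optidice() below rely on,
# shown on a tiny hand-made problem so the P, q, G, h, A, b mapping is concrete.
def _toy_qp_example():
  """Solve min 0.5 x^T P x + q^T x  s.t.  G x <= h, A x = b for two variables."""
  qp_p = np.eye(2)  # quadratic term
  qp_q = np.array([-1.0, -1.0])  # linear term
  qp_g = -np.eye(2)  # G x <= h encodes x >= 0
  qp_h = np.zeros(2)
  qp_a = np.ones((1, 2))  # A x = b encodes x_0 + x_1 = 1
  qp_b = np.array([1.0])
  res = cvxopt.solvers.qp(
      cvxopt.matrix(qp_p), cvxopt.matrix(qp_q), cvxopt.matrix(qp_g),
      cvxopt.matrix(qp_h), cvxopt.matrix(qp_a), cvxopt.matrix(qp_b))
  return np.array(res['x'])[:, 0]  # optimum is approximately [0.5, 0.5]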
def constrained_optidice(cmdp: util.CMDP,
pi_b: np.ndarray,
alpha: float):
"""f-divergence regularized constrained RL.
max_{d} E_d[R(s,a)] - alpha * E_{d_b}[f(d(s,a)/d_b(s,a))]
s.t. E_d[C(s,a)] <= hat{c}.
We assume that f(x) = 0.5 (x-1)^2.
Args:
cmdp: a CMDP instance.
pi_b: behavior policy.
alpha: regularization hyperparameter for f-divergence.
Returns:
the resulting policy. [num_states, num_actions]
"""
d_b = _compute_marginal_distribution(cmdp, pi_b).reshape(
cmdp.num_states * cmdp.num_actions) + 1e-6 # |S||A|
d_b /= np.sum(d_b)
p0 = np.eye(cmdp.num_states)[cmdp.initial_state] # |S|
p = np.array(
cmdp.transition.reshape(cmdp.num_states * cmdp.num_actions,
cmdp.num_states))
p = p / np.sum(p, axis=1, keepdims=True)
b = np.repeat(
np.eye(cmdp.num_states), cmdp.num_actions, axis=0) # |S||A| x |S|
r = np.array(cmdp.reward.reshape(cmdp.num_states * cmdp.num_actions))
c = np.array(
cmdp.costs.reshape(cmdp.num_costs, cmdp.num_states * cmdp.num_actions))
# Solve:
# minimize (1/2)*x^T P x + q^T x
# subject to G x <= h
# A x = b.
d_diag = np.diag(d_b)
qp_p = alpha * (d_diag)
qp_q = -d_diag @ r - alpha * d_b
qp_g = np.concatenate(
[c @ d_diag, -np.eye(cmdp.num_states * cmdp.num_actions)], axis=0)
qp_h = np.concatenate(
[cmdp.cost_thresholds,
np.zeros(cmdp.num_states * cmdp.num_actions)])
qp_a = (b.T - cmdp.gamma * p.T) @ d_diag
qp_b = (1 - cmdp.gamma) * p0
res = cvxopt.solvers.qp(
cvxopt.matrix(qp_p), cvxopt.matrix(qp_q), cvxopt.matrix(qp_g),
cvxopt.matrix(qp_h), cvxopt.matrix(qp_a), cvxopt.matrix(qp_b))
w = np.array(res['x'])[:, 0] # [num_states * num_actions]
assert np.all(w >= -1e-4), w
w = np.clip(w, 1e-10, np.inf)
pi = (w * d_b).reshape(cmdp.num_states, cmdp.num_actions) + 1e-10
pi /= np.sum(pi, axis=1, keepdims=True)
assert np.all(pi >= -1e-6), pi
return np.array(pi)
def cost_upper_bound(cmdp: util.CMDP,
w: np.ndarray,
d_b: np.ndarray,
epsilon: float):
"""Compute cost upper bound based on the DICE w.
Args:
cmdp: CMDP instance.
w: stationary distribution correction estimate of the target policy.
d_b: stationary distribution of the behavior policy.
epsilon: hyperparameter that controls conservatism. (epsilon > 0)
Returns:
(cost upper bound, additional information)
"""
if cmdp.num_costs != 1:
    raise NotImplementedError('Only cmdp.num_costs == 1 is supported.')
s0 = cmdp.initial_state
w = w.reshape(cmdp.num_states, cmdp.num_actions)
p_n = d_b.reshape(cmdp.num_states,
cmdp.num_actions)[:, :, None] * cmdp.transition + 1e-10
p_n = p_n.reshape(cmdp.num_states * cmdp.num_actions * cmdp.num_states)
c = cmdp.costs[0, :, :] # |S| x |A|
def loss_fn(variables):
tau, x = variables[0], variables[1:]
l = (1 - cmdp.gamma) * x[s0] + w[:, :, None] * (
c[:, :, None] + cmdp.gamma * x[None, None, :] - x[:, None, None])
l = l.reshape(cmdp.num_states * cmdp.num_actions * cmdp.num_states)
loss = tau * jax.nn.logsumexp(jnp.log(p_n) + l / tau) + tau * epsilon
return loss
loss_jit = jax.jit(loss_fn)
grad_jit = jax.jit(jax.grad(loss_fn))
f = lambda x: np.array(loss_jit(x))
jac = lambda x: np.array(grad_jit(x))
# Minimize loss_fn.
x0 = np.ones(cmdp.num_states + 1)
lb, ub = -np.ones_like(x0) * np.inf, np.ones_like(x0) * np.inf
lb[0] = 0 # tau >= 0
bounds = scipy.optimize.Bounds(lb, ub, keep_feasible=False)
solution = scipy.optimize.minimize(
f,
x0=x0,
jac=jac,
bounds=bounds,
options={
'maxiter': 10000,
'ftol': 1e-10,
'gtol': 1e-10,
})
# Additional information.
tau, x = solution.x[0], solution.x[1:]
l = (1 - cmdp.gamma) * x[s0] + w[:, :, None] * (
c[:, :, None] + cmdp.gamma * x[None, None, :] - x[:, None, None])
l = l.reshape(cmdp.num_states * cmdp.num_actions * cmdp.num_states)
loss = tau * scipy.special.logsumexp(np.log(p_n) + l / tau) + tau * epsilon
p = scipy.special.softmax(np.log(p_n) + (l / tau)) + 1e-10
kl = np.sum(p * np.log(p / p_n))
p_sa = np.sum(
p.reshape(cmdp.num_states, cmdp.num_actions, cmdp.num_states), axis=-1)
cost_ub = np.sum(p_sa * w * c)
info = {
'loss': loss,
'kl': kl,
'cost_ub': cost_ub,
'p': p,
'gap': loss - cost_ub
}
return np.array([loss]), info
def conservative_constrained_optidice(cmdp, pi_b, alpha, epsilon, verbose=0):
"""f-divergence regularized conservative constrained RL.
max_{d} E_d[R(s,a)] - alpha * E_{d_b}[f(d(s,a)/d_b(s,a))]
s.t. (cost upper bound) <= hat{c}.
We assume that f(x) = 0.5 (x-1)^2.
Args:
cmdp: a CMDP instance.
pi_b: behavior policy.
alpha: regularization hyperparameter for f-divergence.
epsilon: degree of conservatism. (0: cost upper bound = E_d[C(s,a)]).
verbose: whether using logging or not.
Returns:
the resulting policy. [num_states, num_actions]
"""
if cmdp.num_costs != 1:
    raise NotImplementedError('Only cmdp.num_costs == 1 is supported.')
lamb_left = np.array([0.0])
lamb_right = np.array([10.0])
start_time = time.time()
for i in range(15):
lamb = (lamb_left + lamb_right) * 0.5
r_lamb = cmdp.reward - np.sum(lamb[:, None, None] * cmdp.costs, axis=0)
mdp = util.MDP(cmdp.num_states, cmdp.num_actions, cmdp.transition, r_lamb,
cmdp.gamma)
w, d_b, _ = optidice(mdp, pi_b, alpha)
cost_mean = cmdp.costs.reshape(cmdp.num_costs, cmdp.num_states *
cmdp.num_actions).dot(w * d_b)
cost_ub, info = cost_upper_bound(cmdp, w, d_b, epsilon)
if verbose:
logging.info(
'[%g] Lamb=%g, cost_ub=%.6g, gap=%.6g, kl=%.6g, cost_mean=%.6g / '
'elapsed_time=%.3g', i, lamb, cost_ub, info['gap'], info['kl'],
cost_mean,
time.time() - start_time)
if cost_ub[0] > cmdp.cost_thresholds[0]:
lamb_left = lamb
else:
lamb_right = lamb
lamb = lamb_right
r_lamb = cmdp.reward - np.sum(lamb[:, None, None] * cmdp.costs, axis=0)
mdp = util.MDP(cmdp.num_states, cmdp.num_actions, cmdp.transition, r_lamb,
cmdp.gamma)
w, d_b, pi = optidice(mdp, pi_b, alpha)
return pi
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 15 09:55:13 2018
@author: Administrator
"""
import numpy as np
import tensorflow as tf
import math
class Gauss(object):
def __init__(self,u=0.0, e=1):
'''
        :param u: mean of the Gaussian
        :param e: standard deviation of the Gaussian
'''
self.u=u
self.e=e
def gauss(self,x,y,target_x=0.0, target_y=0.0):
'''
        :param x: x-coordinate of the site (e.g. longitude)
        :param y: y-coordinate of the site (e.g. latitude)
        :param target_x: x-coordinate of the target site
        :param target_y: y-coordinate of the target site
        :return: Gaussian weight for the site
'''
x-=target_x
y-=target_y
A=(1.0/( 2.0 * 3.141592654 * self.e * self.e))
B=math.exp(-((x-self.u) * (x-self.u) + (y-self.u) * (y-self.u))/(2 * self.e*self.e))
return A*B
def add_result(self,encoder_list,weights):
# sum_=sum([encoder_list[i]*weights[0] for i in range(len(weights))])
encoder_list=[tf.multiply(encoder_list[i],weights[i]) for i in range(len(weights))]
sum_=tf.reduce_sum(tf.convert_to_tensor(encoder_list),axis=0)
return sum_
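# Usage sketch (illustrative only): combine per-site encoder outputs with the
# normalised gauss() weights computed for one target site, e.g.
#   weights = [gauss.gauss(s[0], s[1], tx, ty) for s in site]   # one weight per site
#   fused = gauss.add_result(encoder_list, weights)             # weighted-sum tensor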
if __name__=='__main__':
import numpy as np
site=[[121.412, 31.1654],[118.775,32.0],[120.543,31.2994],
[120.87,32.02],[120.294,31.56],[119.9633,31.762],
[119.6707, 32.1875],[120.2072, 30.2111],[121.554, 29.8906],
[120.576, 30.007],[120.1,30.8867],[120.726, 30.7478],
[121.419, 28.6542], [122.3094, 29.9558]]
site=np.array(site)
gauss=Gauss()
sites=[gauss.gauss(x=s[0],y=s[1],target_x=site[0,0],target_y=site[0,1]) for s in site]
print(np.array(sites)/sum(sites))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from scipy.signal import deconvolve
__all__ = ['xcorr', 'levinson', 'lsf2poly', 'poly2lsf']
def xcorr(x, y=None, maxlags=None, norm='biased'):
"""Cross-correlation using np.correlate
Estimates the cross-correlation (and autocorrelation) sequence of a random
process of length N. By default, there is no normalisation and the output
    sequence of the cross-correlation has a length 2*N-1.
:param array x: first data array of length N
:param array y: second data array of length N. If not specified, computes the
autocorrelation.
:param int maxlags: compute cross correlation between [-maxlags:maxlags]
when maxlags is not specified, the range of lags is [-N+1:N-1].
    :param str norm: normalisation in ['biased', 'unbiased', None, 'coeff']
The true cross-correlation sequence is
.. math:: r_{xy}[m] = E(x[n+m].y^*[n]) = E(x[n].y^*[n-m])
However, in practice, only a finite segment of one realization of the
infinite-length random process is available.
The correlation is estimated using np.correlate(x,y,'full').
Normalisation is handled by this function using the following cases:
* 'biased': Biased estimate of the cross-correlation function
* 'unbiased': Unbiased estimate of the cross-correlation function
* 'coeff': Normalizes the sequence so the autocorrelations at zero
lag is 1.0.
:return:
* a np.array containing the cross-correlation sequence (length 2*N-1)
* lags vector
.. note:: If x and y are not the same length, the shorter vector is
zero-padded to the length of the longer vector.
"""
N = len(x)
if y is None:
y = x
assert len(x) == len(y), 'x and y must have the same length. Add zeros if needed'
if maxlags is None:
maxlags = N-1
lags = np.arange(0, 2*N-1)
else:
assert maxlags <= N, 'maxlags must be less than data length'
lags = np.arange(N-maxlags-1, N+maxlags)
res = np.correlate(x, y, mode='full')
if norm == 'biased':
Nf = float(N)
res = res[lags] / float(N) # do not use /= !!
elif norm == 'unbiased':
res = res[lags] / (float(N)-abs(np.arange(-N+1, N)))[lags]
elif norm == 'coeff':
        Nf = float(N)
        # flat RMS of x and y, so the zero-lag autocorrelation coefficient is 1.0
        rms = np.sqrt(np.mean(np.abs(x) ** 2)) * np.sqrt(np.mean(np.abs(y) ** 2))
        res = res[lags] / rms / Nf
else:
res = res[lags]
lags = np.arange(-maxlags, maxlags+1)
return res, lags
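# Minimal usage sketch (illustrative helper, not part of the public API): the
# autocorrelation of a short signal with the default 'biased' normalisation.
def _xcorr_demo():
    x = np.array([1., 2., 3., 4.])
    r, lags = xcorr(x, maxlags=3)  # autocorrelation over lags -3..3
    return r, lags  # both have length 2*len(x) - 1 == 7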
def levinson(r, order=None, allow_singularity=False):
r"""Levinson-Durbin recursion.
Find the coefficients of a length(r)-1 order autoregressive linear process
:param r: autocorrelation sequence of length N + 1 (first element being the zero-lag autocorrelation)
:param order: requested order of the autoregressive coefficients. default is N.
:param allow_singularity: false by default. Other implementations may be True (e.g., octave)
:return:
* the `N+1` autoregressive coefficients :math:`A=(1, a_1...a_N)`
* the prediction errors
* the `N` reflections coefficients values
This algorithm solves the set of complex linear simultaneous equations
using Levinson algorithm.
.. math::
\bold{T}_M \left( \begin{array}{c} 1 \\ \bold{a}_M \end{array} \right) =
\left( \begin{array}{c} \rho_M \\ \bold{0}_M \end{array} \right)
where :math:`\bold{T}_M` is a Hermitian Toeplitz matrix with elements
:math:`T_0, T_1, \dots ,T_M`.
.. note:: Solving this equations by Gaussian elimination would
require :math:`M^3` operations whereas the levinson algorithm
requires :math:`M^2+M` additions and :math:`M^2+M` multiplications.
This is equivalent to solve the following symmetric Toeplitz system of
linear equations
.. math::
\left( \begin{array}{cccc}
r_1 & r_2^* & \dots & r_{n}^*\\
r_2 & r_1^* & \dots & r_{n-1}^*\\
\dots & \dots & \dots & \dots\\
r_n & \dots & r_2 & r_1 \end{array} \right)
\left( \begin{array}{cccc}
a_2\\
a_3 \\
\dots \\
a_{N+1} \end{array} \right)
=
\left( \begin{array}{cccc}
-r_2\\
-r_3 \\
\dots \\
-r_{N+1} \end{array} \right)
where :math:`r = (r_1 ... r_{N+1})` is the input autocorrelation vector, and
:math:`r_i^*` denotes the complex conjugate of :math:`r_i`. The input r is typically
a vector of autocorrelation coefficients where lag 0 is the first
element :math:`r_1`.
"""
#from np import isrealobj
T0 = np.real(r[0])
T = r[1:]
M = len(T)
if order is None:
M = len(T)
else:
assert order <= M, 'order must be less than size of the input data'
M = order
realdata = np.isrealobj(r)
if realdata is True:
A = np.zeros(M, dtype=float)
ref = np.zeros(M, dtype=float)
else:
A = np.zeros(M, dtype=complex)
ref = np.zeros(M, dtype=complex)
P = T0
for k in range(0, M):
save = T[k]
if k == 0:
temp = -save / P
else:
#save += sum([A[j]*T[k-j-1] for j in range(0,k)])
for j in range(0, k):
save = save + A[j] * T[k-j-1]
temp = -save / P
if realdata:
P = P * (1. - temp**2.)
else:
P = P * (1. - (temp.real**2+temp.imag**2))
if P <= 0 and allow_singularity==False:
raise ValueError("singular matrix")
A[k] = temp
ref[k] = temp # save reflection coeff at each step
if k == 0:
continue
khalf = (k+1)//2
if realdata is True:
for j in range(0, khalf):
kj = k-j-1
save = A[j]
A[j] = save + temp * A[kj]
if j != kj:
A[kj] += temp*save
else:
for j in range(0, khalf):
kj = k-j-1
save = A[j]
A[j] = save + temp * A[kj].conjugate()
if j != kj:
A[kj] = A[kj] + temp * save.conjugate()
return A, P, ref
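# Minimal usage sketch (illustrative helper): first-order fit from a toy
# autocorrelation sequence r = [1.0, 0.5].
def _levinson_demo():
    a, e, k = levinson([1.0, 0.5])
    return a, e, k  # a == [-0.5], prediction error e == 0.75, reflection k == [-0.5]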
def lsf2poly(lsf):
"""Convert line spectral frequencies to prediction filter coefficients
returns a vector a containing the prediction filter coefficients from a vector lsf of line spectral frequencies.
.. doctest::
>>> lsf = [0.7842 , 1.5605 , 1.8776 , 1.8984, 2.3593]
>>> a = lsf2poly(lsf)
array([ 1.00000000e+00, 6.14837835e-01, 9.89884967e-01,
9.31594056e-05, 3.13713832e-03, -8.12002261e-03 ])
"""
# Reference: A.M. Kondoz, "Digital Speech: Coding for Low Bit Rate Communications
# Systems" John Wiley & Sons 1994 ,Chapter 4
# Line spectral frequencies must be real.
lsf = np.array(lsf)
if max(lsf) > np.pi or min(lsf) < 0:
raise ValueError('Line spectral frequencies must be between 0 and pi.')
p = len(lsf) # model order
# Form zeros using the LSFs and unit amplitudes
z = np.exp(1.j * lsf)
# Separate the zeros to those belonging to P and Q
rQ = z[0::2]
rP = z[1::2]
# Include the conjugates as well
rQ = np.concatenate((rQ, rQ.conjugate()))
rP = np.concatenate((rP, rP.conjugate()))
# Form the polynomials P and Q, note that these should be real
    Q = np.poly(rQ)
    P = np.poly(rP)
# Form the sum and difference filters by including known roots at z = 1 and
# z = -1
if p % 2:
# Odd order: z = +1 and z = -1 are roots of the difference filter, P1(z)
P1 = np.convolve(P, [1, 0, -1])
Q1 = Q
else:
# Even order: z = -1 is a root of the sum filter, Q1(z) and z = 1 is a
# root of the difference filter, P1(z)
P1 = np.convolve(P, [1, -1])
Q1 = np.convolve(Q, [1, 1])
# Prediction polynomial is formed by averaging P1 and Q1
a = .5 * (P1 + Q1)
return a[0:-1:1] # do not return last element
def poly2lsf(a):
"""Prediction polynomial to line spectral frequencies.
converts the prediction polynomial specified by A,
into the corresponding line spectral frequencies, LSF.
normalizes the prediction polynomial by A(1).
"""
#Line spectral frequencies are not defined for complex polynomials.
# Normalize the polynomial
a = np.array(a)
if a[0] != 1:
a/=a[0]
if max(np.abs(np.roots(a))) >= 1.0:
        raise ValueError('The polynomial must have all roots inside of the unit circle.')
    # Form the sum and difference filters
p = len(a)-1 # The leading one in the polynomial is not used
a1 = np.concatenate((a, np.array([0])))
a2 = a1[-1::-1]
P1 = a1 - a2 # Difference filter
Q1 = a1 + a2 # Sum Filter
# If order is even, remove the known root at z = 1 for P1 and z = -1 for Q1
# If odd, remove both the roots from P1
if p%2: # Odd order
P, r = deconvolve(P1,[1, 0 ,-1])
Q = Q1
else: # Even order
P, r = deconvolve(P1, [1, -1])
Q, r = deconvolve(Q1, [1, 1])
rP = np.roots(P)
rQ = np.roots(Q)
aP = np.angle(rP[1::2])
aQ = np.angle(rQ[1::2])
lsf = sorted(np.concatenate((-aP,-aQ)))
return lsf
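# Round-trip sketch (illustrative helper): converting LSFs to a prediction
# polynomial and back should recover the original frequencies up to numerical error.
def _lsf_roundtrip_demo():
    lsf = [0.7842, 1.5605, 1.8776, 1.8984, 2.3593]
    a = lsf2poly(lsf)
    return poly2lsf(a)  # approximately the original lsf values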
|
import uui
import network
from wlanstate import wlanState
import machine
class WifiDialog(uui.Screen):
def __init__(self, parent, ssid, callback):
super().__init__(parent)
self.callback = callback
self.ssid = ssid
self.add(uui.TextPanel(self, b"SSID: %s" % ssid))
self.add(uui.TextPanel(self, b"Pass:")).setPos(0, 1)
self.add(uui.TextEntry(self, self.onEnter, 20), True).setPos(0, 2)
def onEnter(self, pw):
self.destroy()
self.callback(self.ssid, pw)
def unfocus(self):
super().unfocus()
self.destroy()
class WifiScreen(uui.InteractiveScreen):
def __init__(self, parent):
super().__init__(parent, 20, 4)
self.wlan = network.WLAN(network.STA_IF)
self.wifiEnable = self.add(uui.CheckBox(self, self.onWiFiChange, machine.nvs_getint("wifiEnabled"), format = b"Enabled [%s]"))
self.wifiEnable.setPos(1, 0)
self.wifiButton = self.add(uui.Button(self, b"WiFi: %s" % (wlanState.ssid or b"None"), self.onWiFiOpen), False)
self.wifiButton.setPos(1, 1)
self.ipstate = self.add(uui.TextPanel(self, b"IP: %s" % self.wlan.ifconfig()[0]))
self.ipstate.setPos(1, 2)
self.wifiStatus = self.add(uui.TextPanel(self))
def onWiFiChange(self, value):
machine.nvs_setint("wifiEnabled", value and 1 or 0)
self.wlan.active(value)
def onWiFiSelect(self, ap):
self.setScreen(WifiDialog(self, ap, self.onWiFiConnect))
def onWiFiConnect(self, ssid, pw):
self.wlan.connect(ssid, pw)
def onWiFiOpen(self):
if(not machine.nvs_getint("wifiEnabled")):
return
menu = uui.MenuScreen(self, 4, self.onWiFiSelect)
aps = self.wlan.scan()
for ap in aps:
menu.addEntry(ap[0])
self.setScreen(menu)
def onKeyDown(self, key):
super().onKeyDown(key)
if(key in b"B"):
self.destroy()
class NetworkScreen(uui.InteractiveScreen):
def __init__(self, parent):
super().__init__(parent, 20, 4)
self.telnetEnable = self.add(uui.CheckBox(self, self.onTelnetChange, machine.nvs_getint("telnetEnabled"), format = b"Telnet [%s]"))
self.telnetEnable.setPos(1, 0)
self.telnetEnable = self.add(uui.CheckBox(self, self.onFTPChange, machine.nvs_getint("ftpEnabled"), format = b"FTP [%s]"))
self.telnetEnable.setPos(1, 1)
log = self.add(uui.TextEntry(self, self.onLogin, 20, name = b"L:"), False)
log.setPos(1, 2)
log.setValue(machine.nvs_getstr("networkLogin").encode())
pas = self.add(uui.TextEntry(self, self.onPassword, 20, name = b"P:"), False)
pas.setPos(1, 3)
def _update(self):
user = machine.nvs_getstr("networkLogin")
password = machine.nvs_getstr("networkPassword")
if(machine.nvs_getint("ftpEnabled")):
network.ftp.start(user = user, password = password)
else:
network.ftp.stop()
if(machine.nvs_getint("telnetEnabled")):
network.telnet.start(user = user, password = password)
else:
network.telnet.stop()
def onPassword(self, pwd):
machine.nvs_setstr("networkPassword", pwd)
def onLogin(self, login):
machine.nvs_setstr("networkLogin", login)
def onFTPChange(self, value):
machine.nvs_setint("ftpEnabled", value and 1 or 0)
def onTelnetChange(self, value):
machine.nvs_setint("telnetEnabled", value and 1 or 0)
def onKeyDown(self, key):
super().onKeyDown(key)
if(key in b"B"):
self._update()
self.destroy()
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/usr/bin/env python2.7
"""A client that talks to tensorflow_model_server.
Typical usage example:
xgboost_client.py --num_tests=100 --server=localhost:9000
"""
from __future__ import print_function
import sys
import time
import grpc
import numpy
import random
from grpc._cython.cygrpc import CompressionAlgorithm
from grpc._cython.cygrpc import CompressionLevel
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from google.protobuf import text_format
import tensorflow as tf
tf.compat.v1.app.flags.DEFINE_integer('num_tests', 1, 'Number of tests')
tf.compat.v1.app.flags.DEFINE_string('server', '', 'PredictionService host:port')
FLAGS = tf.compat.v1.app.flags.FLAGS
def do_inference(hostport, num_tests):
"""Tests PredictionService with requests.
Args:
hostport: Host:port address of the PredictionService.
num_tests: Number of test images to use.
Returns:
void.
"""
host, port = hostport.split(':')
options = [
("grpc.default_compression_algorithm", CompressionAlgorithm.gzip),
("grpc.grpc.default_compression_level", CompressionLevel.high)
]
channel = grpc.insecure_channel(hostport, options)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
for _ in range(num_tests):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'test'
# request.model_spec.version.value = 1
xgboost_feature_score_1 = predict_pb2.FeatureScore(
id=[2, 34, 2000, 2206],
score=[1, 1, 0.646667, 0.727273],
)
xgboost_features = [xgboost_feature_score_1 for i in range(0, 1)]
xgboost_feature_vector = predict_pb2.FeatureScoreVector(feature_score=xgboost_features)
request.inputs['xgboost_features'].CopyFrom(xgboost_feature_vector)
response = stub.Predict(request, 30.0)
print(response)
def main(_):
if FLAGS.num_tests > 10000:
print('num_tests should not be greater than 10k')
return
if not FLAGS.server:
print('please specify server host:port')
return
do_inference(FLAGS.server, FLAGS.num_tests)
if __name__ == '__main__':
tf.compat.v1.app.run()
|
# Python Class 2406
# Lesson 12 Problem 1
# Author: snowapple (471208)
class Game:
def __init__(self, n):
'''__init__(n) -> Game
creates an instance of the Game class'''
if n% 2 == 0: #n has to be odd
print('Please enter an odd n!')
raise ValueError
self.n = n #size of side of board
self.board = [[0 for x in range(self.n)] for x in range(self.n)] #holds current state of the board, list of columns
self.is_won = 0#is_won is 0 if game is not won, and 1 or 2 if won by player 1 or 2 respectively
def __str__(self):
'''__str__() -> str
returns a str representation of the current state of the board'''
ans = ""
print_dict = {0:'. ', 1:'X ', 2:'O '} #On the board, these numbers represent the pieces
for i in range(self.n):#row
row = ""
for j in range(self.n):#column
row += print_dict[self.board[j][i]] #prints the board piece to where the player puts it
ans = row + "\n" + ans
title = ""
for i in range(self.n):
title += str(i) + " "
ans = '\n' + title + '\n' +ans
return ans
def clear_board(self):
'''clear_board() -> none
clears the board by setting all entries to 0'''
self.is_won = 0
self.board = [[0 for x in range(self.n)] for x in range(self.n)]
def put(self,player_num,column):#takes care of errors
'''put(player_num,column) -> boolean
puts a piece of type player_num in the specified column,
returns boolean which is true if the put was successful, otherwise false'''
if self.is_won != 0: #if the game has been won
print('Please start a new game as player ' + str(self.is_won) + ' has already won!')
return False
if player_num not in [1,2]: #if a valid player number is not entered
print('Please enter 1 or 2 for the player number!')
return False
if column < 0 or column >= self.n: #if a valid column is not entered
print('Please enter a valid column!')
return False
try:
row = self.board[column].index(0)
self.board[column][row]= player_num
self.is_won = self.win_index(column,row)
return True
except ValueError:
print('Column is full!')
return False
def win_index(self,column_index,row_index):
'''win_index(column_index,row_index) -> int
checks if piece at (column_index, row_index) is part of a connect 4
returns player_num if the piece is part of a connect4, and 0 otherwise'''
#uses axis_check to check all of the axes
player_num = self.board[column_index][row_index]
#check up/down axis
col = self.board[column_index]
col_win = self.axis_check(col,row_index,player_num) #checks the row since it goes up/down
if col_win != 0: #checks to see if won
return col_win
#check left/right axis
row = [self.board[i][row_index] for i in range(self.n)]
row_win = self.axis_check(row,column_index,player_num) #checks column since it goes left/right
if row_win != 0: #checks to see if won
return row_win
#down-left/up-right diagonal axis
axis = [player_num]
index = 0
#down-left part
curr_col_index = column_index - 1 #goes left so subtract one
curr_row_index = row_index - 1 #goes down so subtract one
while curr_row_index >= 0 and curr_col_index >= 0: #until you go to the most down-left part of the board
axis = [self.board[curr_col_index][curr_row_index]] + axis
curr_col_index -= 1
curr_row_index -= 1
index += 1
#up-right part
curr_col_index = column_index + 1 #goes right so add one
curr_row_index = row_index + 1 #goes up so add one
while curr_row_index < self.n and curr_col_index < self.n: #until you go to the most up-right part of the board
axis = axis +[self.board[curr_col_index][curr_row_index]]
curr_col_index += 1
curr_row_index += 1
diag_win = self.axis_check(axis,index,player_num)
if diag_win != 0: #checks to see if won
return diag_win
#up-left/down-right diagonal axis
axis = [player_num]
index = 0
#up-left part
curr_col_index = column_index - 1 #goes left so minus one
curr_row_index = row_index + 1 #goes up so plus one
while curr_row_index < self.n and curr_col_index >= 0: #until you go to the most up-left part of the board
axis = [self.board[curr_col_index][curr_row_index]] + axis
curr_col_index -= 1
curr_row_index += 1
index += 1
#down-right part
curr_col_index = column_index + 1 #goes right so plus one
curr_row_index = row_index - 1 # goes down so minus one
while curr_row_index >= 0 and curr_col_index < self.n: #until you go to the most down-right part of the board
axis = axis +[self.board[curr_col_index][curr_row_index]]
curr_col_index += 1
curr_row_index -= 1
diag_win = self.axis_check(axis,index,player_num)
if diag_win != 0: #checks to see if won
return diag_win
return 0
def axis_check(self,axis, index, player_num):
'''axis_check(axis, index, player_num) -> int
checks if index in axis (list) is part of a connect4
returns player_num if the index is indeed part of a connect4 and 0 otherwise'''
#takes the index and sees if the piece is part of a connect four and generalizes it for the four axes(up/down, left/right, two diagonals)
down = index
up = index
for i in range(index,-1, -1):
if axis[i] == player_num:
down = i
else:
break
for i in range(index,len(axis)):
if axis[i] == player_num:
up = i
else:
break
if up - down + 1 >= 4:
# print('Player ' + str(player_num) + ' has won the game!')
return player_num
return 0
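# Worked example (comment only): for axis = [0, 1, 1, 1, 1, 2], index = 2 and
# player_num = 1, axis_check scans down to index 1 and up to index 4, finds a
# run of length 4 and therefore returns 1 (player 1 wins along that axis).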
game = Game(7)
labels = {1:'X', 2:'O'}
play = True
while play:
#setting up the board and players
game.clear_board()
name1 = input('Player ' + labels[1] + ' , enter your name: ')
name2 = input('Player ' + labels[2] + ' , enter your name: ')
names = {1:name1, 2:name2}
print(game)
turn = 1
while game.is_won == 0:
success = False
while not success:
#until someone wins each player takes turns
col_choice = int(input(names[turn] + ", you're " + labels[turn] + ". What column do you want to play in? "))
success = game.put(turn,col_choice)
print(game)
turn = turn % 2 +1 #to take turns between players
print("Congratulations, " + names[game.is_won]+", you won!")
#if players want to play again
play_another = ""
while play_another not in ['y','n']:
play_another = input("Do you want to play another game? [Enter 'y' for yes, 'n' for no]: ")
if play_another == 'n':
play = False
|
from util import reader_util, writer_util, data_util, config
from services import data_generator, solve
import data
import pprint, logging
if __name__ == '__main__':
data.init()
# Import engine subtypes
reader_util.import_engine_subtypes()
# Import removal information
data.removals_info, data.aos_cost = reader_util.import_removal_info(
filepath='data_to_read/removal_info.csv',
removals_data_storage=data.removals_info,
aos_cost_data_storage=data.aos_cost)
if config.first_run:
logging.info("FIRST_RUN is set to TRUE. All possible states and actions are going to be generated. This may take awhile.")
data_generator.generate_all_possible_states()
data_generator.generate_all_possible_actions()
for engine_subtype in data.engine_subtypes:
data.need_to_update_removal_info[engine_subtype] = True
logging.info("All files needed for future runs have been created. Please set FIRST_RUN to FALSE for any future runs on this machine.")
else:
# Import engine information
data.engines_info = reader_util.import_engine_info(
filepath='data_to_read/engine_info.csv',
data_storage=data.engines_info)
data_util.validate_removal_and_engine_info()
# Import engine subtype data
reader_util.import_engine_subtype_data()
data_util.validate_engine_subtype_data()
# Generate all possible removal situations if removal info has been updated
for engine_subtype in data.engine_subtypes:
if data.need_to_update_removal_info[engine_subtype]:
data_generator.generate_all_possible_removal_situations(
engine_subtype=engine_subtype)
# Import all possible states
data.all_possible_states = reader_util.import_all_possible_states(
filepath='data_to_read/all_possible_states.csv',
data_storage=data.all_possible_states)
# Import all possible actions
data.all_possible_actions = reader_util.import_all_possible_actions(
data_storage=data.all_possible_actions)
data_util.minimize_states_and_actions_to_iterate()
# Import all possible removal situations
data.all_possible_removal_situations = reader_util.import_all_possible_removal_situations(
data_storage=data.all_possible_removal_situations)
# Import future data for regression
reader_util.import_future_data()
# Solve Finite Horizon MDP
for engine_subtype in data.engine_subtypes:
solver = solve.FiniteHorizonMDPSolver(engine_subtype)
solver.solve_MDP()
|
# Copyright 2016 Rackspace US Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from six.moves.urllib import parse
from tempest.lib.common import rest_client
class ListenersClient(rest_client.RestClient):
"""Tests Listeners API."""
_LISTENERS_URL = "v1/loadbalancers/{lb_id}/listeners"
_LISTENER_URL = "{base_url}/{{listener_id}}".format(
base_url=_LISTENERS_URL)
_LISTENER_STATS_URL = "{base_url}/stats".format(base_url=_LISTENER_URL)
def list_listeners(self, lb_id, params=None):
"""List all listeners."""
url = self._LISTENERS_URL.format(lb_id=lb_id)
if params:
url = '{0}?{1}'.format(url, parse.urlencode(params))
resp, body = self.get(url)
body = jsonutils.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBodyList(resp, body)
def get_listener(self, lb_id, listener_id, params=None):
"""Get listener details."""
url = self._LISTENER_URL.format(lb_id=lb_id, listener_id=listener_id)
if params:
url = '{0}?{1}'.format(url, parse.urlencode(params))
resp, body = self.get(url)
body = jsonutils.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def create_listener(self, lb_id, **kwargs):
"""Create a listener build."""
url = self._LISTENERS_URL.format(lb_id=lb_id)
post_body = jsonutils.dumps(kwargs)
resp, body = self.post(url, post_body)
body = jsonutils.loads(body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def update_listener(self, lb_id, listener_id, **kwargs):
"""Update an listener build."""
url = self._LISTENER_URL.format(lb_id=lb_id, listener_id=listener_id)
put_body = jsonutils.dumps(kwargs)
resp, body = self.put(url, put_body)
body = jsonutils.loads(body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_listener(self, lb_id, listener_id):
"""Delete an existing listener build."""
url = self._LISTENER_URL.format(lb_id=lb_id, listener_id=listener_id)
resp, body = self.delete(url)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def get_listener_stats(self, lb_id, listener_id, params=None):
"""Get listener statistics."""
url = self._LISTENER_STATS_URL.format(lb_id=lb_id,
listener_id=listener_id)
if params:
url = '{0}?{1}'.format(url, parse.urlencode(params))
resp, body = self.get(url)
body = jsonutils.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
|
import unittest
from rdflib import Literal
from light9.namespaces import L9
from light9.collector.device import toOutputAttrs, resolve
class TestUnknownDevice(unittest.TestCase):
def testFails(self):
self.assertRaises(NotImplementedError, toOutputAttrs, L9['bogus'], {})
class TestColorStrip(unittest.TestCase):
def testConvertDeviceToOutputAttrs(self):
out = toOutputAttrs(L9['ChauvetColorStrip'],
{L9['color']: Literal('#ff0000')})
self.assertEqual(
{
L9['mode']: 215,
L9['red']: 255,
L9['green']: 0,
L9['blue']: 0
}, out)
class TestDimmer(unittest.TestCase):
def testConvert(self):
self.assertEqual({L9['level']: 127},
toOutputAttrs(L9['SimpleDimmer'],
{L9['brightness']: .5}))
class TestMini15(unittest.TestCase):
def testConvertColor(self):
out = toOutputAttrs(L9['Mini15'], {L9['color']: '#010203'})
self.assertEqual(255, out[L9['dimmer']])
self.assertEqual(1, out[L9['red']])
self.assertEqual(2, out[L9['green']])
self.assertEqual(3, out[L9['blue']])
def testConvertRotation(self):
out = toOutputAttrs(L9['Mini15'], {
L9['rx']: Literal(90),
L9['ry']: Literal(45)
})
self.assertEqual(42, out[L9['xRotation']])
self.assertEqual(127, out[L9['xFine']])
self.assertEqual(47, out[L9['yRotation']])
self.assertEqual(207, out[L9['yFine']])
self.assertEqual(0, out[L9['rotationSpeed']])
class TestResolve(unittest.TestCase):
def testMaxes1Color(self):
# do not delete - this one catches a bug in the rgb_to_hex(...) lines
self.assertEqual('#ff0300', resolve(None, L9['color'], ['#ff0300']))
def testMaxes2Colors(self):
self.assertEqual('#ff0400',
resolve(None, L9['color'], ['#ff0300', '#000400']))
def testMaxes3Colors(self):
self.assertEqual(
'#112233',
resolve(None, L9['color'], ['#110000', '#002200', '#000033']))
|
#!/usr/bin/env python
"""
Read adapter data while decoding ext2 custom fields
"""
import sys
from json import dumps
from common.utils.json_format import MessageToDict
from voltha.protos import adapter_pb2
adapter = adapter_pb2.Adapter()
binary = sys.stdin.read()
adapter.ParseFromString(binary)
print dumps(MessageToDict(adapter, strict_any_handling=False))
|
from __future__ import annotations
import enum
__all__ = ("Event",)
class Event(str, enum.Enum):
READY = "READY"
WILDCARD = "*"
CHANNEL_CREATE = "CHANNEL_CREATE"
CHANNEL_UPDATE = "CHANNEL_UPDATE"
CHANNEL_DELETE = "CHANNEL_DELETE"
CHANNEL_PINS_UPDATE = "CHANNEL_PINS_UPDATE"
THREAD_CREATE = "THREAD_CREATE"
THREAD_UPDATE = "THREAD_UPDATE"
THREAD_DELETE = "THREAD_DELETE"
THREAD_LIST_SYNC = "THREAD_LIST_SYNC"
THREAD_MEMBER_UPDATE = "THREAD_MEMBER_UPDATE"
THREAD_MEMBERS_UPDATE = "THREAD_MEMBERS_UPDATE"
GUILD_CREATE = "GUILD_CREATE"
GUILD_UPDATE = "GUILD_UPDATE"
GUILD_DELETE = "GUILD_DELETE"
GUILD_BAN_ADD = "GUILD_BAN_ADD"
GUILD_BAN_REMOVE = "GUILD_BAN_REMOVE"
GUILD_EMOJIS_UPDATE = "GUILD_EMOJIS_UPDATE"
GUILD_STICKERS_UPDATE = "GUILD_STICKERS_UPDATE"
GUILD_INTEGRATIONS_UPDATE = "GUILD_INTEGRATIONS_UPDATE"
GUILD_MEMBER_ADD = "GUILD_MEMBER_ADD"
GUILD_MEMBER_REMOVE = "GUILD_MEMBER_REMOVE"
GUILD_MEMBER_UPDATE = "GUILD_MEMBER_UPDATE"
GUILD_MEMBERS_CHUNK = "GUILD_MEMBERS_CHUNK"
GUILD_ROLE_CREATE = "GUILD_ROLE_CREATE"
GUILD_ROLE_UPDATE = "GUILD_ROLE_UPDATE"
GUILD_ROLE_DELETE = "GUILD_ROLE_DELETE"
GUILD_SCHEDULED_EVENT_CREATE = "GUILD_SCHEDULED_EVENT_CREATE"
GUILD_SCHEDULED_EVENT_UPDATE = "GUILD_SCHEDULED_EVENT_UPDATE"
GUILD_SCHEDULED_EVENT_DELETE = "GUILD_SCHEDULED_EVENT_DELETE"
GUILD_SCHEDULED_EVENT_USER_ADD = "GUILD_SCHEDULED_EVENT_USER_ADD"
GUILD_SCHEDULED_EVENT_USER_REMOVE = "GUILD_SCHEDULED_EVENT_USER_REMOVE"
INTEGRATION_CREATE = "INTEGRATION_CREATE"
INTEGRATION_UPDATE = "INTEGRATION_UPDATE"
INTEGRATION_DELETE = "INTEGRATION_DELETE"
INVITE_CREATE = "INVITE_CREATE"
INVITE_DELETE = "INVITE_DELETE"
MESSAGE_CREATE = "MESSAGE_CREATE"
MESSAGE_UPDATE = "MESSAGE_UPDATE"
MESSAGE_DELETE = "MESSAGE_DELETE"
MESSAGE_DELETE_BULK = "MESSAGE_DELETE_BULK"
MESSAGE_REACTION_ADD = "MESSAGE_REACTION_ADD"
MESSAGE_REACTION_REMOVE = "MESSAGE_REACTION_REMOVE"
MESSAGE_REACTION_REMOVE_ALL = "MESSAGE_REACTION_REMOVE_ALL"
MESSAGE_REACTION_REMOVE_EMOJI = "MESSAGE_REACTION_REMOVE_EMOJI"
PRESENCE_UPDATE = "PRESENCE_UPDATE"
STAGE_INSTANCE_CREATE = "STAGE_INSTANCE_CREATE"
STAGE_INSTANCE_DELETE = "STAGE_INSTANCE_DELETE"
STAGE_INSTANCE_UPDATE = "STAGE_INSTANCE_UPDATE"
TYPING_START = "TYPING_START"
VOICE_STATE_UPDATE = "VOICE_STATE_UPDATE"
VOICE_SERVER_UPDATE = "VOICE_SERVER_UPDATE"
WEBHOOKS_UPDATE = "WEBHOOKS_UPDATE"
|
from tree.elements import Node
class _Collection(Node):
_required_attributes = {"backup_set", "backup_date"}
def calculated_attributes(self):
return {'count': len(self._children)}
def append(self, event):
self._children.append(event)
def extend(self, events):
self._children.extend(events)
def sort(self):
self._children.sort(key=lambda event: int(event["date"]))
class _Event(Node):
_required_attributes = {"date"}
_optional_attributes = {'contact_name', 'readable_date'}
class MessageBox(object):
RECEIVED = 1
SENT = 2
DRAFT = 3
class _Message(_Event):
_required_attributes = _Event._required_attributes | {'address', 'read', 'locked'}
_optional_attributes = _Event._optional_attributes | {'date_sent'}
class AddressType(object):
BCC = 129
CC = 130
FROM = 137
TO = 151
class Sms(_Message):
_required_attributes = _Message._required_attributes | {'protocol', 'type', 'body', 'toa', 'sc_toa', 'service_center', 'status'}
_children = None
def calculated_attributes(self):
        ret = super(Sms, self).calculated_attributes()
ret.update({'subject': 'null'})
return ret
class Mms(_Message):
#TODO: some are probably calculated fields or optional fields
_required_attributes = _Message._required_attributes | {'text_only', 'ct_t', 'msg_box', 'sub', 'sequence_time', 'seen', 'rr', 'ct_cls', 'retr_txt_cs', 'ct_l', 'phone_id', 'm_size', 'exp', 'sub_cs', 'st', 'creator', 'tr_id', 'sub_id', 'resp_st', 'm_id', 'pri', 'd_rpt', 'd_tm', 'read_status', 'retr_txt', 'resp_txt', 'rpt_a', 'star_status', 'm_cls', 'retr_st'}
def __init__(self):
        super(Mms, self).__init__()
self._children.append(Parts())
self._children.append(Addrs())
@staticmethod
def _message_type(message_box):
if message_box == MessageBox.DRAFT:
return "null"
elif message_box == MessageBox.RECEIVED:
return "132"
elif message_box == MessageBox.SENT:
return "128"
else:
raise RuntimeError("Unknown direction {}".format(direction))
def calculated_attributes(self):
        ret = super(Mms, self).calculated_attributes()
ret.update({'v': 16, 'm_type': self._message_type(self['msg_box'])})
return ret
def add_part(self, part):
self._children[0]._children.append(part)
def add_addr(self, type, address):
a = Addr()
a["type"] = type
a["address"] = address
self._children[-1]._children.append(a)
class Parts(Node):
pass
class Part(Node):
_required_attributes = {'seq', 'ct', 'name', 'chset', 'cd', 'fn', 'cid', 'cl', 'ctt_s', 'ctt_t', 'text'}
_optional_attributes = {'data'}
class Addrs(Node):
pass
class Addr(Node):
_required_attributes = {'address', 'type'}
def calculated_attributes(self):
# all examples seem to use the same charset?
return {'charset': '106'}
class Smses(_Collection):
@staticmethod
def filename_prefix():
return "sms-"
class CallType(object):
INCOMING = 1
OUTGOING = 2
MISSED = 3
VOICEMAIL = 4
REJECTED = 5
REFUSED_LIST = 6
class NumberPresentation(object):
ALLOWED = 1
RESTRICTED = 2
UNKNOWN = 3
PAYPHONE = 4
class Call(_Event):
_required_attributes = _Event._required_attributes | {'number', 'duration', 'type', 'presentation'}
children = None
class Calls(_Collection):
@staticmethod
def filename_prefix():
return "calls-"
|
"""
Devops challenge
(c) 2020 - GHGSat inc.
"""
from pathlib import Path
import click
from PIL import Image
@click.command()
@click.argument("input", type=click.Path(exists=True, dir_okay=True, file_okay=False))
@click.argument("output", type=click.Path(exists=True, dir_okay=True, file_okay=False))
def main(input, output):
click.echo("Starting up....")
input = Path(input)
output = Path(output)
click.echo(f"Going through {input} and writing to {output}")
for image in input.iterdir():
output_image = output / image.name
click.echo(f"in: {image}, out: {output_image}")
process(image, output_image)
click.echo("Done")
def process(input, output):
"""
    Perform advanced image processing on the image at input and write the result
    to output
"""
image = Image.open(input)
new_image = image.rotate(90, expand=True)
new_image.save(output)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2019, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import gettext
import os
import sys
import transaction
from argparse import ArgumentParser
from sqlalchemy import func
from c2cgeoportal_commons.testing import get_session
from c2cgeoportal_geoportal.scripts import fill_arguments, get_appsettings
def main():
parser = ArgumentParser(
prog=sys.argv[0], add_help=True,
description="Tool to fill the tsearch table (full-text search) from the theme information.",
)
locale_path_1 = os.path.join("{package}_geoportal", "locale", "")
locale_path_2 = os.path.join("geoportal", locale_path_1)
locale_path = locale_path_2 if os.path.exists("geoportal") else locale_path_1
parser.add_argument(
"--locale-folder",
default=locale_path,
help="The folder where the locale files are stored",
)
parser.add_argument(
"--interfaces",
nargs='+',
help="the interfaces to export",
)
parser.add_argument(
"--duplicate-name",
action="store_true",
dest="name",
help="allows to add a name more than one time,\n"
"by default if we find more than one element with the same name "
"only one will be imported",
)
parser.add_argument(
"--no-themes",
action="store_false",
dest="themes",
help="do not import the themes",
)
parser.add_argument(
"--no-blocks",
action="store_false",
dest="blocks",
help="do not import the blocks (first level layer groups)",
)
parser.add_argument(
"--no-folders",
action="store_false",
dest="folders",
help="do not import the folders (tree folders)",
)
parser.add_argument(
"--no-layers",
action="store_false",
dest="layers",
help="do not import the layers (tree leaf)",
)
parser.add_argument(
"--package",
help="the application package",
)
fill_arguments(parser)
options = parser.parse_args()
settings = get_appsettings(options)
with transaction.manager:
session = get_session(settings, transaction.manager)
Import(session, settings, options)
class Import:
def __init__(self, session, settings, options):
self.options = options
self.imported = set()
package = settings["package"]
self.fts_languages = settings["fulltextsearch"]["languages"]
self.languages = settings["available_locale_names"]
# must be done only once we have loaded the project config
from c2cgeoportal_commons.models.main import FullTextSearch, Interface, Theme, Role
self.session = session
self.session.execute(FullTextSearch.__table__.delete().where(FullTextSearch.from_theme == True)) # noqa
self._ = {}
for lang in self.languages:
try:
self._[lang] = gettext.translation(
"{}_geoportal-client".format(package),
options.locale_folder.format(package=package),
[lang],
)
except OSError as e:
self._[lang] = gettext.NullTranslations()
print("Warning: {} (language: {})".format(e, lang))
query = self.session.query(Interface)
if options.interfaces is not None:
query = query.filter(
Interface.name.in_(options.interfaces)
)
self.interfaces = query.all()
self.public_theme = {}
self.public_group = {}
for interface in self.interfaces:
self.public_theme[interface.id] = []
self.public_group[interface.id] = []
for theme in self.session.query(Theme).filter_by(public=True).all():
self._add_theme(theme)
for role in self.session.query(Role).all():
for theme in self.session.query(Theme).all():
self._add_theme(theme, role)
def _add_fts(self, item, interface, action, role):
from c2cgeoportal_commons.models.main import FullTextSearch
key = (
item.name if self.options.name else item.id,
interface.id,
role.id if role is not None else None
)
if key not in self.imported:
self.imported.add(key)
for lang in self.languages:
fts = FullTextSearch()
fts.label = self._[lang].gettext(item.name)
fts.role = role
fts.interface = interface
fts.lang = lang
fts.public = role is None
fts.ts = func.to_tsvector(self.fts_languages[lang], fts.label)
fts.actions = [{
"action": action,
"data": item.name,
}]
fts.from_theme = True
self.session.add(fts)
def _add_theme(self, theme, role=None):
fill = False
for interface in self.interfaces:
if interface in theme.interfaces:
for child in theme.children:
fill = self._add_block(child, interface, role) or fill
if fill and self.options.themes:
if role is None:
self.public_theme[interface.id].append(theme.id)
if role is None or theme.id not in self.public_theme[interface.id]:
self._add_fts(theme, interface, "add_theme", role)
def _add_block(self, group, interface, role):
return self._add_group(group, interface, self.options.blocks, role)
def _add_folder(self, group, interface, role):
return self._add_group(group, interface, self.options.folders, role)
def _add_group(self, group, interface, export, role):
from c2cgeoportal_commons.models.main import LayerGroup
fill = False
for child in group.children:
if isinstance(child, LayerGroup):
fill = self._add_folder(child, interface, role) or fill
else:
fill = self._add_layer(child, interface, role) or fill
if fill and export:
if role is None:
self.public_group[interface.id].append(group.id)
if role is None or group.id not in self.public_group[interface.id]:
self._add_fts(group, interface, "add_group", role)
return fill
@staticmethod
def _layer_visible(layer, role):
for restrictionarea in layer.restrictionareas:
if role in restrictionarea.roles:
return True
return False
def _add_layer(self, layer, interface, role):
if role is None:
fill = layer.public and interface in layer.interfaces
else:
fill = interface in layer.interfaces and not layer.public and \
self._layer_visible(layer, role)
if fill and self.options.layers:
self._add_fts(layer, interface, "add_layer", role)
return fill
|
import pickle
import time
import config
from utils.graphwave.graphwave import *
from utils.sparse_matrix_factorization import *
def sequence2list(filename):
graphs = dict()
with open(filename, 'r') as f:
for line in f:
walks = line.strip().split('\t')[:config.max_sequence+1]
# put message/cascade id into graphs dictionary, value is a list
graphs[walks[0]] = list()
for i in range(1, len(walks)):
nodes = walks[i].split(":")[0]
time = walks[i].split(":")[1]
graphs[walks[0]] \
.append([[int(x) for x in nodes.split(",")],
int(time)])
return graphs
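# Input format sketch (illustrative, assuming config.max_sequence is large enough):
# a line such as
#   "123\t1:0\t1,2:20\t1,3:50"
# becomes graphs["123"] == [[[1], 0], [[1, 2], 20], [[1, 3], 50]], i.e. a list of
# (node id path, publish time) pairs for cascade 123.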
def read_labels_and_sizes(filename):
labels = dict()
sizes = dict()
with open(filename, 'r') as f:
for line in f:
parts = line.split('\t')
# parts[-1] means the incremental popularity
labels[parts[0]] = parts[-1]
# parts[3] means the observed popularity
sizes[parts[0]] = int(parts[3])
return labels, sizes
def write_cascade(graphs, labels, sizes, length, filename, gg_emb,
weight=True):
y_data = list()
size_data = list()
time_data = list()
rnn_index = list()
embedding = list()
n_cascades = 0
new_input = list()
global_input = list()
for key, graph in graphs.items():
label = labels[key].split()
y = int(label[0])
temp_time = list()
temp_index = list()
temp_size = len(graph)
for walk in graph:
# save publish time into temp_time list
temp_time.append(walk[1])
# save length of walk into temp_index
temp_index.append(len(walk[0]))
y_data.append(y)
size_data.append(temp_size)
time_data.append(temp_time)
rnn_index.append(temp_index)
n_cascades += 1
# padding the embedding
embedding_size = config.gc_emd_size
cascade_i = 0
cascade_size = len(graphs)
total_time = 0
for key, graph in graphs.items():
start_time = time.time()
new_temp = list()
global_temp = list()
dg = nx.DiGraph()
nodes_index = list()
list_edge = list()
cascade_embedding = list()
global_embedding = list()
times = list()
t_o = config.observation_time
for path in graph:
t = path[1]
if t >= t_o:
continue
nodes = path[0]
if len(nodes) == 1:
nodes_index.extend(nodes)
times.append(1)
continue
else:
nodes_index.extend([nodes[-1]])
if weight:
edge = (nodes[-1], nodes[-2], (1 - t / t_o))
times.append(1 - t / t_o)
else:
edge = (nodes[-1], nodes[-2])
list_edge.append(edge)
if weight:
dg.add_weighted_edges_from(list_edge)
else:
dg.add_edges_from(list_edge)
nodes_index_unique = list(set(nodes_index))
nodes_index_unique.sort(key=nodes_index.index)
g = dg
d = embedding_size / (2 * config.number_of_s)
if embedding_size % 4 != 0:
raise ValueError
chi, _, _ = graphwave_alg(g, np.linspace(0, 100, int(d)),
taus='auto', verbose=False,
nodes_index=nodes_index_unique,
nb_filters=config.number_of_s)
for node in nodes_index:
cascade_embedding.append(chi[nodes_index_unique.index(node)])
global_embedding.append(gg_emb[id2row[node]])
if weight:
cascade_embedding = np.concatenate([np.reshape(times, (-1, 1)), np.array(cascade_embedding)[:, 1:]], axis=1)
new_temp.extend(cascade_embedding)
global_temp.extend(global_embedding)
new_input.append(new_temp)
global_input.append(global_temp)
total_time += time.time() - start_time
cascade_i += 1
if cascade_i % 100 == 0:
speed = total_time / cascade_i
eta = (cascade_size - cascade_i) * speed
print("{}/{}, eta: {:.2f} minutes".format(
cascade_i, cascade_size, eta / 60))
with open(filename, 'wb') as fin:
pickle.dump((new_input, global_input, y_data), fin)
def get_max_size(sizes):
max_size = 0
for cascade_id in sizes:
max_size = max(max_size, sizes[cascade_id])
return max_size
def get_max_length(graphs):
""" Get the max length among sequences. """
max_length = 0
for cascade_id in graphs:
# traverse the graphs for max length sequence
for sequence in graphs[cascade_id]:
max_length = max(max_length, len(sequence[0]))
return max_length
if __name__ == "__main__":
time_start = time.time()
# get the information of nodes/users of cascades
graphs_train = sequence2list(config.cascade_shortestpath_train)
graphs_val = sequence2list(config.cascade_shortestpath_validation)
graphs_test = sequence2list(config.cascade_shortestpath_test)
# get the information of labels and sizes of cascades
labels_train, sizes_train = read_labels_and_sizes(config.cascade_train)
labels_val, sizes_val = read_labels_and_sizes(config.cascade_validation)
labels_test, sizes_test = read_labels_and_sizes(config.cascade_test)
# find the max length of sequences
len_sequence = max(get_max_length(graphs_train),
get_max_length(graphs_val),
get_max_length(graphs_test))
print("Max length of sequence:", len_sequence)
print("Cascade graph embedding size:", config.gc_emd_size)
print("Number of scale s:", config.number_of_s)
# load global graph and generate id2row
with open(config.global_graph, 'rb') as f:
gg = pickle.load(f)
# sparse matrix factorization
model = SparseMatrixFactorization(gg, config.gg_emd_size)
features_matrix = model.pre_factorization(model.matrix, model.matrix)
np.save(config.global_embedding, features_matrix)
ids = [int(xovee) for xovee in gg.nodes()]
id2row = dict()
i = 0
for id in ids:
id2row[id] = i
i += 1
# load global graph embeddings
gg_emb = np.load(config.global_embedding + '.npy')
print("Start writing train set into file.")
write_cascade(graphs_train, labels_train, sizes_train, len_sequence,
config.train, gg_emb=gg_emb)
print("Start writing validation set into file.")
write_cascade(graphs_val, labels_val, sizes_val,
len_sequence,
config.val, gg_emb=gg_emb)
print("Start writing test set into file.")
write_cascade(graphs_test, labels_test, sizes_test, len_sequence,
config.test, gg_emb=gg_emb)
time_end = time.time()
print("Processing time: {0:.2f}s".format(time_end - time_start))
|
# Copyright (c) 2021 Cristian Patrasciuc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from unittest.mock import Mock
from model.player_pair import PlayerPair
from ui.score_view import ScoreView
from ui.test_utils import GraphicUnitTest
class ScoreViewTest(GraphicUnitTest):
def test_one_game(self):
score_history = [(PlayerPair(78, 32), PlayerPair(2, 0))]
score_view = ScoreView(score_history)
score_view.open()
self.render(score_view)
score_view.dismiss()
def test_one_bummerl(self):
score_history = [
(PlayerPair(78, 32), PlayerPair(2, 0)),
(PlayerPair(42, 67), PlayerPair(0, 1)),
(PlayerPair(52, 40), PlayerPair(3, 0)),
(PlayerPair(62, 58), PlayerPair(0, 1)),
(PlayerPair(10, 85), PlayerPair(0, 2)),
(PlayerPair(66, 50), PlayerPair(1, 0)),
(PlayerPair(0, 82), PlayerPair(0, 3)),
]
score_view = ScoreView(score_history)
score_view.open()
self.render(score_view)
score_view.dismiss()
def test_maximum_number_of_games(self):
score_history = [
(PlayerPair(60, 60), PlayerPair(1, 0)),
(PlayerPair(60, 60), PlayerPair(0, 1)),
(PlayerPair(60, 60), PlayerPair(1, 0)),
(PlayerPair(60, 60), PlayerPair(0, 1)),
(PlayerPair(60, 60), PlayerPair(1, 0)),
(PlayerPair(60, 60), PlayerPair(0, 1)),
(PlayerPair(60, 60), PlayerPair(1, 0)),
(PlayerPair(60, 60), PlayerPair(0, 1)),
(PlayerPair(60, 60), PlayerPair(1, 0)),
(PlayerPair(60, 60), PlayerPair(0, 1)),
(PlayerPair(60, 60), PlayerPair(1, 0)),
(PlayerPair(60, 60), PlayerPair(0, 1)),
(PlayerPair(60, 60), PlayerPair(1, 0)),
]
score_view = ScoreView(score_history)
score_view.open()
self.render(score_view)
score_view.dismiss()
def test_show_score_view(self):
score_history = [
(PlayerPair(78, 32), PlayerPair(2, 0)),
(PlayerPair(42, 67), PlayerPair(0, 1)),
(PlayerPair(52, 40), PlayerPair(3, 0)),
(PlayerPair(62, 58), PlayerPair(0, 1)),
(PlayerPair(10, 85), PlayerPair(0, 2)),
(PlayerPair(66, 50), PlayerPair(1, 0)),
(PlayerPair(0, 82), PlayerPair(0, 3)),
]
dismiss_callback = Mock()
score_view = ScoreView.show_score_view(score_history, dismiss_callback)
self.render(score_view)
dismiss_callback.assert_not_called()
score_view.dismiss()
dismiss_callback.assert_called_once()
|
# -*- coding: utf-8 -*-
"""zfs-replicate CLI interface."""
|
"""Rotates all bodies along world z-direction by 45 degrees:
"""
from openravepy import *
import numpy
env = Environment() # create openrave environment
env.SetViewer('qtcoin') # attach viewer (optional)
env.Load('data/lab1.env.xml') # load a simple scene
Tz = matrixFromAxisAngle([0,0,numpy.pi/4])
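# matrixFromAxisAngle takes an axis-angle vector (rotation axis scaled by the angle in radians),
# so [0,0,pi/4] yields a 4x4 transform rotating 45 degrees about the world z-axis; pre-multiplying
# each body transform by Tz below applies that rotation in world coordinates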
with env:
for body in env.GetBodies():
body.SetTransform(numpy.dot(Tz,body.GetTransform()))
|
import datetime as dt
import errno
import json
import os
import ntpath
import re
import shutil
import pandas as pd
import requests
import yaml
import csv
import sys
import subprocess
from teradatasql import OperationalError
from .dbutil import df_to_sql, sql_to_df
import webbrowser
import tdcsm
from pathlib import Path
from .utils import Utils # includes Logger class
# todo create docstring for all methods
class tdcoa:
# paths
approot = '.'
configpath = ''
secretpath = ''
systemspath = ''
filesetpath = ''
outputpath = ''
version = "0.4.1.6"
skip_dbs = False # skip ALL dbs connections / executions
manual_run = False # skip dbs executions in execute_run() but not upload_to_transcend()
# also skips /*{{save:}}*/ special command
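    # special commands are embedded in .coa.sql files as sql comments, for example
    # (names are illustrative only):
    #   /*{{save:my_results.csv}}*/  /*{{load:adlste_coa_stg.my_table}}*/  /*{{call:adlste_coa.sp_refresh}}*/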
# dictionaries
secrets = {}
filesets = {}
files = {}
systems = {}
folders = {}
substitutions = {}
transcend = {}
settings = {}
def __init__(self, approot='.', printlog=True, config='config.yaml', secrets='secrets.yaml', filesets='filesets.yaml', systems='source_systems.yaml', refresh_defaults=False, skip_dbs=False):
self.bufferlog = True
self.printlog = printlog
self.approot = os.path.join('.', approot)
self.configpath = os.path.join(self.approot, config)
self.secretpath = os.path.join(self.approot, secrets)
self.systemspath = os.path.join(self.approot, systems)
self.refresh_defaults = refresh_defaults
self.utils = Utils(self.version) # utilities class. inherits Logger class
self.utils.log('tdcoa started', header=True)
self.utils.log('time', str(dt.datetime.now()))
self.utils.log('app root', self.approot)
self.utils.log('config file', self.configpath)
self.utils.log('source systems file', self.systemspath)
self.utils.log('secrets file', self.secretpath)
self.utils.log('tdcoa version', self.version)
self.unique_id = dt.datetime.now().strftime("%m%d%Y%H%M") # unique id to append to table
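        # e.g. "%m%d%Y%H%M" renders May 23 2024 10:30 as '052320241030'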
self.motd_url = 'file://' + os.path.abspath(os.path.join(self.approot, 'motd.html'))
# filesets.yaml is validated at download time
self.reload_config(skip_dbs=skip_dbs)
def add_filesets_to_systems(self):
# read in fileset.yaml file to dictionary:
self.utils.log('adding all filesets to all systems (in memory, not disk)')
for sysname, sysobject in self.systems.items(): # iterate systems object...
i = 0
if 'filesets' not in sysobject or type(sysobject['filesets']) != dict: sysobject['filesets']={}
for fsname, fsobject in self.filesets.items(): # iterate fileset master yaml...
if fsname not in sysobject['filesets']:
sysobject['filesets'][fsname] = {'active':False} # add if missing
i+=1
self.utils.log('added %i new filesets to' %i, sysname)
def reload_config(self, config='', secrets='', systems='', refresh_defaults=False, skip_dbs=False, skip_git=False):
"""Reloads configuration YAML files (config & secrets) used as
process driver. This will also perform any local environment checks,
such as creating missing folders (download|sql|run|output), change
siteid and transcend definitions, change gitfile and host pointers,
change runlog.txt location, etc. This process is called once during
class instance initialization phase (i.e., "coa = tdcoa()" )
Parameters:
config == Relative file path for the config.yaml file.
Default is ./config.yaml, created if missing
secrets == Relative file path for the secrets.yaml file.
Default is ./secrets.yaml, created if missing
Examples:
from tdcsm.tdcoa import tdcoa
coa = tdcoa()
coa.reload_config() # reloads default config.yaml
coa.reload_config( config='./config_customerABC.yaml')
coa.reload_config( config='../configs/config_mthly_dbql.yaml')
coa.reload_config( config='./myconfig.yaml', secrets='../passwords.yaml')
# you can also specify config/secrets in class instantiation:
from tdcsm.tdcoa import tdcoa
coa = tdcoa( config='configABC.yaml', secrets='passwords.yaml')
"""
# dictionaries
self.secrets = {}
self.filesets = {}
self.systems = {}
self.folders = {}
self.substitutions = {}
self.transcend = {}
self.settings = {}
configpath = self.configpath if config == '' else os.path.join(self.approot, config)
secretpath = self.secretpath if secrets == '' else os.path.join(self.approot, secrets)
systemspath = self.systemspath if systems == '' else os.path.join(self.approot, systems)
self.refresh_defaults = refresh_defaults
self.utils.bufferlogs = True
self.utils.log('reload_config started', header=True)
self.utils.log('time', str(dt.datetime.now()))
self.utils.log('tdcoa version', self.version)
# ensure all required configuration files are present:
self.utils.log('checking core config files')
# these all sit in approot by default, so these are both filenames AND relative paths:
startfiles = ['secrets.yaml','config.yaml','source_systems.yaml','run_gui.py','run_gui']
startfilecontent = ''
for startfile in startfiles:
startfile_src = os.path.join(os.path.dirname(tdcsm.__file__), startfile)
startfile_ovr = os.path.join(self.approot,'0_override', startfile)
# honor parameter overrides:
if startfile == 'secrets.yaml': startfile_dst = secretpath
elif startfile == 'config.yaml': startfile_dst = configpath
elif startfile == 'source_systems.yaml': startfile_dst = systemspath
else: startfile_dst = os.path.join(self.approot, startfile)
# remove files if "refresh defaults" is requested via __init__ param
            if self.refresh_defaults and os.path.isfile(startfile_dst) and startfile != 'secrets.yaml':
os.remove(startfile_dst)
# if file is missing:
if not os.path.isfile(startfile_dst):
self.utils.log(' MISSING FILE', startfile)
# check if the file is in the 0_override folder... if so, use that:
if os.path.isfile(startfile_ovr):
self.utils.log(' Adding from 0_override')
with open(startfile_ovr) as f1:
startfilecontent = f1.read()
# if no override, pull from package directory
elif os.path.isfile(startfile_src):
self.utils.log(' Adding from installed package')
with open(startfile_src) as f1:
startfilecontent = f1.read()
else: # this is just-in-case, until I can be sure above logic is working.
if startfile == 'coa.py': startfilecontent = 'from tdcsm.tdgui import coa\nc=coa()'
if startfile == 'secrets.yaml': startfilecontent = 'secrets:\n td_quicklook: "qlikuserid"\n td_password: "qlikpassword"'
if startfile == 'config.yaml': startfilecontent = self.yaml_config()
if startfile == 'source_systems.yaml': startfilecontent = self.yaml_systems()
self.utils.log(' Adding from internal string (should not happen)')
with open(startfile_dst, 'w') as f2:
f2.write(startfilecontent)
# load secrets.yaml
with open(secretpath, 'r') as fh:
secretstr = fh.read()
self.secrets = yaml.load(secretstr, Loader=yaml.FullLoader)['secrets']
self.utils.secrets = self.secrets # update secrets attribute in logger
# load config.yaml
with open(configpath, 'r') as fh:
configstr = fh.read()
configyaml = yaml.load(configstr, Loader=yaml.FullLoader)
configstr = self.utils.substitute(configstr, self.secrets, 'secrets')
configstr = self.utils.substitute(configstr, configyaml['substitutions'], 'config:substitutions')
configstr = self.utils.substitute(configstr, configyaml['folders'], 'config:folders')
configstr = self.utils.substitute(configstr, configyaml['settings'], 'config:settings')
configstr = self.utils.substitute(configstr, configyaml['transcend'], 'config:transcend')
configyaml = yaml.load(configstr, Loader=yaml.FullLoader)
# load substitutions
self.utils.log('loading dictionary', 'substitutions')
self.substitutions = configyaml['substitutions']
with open(systemspath, 'r') as fh:
systemsstr = fh.read()
systemsstr = self.utils.substitute(systemsstr, self.secrets, 'secrets')
systemsstr = self.utils.substitute(systemsstr, self.substitutions, 'systems:substitutions')
systemsyaml = yaml.load(systemsstr, Loader=yaml.FullLoader)
# check and set Transcend connection information
self.utils.log('loading dictionary', 'transcend')
self.transcend = configyaml['transcend']
self.utils.check_setting(self.transcend,
required_item_list=['username', 'password', 'host', 'logmech', 'db_coa', 'db_region', 'db_stg'],
defaults=['{td_quicklook}', '{td_password}', 'tdprdcop3.td.teradata.com', 'TD2',
'adlste_coa', 'adlste_westcomm', 'adlste_coa_stg'])
self.transcend['connectionstring'] = 'teradatasql://%s:%s@%s/?logmech=%s' % (
self.transcend['username'],
self.transcend['password'],
self.transcend['host'],
self.transcend['logmech'])
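        # e.g. (illustrative values): teradatasql://qlikuserid:qlikpassword@tdprdcop3.td.teradata.com/?logmech=TD2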
# check and set required Folders
self.utils.log('loading dictionary', 'folders')
self.folders = configyaml['folders']
self.utils.check_setting(self.folders, required_item_list=['download', 'sql', 'run', 'output', 'override'],
defaults=['1_download', '2_sql_store', '3_ready_to_run', '4_output', '0_override'])
# check and set required Settings
self.utils.log('loading dictionary', 'settings')
self.settings = configyaml['settings']
if skip_dbs: self.settings['skip_dbs'] = 'True'
self.utils.check_setting(self.settings,
required_item_list=['githost', 'gitfileset', 'gitmotd', 'localfilesets',
'run_non_fileset_folders', 'gui_show_dev_filesets',
'skip_dbs'],
                                 defaults=['https://raw.githubusercontent.com/tdcoa/sql/master/',
                                           'filesets.yaml',
                                           'motd.txt',
                                           '{download}/filesets.yaml',
                                           'True',
                                           'False',
                                           'False'])
if self.utils.validate_boolean(self.settings['skip_dbs'],'bool'):
self.utils.log('SKIP_DBS == TRUE, emulating all database connections', warning=True)
self.filesetpath = self.settings['localfilesets']
# create missing folders
for nm, subfo in self.folders.items():
fopath = os.path.join(self.approot, subfo)
# core folders
if not os.path.exists(fopath):
self.utils.log('creating missing folder', fopath)
os.mkdir(fopath)
# unbuffer logs once we have a valid "run" folder
self.utils.logpath = os.path.join(self.approot, self.folders['run'], 'runlog.txt')
# if os.path.isfile(self.logpath): os.remove(self.logpath)
self.utils.bufferlogs = False
self.utils.log('unbuffering log to "run" folder')
# setup filesets.yaml
if 'localfilesets' not in self.settings:
self.settings['localfilesets'] = os.path.join(self.folders['download'], 'filesets.yaml')
self.filesetpath = os.path.join(self.approot, self.settings['localfilesets'])
githost = self.settings['githost']
if githost[-1:] != '/':
githost = githost + '/'
self.utils.log('githost', githost)
giturl = githost + self.settings['gitfileset']
self.utils.log('downloading "filesets.yaml" from github')
self.utils.log(' requesting url', giturl)
# add skip_git check from config.settings
if 'skip_git' in self.settings and self.settings['skip_git']=='True':
skip_git = True
self.utils.log('setting found in config.yaml', 'skip_git: "True"')
# skip git download if requested
if skip_git:
self.utils.log('filesets.yaml download skipped, using cached local copy', warning=True)
else:
try:
filecontent = requests.get(giturl).content.decode('utf-8')
savepath = os.path.join(self.approot, self.settings['localfilesets'])
self.utils.log('saving filesets.yaml', savepath)
with open(savepath, 'w') as fh:
fh.write(filecontent)
self.utils.log('filesets.yaml saved')
except Exception as ex:
self.utils.log('filesets.yaml could not be downloaded, using cached local copy', warning=True)
# self.utils.log('Error: %s' %str(ex), indent=2)
# load filesets dictionary (active only)
self.utils.log('loading dictionary', 'filesets (active only)')
if not os.path.isfile(self.filesetpath):
self.utils.log('the filesets.yaml file is not found at the expected location: \n\t%s\n' %self.filesetpath, error=True)
self.utils.log('this might be caused by a network disallowing downloads from GitHub.com, or being offline entirely')
self.utils.log('downloading the filesets.yaml is a HARD REQUIREMENT, as that file defines all filesets in real-time')
self.utils.log('\nRecommended Actions:')
self.utils.log(' 1) manually download the fileset definition file here:\n https://raw.githubusercontent.com/tdcoa/sql/master/filesets/filesets.yaml')
self.utils.log(' 2) save to your "1_download" folder as "filesets.yaml"')
self.utils.log(' 3) to prevent waiting for the connection timeout, open your "config.yaml" file and in the "settings" section add:\n skip_git: "True" # (remember to match the indent)')
self.utils.log(' 4) click the "Reload Configs" button')
self.utils.log(' 5) plan to manually refresh the filesets.yaml file periodically\n\n')
self.utils.log('Finally Note: all other fileset collateral is likewise downloaded from github, so you are likely to hit similar errors during the Download phase.\n\n')
else:
with open(self.filesetpath, 'r') as fh:
filesetstr = fh.read()
filesetyaml = yaml.load(filesetstr, Loader=yaml.FullLoader)
if not filesetyaml:
msg = 'filesets.yaml appears empty, please make sure it contains valid yaml configuration.\n'
msg = msg + 'when in doubt: delete the existing filesets.yaml file from the "download" folder,\n'
msg = msg + 'and run the process again. When missing, it will create a default file of\n'
msg = msg + 'the correct format. When executing the "download_sql" command, the program\n'
msg = msg + 'will also re-download the latest filesets.yaml from github.'
self.utils.log(msg, error=True)
raise IOError(msg)
for setname, setobject in filesetyaml.items():
if str(setobject['active']).strip().lower() == 'true':
self.filesets.update({setname: setobject})
# load systems (no longer active only)
self.utils.log('loading system dictionaries')
for sysname, sysobject in systemsyaml['systems'].items():
# if self.utils.dict_active(sysobject, sysname): #<--- no more, really messed up lots of UI work before
self.systems[sysname] = sysobject
self.utils.log('LOADING SYSTEM', sysname)
# todo add default dbsversion and collection
self.utils.check_setting(self.systems[sysname],
required_item_list=['active', 'siteid', 'use', 'host',
'username', 'password',
'logmech', 'driver', 'encryption','dbsversion','collection',
'filesets'],
defaults=['True', 'siteid123', 'unknown', 'customer.host.missing.com',
'username_missing', 'password_missing',
'TD2', 'sqlalchemy', 'False','16.20','pdcr',
{}])
if sysobject['logmech'].strip() == '':
logmech = ''
else:
logmech = '/?logmech=%s' % sysobject['logmech']
sysobject['connectionstring'] = 'teradatasql://%s:%s@%s%s' % (sysobject['username'],
sysobject['password'],
sysobject['host'],
logmech)
# add filesets to systems, in memory only:
self.add_filesets_to_systems()
# not sure this is ever explicitly re-set
self.configpath = configpath
self.secretpath = secretpath
self.systemspath = systemspath
self.bteq_delim = '|~|'
bp=[]
bp.append('---------------------------------------------------------------------')
bp.append('--- add credentials below, all else should run & export automatically')
bp.append('.SET MAXERROR 1;')
bp.append('.SET SESSION TRANSACTION BTET;')
bp.append('.logmech TD2; --- example options: NTLM, KRB5, LDAP, TD2')
bp.append('.LOGON host/username,password;')
bp.append('.TITLEDASHES off;')
bp.append(".SEPARATOR '%s';" %self.bteq_delim)
bp.append(".SET NULL AS '';")
bp.append('.WIDTH 32000;')
bp.append('.RETLIMIT * *;')
bp.append('.SHOW CONTROLS;')
bp.append('---------------------------------------------------------------------')
self.bteq_prefix = '\n'.join(bp)
self.substitutions['YYYYMMDD'] = dt.datetime.today().strftime('%Y%m%d')
self.substitutions['YYYYMM'] = dt.datetime.today().strftime('%Y%m')
self.utils.log('done!')
self.utils.log('time', str(dt.datetime.now()))
def download_files(self, motd=True):
self.utils.log('download_files started', header=True)
self.utils.log('time', str(dt.datetime.now()))
githost = self.settings['githost']
if githost[-1:] != '/':
githost = githost + '/'
self.utils.log('githost', githost)
filesetcontent = ''
# download any control files first (motd.html, etc.)
# motd
giturl = githost + self.settings['gitmotd']
self.utils.log('downloading "motd.html" from github')
self.utils.log(' requesting url', giturl)
filecontent = requests.get(giturl).text
with open(os.path.join(self.approot, 'motd.html'), 'w') as fh:
fh.write(filecontent)
# open motd.html in browser
self.motd_url = 'file://' + os.path.abspath(os.path.join(self.approot, 'motd.html'))
if motd: webbrowser.open(self.motd_url)
# delete all pre-existing download folders
        # note: this deletion was previously commented out so that files would only be re-downloaded
        # when missing; it is currently active, so the download folder is cleared on every run
self.utils.recursively_delete_subfolders(os.path.join(self.approot, self.folders['download']))
# set proper githost for filesets
githost = githost + 'filesets/'
# iterate all active systems.filesets:
for sysname, sysobject in self.systems.items():
if self.utils.dict_active(sysobject, sysname, also_contains_key='filesets'): # must be ACTIVE (this test pre-dated systems.active change)
self.utils.log('\nINTERROGATING SYSTEM', sysname)
# get all filesets as defined in each system:
for sys_setname, sys_setobject in sysobject['filesets'].items():
if self.utils.dict_active(sys_setobject, sys_setname):
self.utils.log(' found fileset', sys_setname)
self.utils.log(' cross-referencing with filesets.yaml...')
# cross-reference to filesets in filesets.yaml
if sys_setname in self.filesets:
setname = sys_setname
setobject = self.filesets[setname]
if self.utils.dict_active(setobject, setname, also_contains_key='files'):
self.utils.log(' FILE SET FOUND', setname + ' [' + str(len(setobject['files'])) + ']')
savepath = os.path.join(self.approot, self.folders['download'], setname)
if not os.path.exists(savepath):
os.mkdir(savepath)
# download each file in the fileset
for file_key, file_dict in setobject['files'].items():
self.utils.log(' ' + ('-' * 50))
self.utils.log(' ' + file_key)
# check for matching dbversion
dbversion_match = True
if 'dbsversion' in file_dict.keys():
if sysobject['dbsversion'] in file_dict['dbsversion']:
dbversion_match = True
else:
dbversion_match = False
# check for matching collection
collection_match = True
if 'collection' in file_dict.keys():
if sysobject['collection'] in file_dict['collection']:
collection_match = True
else:
collection_match = False
# only download file if dbsversion and collection match
savefile = os.path.join(savepath, file_dict['gitfile'].split('/')[-1]) # save path
# Skip download if file already exists
file_exists = os.path.exists(savefile)
print(str(savefile))
if file_exists == True:
self.utils.log(' File %s already exists in the download folder, so skipping download' % str(file_dict['gitfile'].split('/')[-1]))
continue
if dbversion_match and collection_match:
self.utils.log(' downloading file', file_dict['gitfile'])
giturl = githost + file_dict['gitfile']
self.utils.log(' %s' % giturl)
response = requests.get(giturl)
if response.status_code == 200:
filecontent = response.content
self.utils.log(' saving file to', savefile)
with open(savefile, 'wb') as fh:
fh.write(filecontent)
else:
self.utils.log('Status Code: ' + str(
response.status_code) + '\nText: ' + response.text, error=True)
exit()
else:
self.utils.log(' diff dbsversion or collection, skipping')
self.utils.log(' ' + ('-' * 50) + '\n')
else: # not found
self.utils.log(' not found in filesets.yaml', sys_setname)
self.utils.log('\ndone!')
self.utils.log('time', str(dt.datetime.now()))
def copy_download_to_sql(self, overwrite=False):
self.utils.log('copy_download_to_sql started', header=True)
self.utils.log('copy files from download folder (by fileset) to sql folder (by system)')
self.utils.log('time', str(dt.datetime.now()))
downloadpath = os.path.join(self.approot, self.folders['download'])
sqlpath = os.path.join(self.approot, self.folders['sql'])
self.utils.recursively_delete_subfolders(sqlpath)
for sysname, sysobject in self.systems.items():
if self.utils.dict_active(sysobject, also_contains_key='filesets'): # must be ACTIVE (this test pre-dated systems.active change)
self.utils.log('processing system', sysname) # just shuffled log inside of active test
for setname, setobject in sysobject['filesets'].items():
self.utils.log('processing fileset', setname)
if self.utils.dict_active(setobject):
# define paths:
srcpath = os.path.join(self.approot, self.folders['download'], setname)
dstpath = os.path.join(self.approot, self.folders['sql'], sysname)
dstpath = os.path.join(dstpath, setname)
if not os.path.exists(dstpath):
os.makedirs(dstpath)
# purge existing, and copy over
if overwrite:
self.utils.recursive_delete(dstpath)
# loop through downloaded files and copy them to sql_store folder
# only copy if dbsversion and collection match
for downloaded_file in os.listdir(srcpath):
dbsversion_match = True
collection_match = True
# match downloaded file to fileset object so that we can compare collection & dbsversion
# note: fileset may not always have dbsversion or collection listed. always copy if thats true
                            for file_object, file_values in self.filesets.get(setname, {}).get('files', {}).items():
if downloaded_file == file_values['gitfile'].split('/')[-1]:
if 'dbsversion' in file_values:
if sysobject['dbsversion'] not in file_values['dbsversion']:
dbsversion_match = False # non-matching dbsversion: dont copy
if 'collection' in file_values:
if sysobject['collection'] not in file_values['collection']:
collection_match = False # non-matching collection: dont copy
break
# only copy if dbsversion and collection match (if given)
# todo add logging regarding which files are being skipped / copied
if dbsversion_match and collection_match:
shutil.copyfile(os.path.join(srcpath, downloaded_file), os.path.join(dstpath, downloaded_file))
self.utils.log('\ndone!')
self.utils.log('time', str(dt.datetime.now()))
def apply_override(self, override_folder='', target_folder=''):
self.utils.log('applying file override')
# apply folder default locations
if override_folder == '':
override_folder = self.folders['override']
override_folder = os.path.join(self.approot, override_folder)
if target_folder == '':
target_folder = self.folders['sql']
target_folder = os.path.join(self.approot, target_folder)
self.utils.log(' override folder', override_folder)
self.utils.log(' target_folder', target_folder)
copyops = {}
allfiles = []
# map files found in override folder
logdone = False
reloadconfig = False
for fo, subfos, files in os.walk(override_folder):
if fo == override_folder:
self.utils.log('\nprocessing files found in override root')
self.utils.log('these files replace any matching filename, regardless of subfolder location')
for file in files:
if file == 'config.yaml' or file == 'secrets.yaml':
# TODO: add source_systems.yaml -- or, maybe can be removed?
copyops[os.path.join(self.approot, file)] = os.path.join(override_folder, file)
reloadconfig = True
self.utils.log(' config file found, reload imminent', file)
elif file[:1] != '.':
allfiles.append(file)
self.utils.log(' root file found', file)
else:
if os.path.basename(fo)[:1] != '.':
if not logdone:
logdone = True
self.utils.log('\nprocessing files found in override subfolders')
self.utils.log(
'these files only replace filenames found in matching subfolders (and overrides root files)')
for file in files:
if file[:1] != '.':
specfile = os.path.join(fo, file).replace(override_folder, '.')
keydestfile = os.path.join(target_folder, specfile)
keydestfo = os.path.dirname(keydestfile)
if os.path.exists(keydestfo):
copyops[keydestfile] = os.path.join(override_folder, specfile)
self.utils.log(' subfolder file found', specfile)
else:
self.utils.log('target folder does not exist', keydestfo, warning=True)
# search for matching allfiles by crawling the target_folder
for fo, subfos, files in os.walk(target_folder):
for file in files:
keydestfile = os.path.join(fo, file)
if file in allfiles:
copyops[keydestfile] = os.path.join(override_folder, file)
# perform final copy:
self.utils.log('\nperform override file copy:')
for dstpath, srcpath in copyops.items():
self.utils.log(' source: %s' % srcpath)
self.utils.log(' target: %s' % dstpath)
shutil.copyfile(srcpath, dstpath)
if reloadconfig:
self.reload_config()
self.utils.log('\napply override complete!')
def prepare_sql(self, sqlfolder='', override_folder=''):
self.copy_download_to_sql() # moved from end of download_files() to here
self.utils.log('prepare_sql started', header=True)
self.utils.log('time', str(dt.datetime.now()))
if sqlfolder != '':
self.utils.log('sql folder', sqlfolder)
self.folders['sql'] = sqlfolder
self.utils.log(' sql folder', self.folders['sql'])
self.utils.log(' run folder', self.folders['run'])
self.apply_override(target_folder=sqlfolder, override_folder=override_folder)
# clear pre-existing subfolders in "run" directory (file sets)
self.utils.log('empty run folder entirely')
self.utils.recursively_delete_subfolders(os.path.join(self.approot, self.folders['run']))
# iterate all system level folders in "sql" folder...
for sysfolder in os.listdir(os.path.join(self.approot, self.folders['sql'])):
if os.path.isdir(os.path.join(self.approot, self.folders['sql'])):
self.utils.log('\n' + '-' * self.utils.logspace)
self.utils.log('SYSTEM FOLDER FOUND', sysfolder)
if sysfolder not in self.systems or self.utils.dict_active(self.systems[sysfolder]) is False: # must be ACTIVE (this test pre-dated systems.active change)
self.utils.log('folder not defined as an active system, skipping...')
else:
# iterate all fileset subfolders in system folder...
for setfolder in os.listdir(os.path.join(self.approot, self.folders['sql'], sysfolder)):
if os.path.isdir(os.path.join(self.approot, self.folders['sql'], sysfolder, setfolder)):
self.utils.log('FILESET FOLDER FOUND', setfolder)
# what to do with non-fileset folders? well, depends:
_continue = False
if setfolder not in self.filesets:
self.utils.log(' folder does NOT MATCH a defined fileset name', setfolder)
if self.settings['run_non_fileset_folders'].strip().lower() == 'true':
self.utils.log(
' however setting: "run_non_fileset_folders" equals "true", continuing...')
_continue = True
else:
self.utils.log(' and setting: "run_non_fileset_folders" not equal "true", skipping...')
_continue = False
else: # setfolder in self.filesets
self.utils.log(' folder MATCHES a defined fileset name', setfolder)
if not self.utils.dict_active(self.systems[sysfolder]['filesets'][setfolder]):
self.utils.log(
" however the system's fileset-override is marked as in-active, skipping...")
_continue = False
elif not self.utils.dict_active(self.filesets[setfolder]):
self.utils.log(' however fileset itself is marked as in-active, skipping...')
_continue = False
else:
self.utils.log(' and fileset record is active, continuing...')
_continue = True
if _continue:
# define paths
sqlpath = os.path.join(self.approot, self.folders['sql'], sysfolder, setfolder)
runpath = os.path.join(self.approot, self.folders['run'], sysfolder)
# TODO: combine into single makedirs statement instead of 2 mkdir
if not os.path.isdir(runpath):
self.utils.log(' creating system folder', runpath)
os.mkdir(runpath)
runpath = os.path.join(self.approot, self.folders['run'], sysfolder, setfolder)
if not os.path.isdir(runpath):
self.utils.log(' creating fileset folder', runpath)
os.mkdir(runpath)
self.utils.recursive_copy(sqlpath, runpath, replace_existing=True)
# iterate all .coa.sql files in the fileset subfolder...
for runfile in os.listdir(runpath):
runfilepath = os.path.join(runpath, runfile)
if os.path.isfile(runfilepath) and runfile[-8:] == '.coa.sql':
# if .coa.sql file, read into memory
self.utils.log('\n PROCESSING COA.SQL FILE', runfile)
with open(runfilepath, 'r') as fh:
runfiletext = fh.read()
self.utils.log(' characters in file', str(len(runfiletext)))
# SUBSTITUTE values for: system-fileset override [source_systems.yaml --> filesets]
if setfolder in self.systems[sysfolder]['filesets']: # sysfolder is only ACTIVE systems per line 642(ish) above
sub_dict = self.systems[sysfolder]['filesets'][setfolder]
if self.utils.dict_active(sub_dict, 'system-fileset overrides'):
runfiletext = self.utils.substitute(runfiletext, sub_dict,
subname='system-fileset overrides (highest priority)')
# SUBSTITUTE values for: system-defaults [source_systems.yaml]
sub_dict = self.systems[sysfolder] # sysfolder is only ACTIVE systems per line 642(ish) above
if self.utils.dict_active(sub_dict, 'system defaults'):
runfiletext = self.utils.substitute(runfiletext, sub_dict, skipkeys=['filesets'],
subname='system defaults')
# SUBSTITUTE values for: overall application defaults (never inactive) [config.yaml substitutions]
self.utils.log(' always use dictionary')
runfiletext = self.utils.substitute(runfiletext, self.substitutions,
subname='overall app defaults (config.substitutions)')
# SUBSTITUTE values for: TRANSCEND (mostly for db_coa and db_region)
runfiletext = self.utils.substitute(runfiletext, self.transcend,
subname='overall transcend database defaults (db_coa and db_region)',
skipkeys=['host', 'username', 'password',
'logmech'])
# SUBSTITUTE values for: individual file subs [fileset.yaml --> files]
if setfolder in self.filesets:
sub_dict = {}
for file_key, file_dict in self.filesets[setfolder]['files'].items():
if ntpath.basename(file_dict['gitfile']) == runfile:
sub_dict = file_dict
break
if sub_dict:
runfiletext = self.utils.substitute(runfiletext, sub_dict,
skipkeys=['collection',
'dbsversion', 'gitfile'],
subname='file substitutions')
# SUBSTITUTE values for: fileset defaults [fileset.yaml substitutions]
if setfolder in self.filesets:
sub_dict = self.filesets[setfolder]
if self.utils.dict_active(sub_dict, 'fileset defaults'):
runfiletext = self.utils.substitute(runfiletext, sub_dict, skipkeys=['files'],
subname='fileset defaults (lowest priority)')
# split sql file into many sql statements
sqls_raw = runfiletext.split(';')
self.utils.log(' sql statements in file', str(len(sqls_raw) - 1))
sqls_done = []
i = 0
# loop thru individual sql statements within file
for sql_raw in sqls_raw:
# light formatting...
sql = self.utils.format_sql(sql_raw)
if sql != '':
i += 1
self.utils.log(' SQL %i' % i, '%s...' % sql[:50].replace('\n', ' '))
# Get SPECIAL COMMANDS
cmds = self.utils.get_special_commands(sql, '{{replaceMe:{cmdname}}}',
keys_to_skip=['save', 'load', 'call', 'vis', 'pptx'])
sql = cmds['sql'] # sql stripped of commands (now in dict)
del cmds['sql']
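                                                # e.g. (illustrative) a statement may embed /*{{file:extra.coa.sql}}*/,
                                                # /*{{temp:accounts.csv}}*/ or /*{{loop:databases.csv}}*/, which are
                                                # extracted here and handled by the blocks below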
self.utils.log(' processing special commands')
for cmdname, cmdvalue in cmds.items():
# --> FILE <--: replace with local sql file
if str(cmdname[:4]).lower() == 'file':
self.utils.log(' replace variable with a local sql file')
if not os.path.isfile(os.path.join(runpath, cmdvalue)):
self.utils.log('custom file missing',
os.path.join(runpath, cmdvalue), warning=True)
self.utils.log(
' This may be by design, consult CSM for details.')
# raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), os.path.join(runpath, cmdvalue))
else:
self.utils.log(' specified file found', cmdvalue)
with open(os.path.join(runpath, cmdvalue), 'r') as fh:
tempsql = fh.read()
sqls_done.append('/* BEGIN file insert: %s */ \n%s' % (
cmdvalue, tempsql))
sql = sql.replace('{{replaceMe:%s}}' % cmdname,
'END file insert: %s' % cmdvalue, 1)
# --> TEMP <--: load temp file from .csv
if str(cmdname[:4]).lower() == 'temp':
self.utils.log(' create temp (volatile) table from .csv')
if not os.path.isfile(os.path.join(runpath, cmdvalue)):
self.utils.log('csv file missing!!!',
os.path.join(runpath, cmdvalue), error=True)
raise FileNotFoundError(errno.ENOENT,
os.strerror(errno.ENOENT),
os.path.join(runpath, cmdvalue))
else:
self.utils.log(' csv file found', cmdvalue)
tempsql = self.utils.sql_create_temp_from_csv(
os.path.join(runpath, cmdvalue))
sqls_done.append(tempsql)
sql = sql.replace('{{replaceMe:%s}}' % cmdname,
'above volatile table create script for %s' % cmdvalue,
1)
# --> LOOP <--: loop thru csv and generate one sql per csv row, with substitutions
if str(cmdname[:4]).lower() == 'loop':
self.utils.log(' loop sql once per row in .csv, with substitutions')
# can we find the file?
if not os.path.isfile(os.path.join(runpath, cmdvalue)):
self.utils.log('csv file missing!!!',
os.path.join(runpath, cmdvalue), warning=True)
else:
self.utils.log(' file found!')
df = pd.read_csv(os.path.join(runpath, cmdvalue))
self.utils.log(' rows in file', str(len(df)))
# perform csv substitutions
self.utils.log(
' perform csv file substitutions (find: {column_name}, replace: row value)')
for index, row in df.iterrows():
tempsql = sql
for col in df.columns:
col = col.strip()
tempsql = tempsql.replace(str('{%s}' % col),
str(row[col]).strip())
tempsql = tempsql.replace('{{replaceMe:%s}}' % cmdname,
' csv row %i out of %i ' % (
index + 1, len(df)))
self.utils.log(' sql generated from row data',
'character length = %i' % len(tempsql))
sqls_done.append(tempsql)
sql = '' # don't append original sql again - it is only a template
# --> others, append special command back to the SQL for processing in the run phase
# if str(cmdname[:4]).lower() in ['save','load','call']:
# sql = sql.replace('/* {{replaceMe:%s}} */' %cmdname,'/*{{%s:%s}}*/' %(cmdname, cmdvalue), 1)
# after all special commands, append the original sql
sqls_done.append(sql)
# write out new finalized file content:
self.utils.log(' writing out final sql')
with open(runfilepath, 'w') as fh:
fh.write('\n\n'.join(sqls_done))
self.utils.log('done!')
self.utils.log('time', str(dt.datetime.now()))
def archive_prepared_sql(self, name=''):
"""Manually archives (moves) all folders / files in the 'run' folder, where
prepared sql is stored after the prepare_sql() function. This includes the
runlog.txt. All files are moved, leaving the 'run' folder empty after the
operation. The destination folder is a new time-stamped output folder (with
optional name). Useful when you don't have access to execute_run() (the
process that normally archives collateral) against customer system directly,
but still want to keep a record of that 'run'. For example, if you need to
prepare sql to send to a customer DBA for execution - you cannot execute_run()
yourself, but still want to keep a record of what was sent.
USAGE:
tdcoa.archive_prepared_sql(name = 'name_of_archive')
- name: optional string to append to folder name (after timestamp)
all non-alphanumeric characters in name are replaced with underscore
EXAMPLE:
from tdcsm.tdcoa import tdcoa
coa = tdcoa() # instantiates objects
coa.download_files() # get some collateral to move
            coa.copy_download_to_sql()   # move download to sql store
coa.prepare_sql() # prepare sql for execution
# now we should have some files worth archiving:
coa.archive_prepared_sql ('march run for CustABC')
"""
self.utils.log('archive_prepared_sql started', header=True)
self.utils.log('time', str(dt.datetime.now()))
outputpath = self.make_output_folder(name)
runpath = os.path.join(self.approot, self.folders['run'])
self.utils.log('created output folder', outputpath)
self.utils.log('moving all content from', runpath)
self.utils.recursive_copy(runpath, outputpath)
self.utils.logpath = os.path.join(outputpath, 'runlog.txt')
self.utils.recursive_delete(os.path.join(self.approot, self.folders['run']))
os.mkdir(os.path.join(self.approot, self.folders['run']))
self.utils.log('done!')
self.utils.log('time', str(dt.datetime.now()))
def make_output_folder(self, foldername='', make_hidden_file=False, indent=0):
# Build final folder name/path:
name = str(dt.datetime.now())[:-7].replace(' ', '_').replace(':', '').strip()
if foldername.strip() != '': name = '%s--%s' %(name, str(re.sub('[^0-9a-zA-Z]+', '_', foldername.strip())))
outputpath = os.path.join(self.approot, self.folders['output'], name)
self.utils.log('Defined output folder', outputpath)
if not os.path.exists(outputpath): os.makedirs(outputpath)
if make_hidden_file:
self.utils.log('save location of last-run output folder to hidden file', indent=indent)
with open(os.path.join(self.approot, '.last_run_output_path.txt'), 'w') as lastoutput:
# relative path in case of system change (i.e. switching laptops)
lastoutput.write(outputpath[outputpath.find(self.folders['output']):])
return outputpath
def execute_run(self, name=''):
self.utils.log('execute_run started', header=True)
self.utils.log('time', str(dt.datetime.now()))
# at this point, we make the assumption that everything in the "run" directory is valid
# make output directory for execution output and other collateral
runpath = os.path.join(self.approot, self.folders['run'])
outputpath = self.make_output_folder(name)
skip_dbs = self.utils.validate_boolean(self.settings['skip_dbs'],'bool')
# create hidden file containing last run's output -- to remain in the root folder
with open(os.path.join(self.approot, '.last_run_output_path.txt'), 'w') as lastoutput:
# convert to relative path in case of system change (i.e. switching laptops)
lastoutput.write(outputpath[outputpath.find(self.folders['output']):])
self.utils.log('save location of last-run output folder to hidden file')
self.utils.log('last-run output', outputpath)
# loop through systems
for sysname in os.listdir(runpath):
sysfolder = os.path.join(runpath, sysname)
if os.path.isdir(sysfolder):
# iterate system folders -- must exist in source_systems.yaml!
if sysname not in self.systems or self.utils.dict_active(self.systems[sysname]) == False: # ADDED to ensure ACTIVE systems only
self.utils.log('SYSTEM NOT FOUND IN SOURCE_SYSTEMS.YAML', sysname, warning=True)
else:
# iterate file set folders -- ok to NOT exist, depending on setting
for setname in os.listdir(sysfolder):
setfolder = os.path.join(sysfolder, setname)
if os.path.isdir(setfolder):
# ACTIVE ONLY, within loop a few lines above
if setname not in self.systems[sysname]['filesets'] and str(
self.settings['run_non_fileset_folders']).strip().lower() != 'true':
self.utils.log('-' * self.utils.logspace)
self.utils.log('WARNING!!!\nfileset does not exist', setname)
self.utils.log(' AND setting "run_non_fileset_folders" is not "True"')
self.utils.log(' Skipping folder', setname)
else:
self.utils.log('SYSTEM: %s FILESET: %s' % (sysname, setname), header=True)
workpath = setfolder
outputfo = os.path.join(outputpath, sysname, setname)
self.utils.log('work (sql) path', workpath)
self.utils.log('output path', outputfo)
# collect all prepared sql files, place in alpha order
coasqlfiles = []
for coafile in os.listdir(workpath):
if coafile[:1] != '.' and coafile[-8:] == '.coa.sql':
self.utils.log('found prepared sql file', coafile)
coasqlfiles.append(coafile)
coasqlfiles.sort()
if len(coasqlfiles) == 0:
self.utils.log('no .coa.sql files found in\n %s' % workpath, warning=True)
else:
                                    self.utils.log('all sql files alpha-sorted for execution consistency')
self.utils.log('sql files found', str(len(coasqlfiles)))
# create output folder:
self.utils.log('output folder', outputfo)
if not os.path.exists(outputfo):
os.makedirs(outputfo)
self.outputpath = outputfo
# create our upload-manifest, 1 manifest per fileset
self.utils.log('creating upload manifest file')
with open(os.path.join(outputfo, 'upload-manifest.json'), 'w') as manifest:
manifest.write('{"entries":[ ')
manifestdelim = '\n '
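                                    # when complete (see the 'load' handling below), upload-manifest.json looks roughly like
                                    # (file/table names illustrative only):
                                    #   {"entries":[
                                    #      {"file": "results.csv", "table": "adlste_coa_stg.some_table", "call": ""}
                                    #     ,{"file": "more.csv", "table": "adlste_coa_stg.other_table", "call": ""}
                                    #    ]}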
# connect to customer system: # ACTIVE ONLY within loop above
conn = self.utils.open_connection(
conntype=self.systems[sysname]['driver'],
encryption=self.systems[sysname]['encryption'],
system=self.systems[sysname],
skip = skip_dbs) # <------------------------------- Connect to the database
# loop thru all sql files:
for coasqlfile in sorted(coasqlfiles):
self.utils.log('\nOPENING SQL FILE', coasqlfile)
with open(os.path.join(workpath, coasqlfile), 'r') as coasqlfilehdlr:
sqls = coasqlfilehdlr.read() # all sql statements in a sql file
sqlcnt = 0
for sql in sqls.split(';'): # loop thru the individual sql statements
sqlcnt += 1
if sql.strip() == '':
self.utils.log('null statement, skipping')
else:
self.utils.log('\n---- SQL #%i' % sqlcnt)
# pull out any embedded SQLcommands:
sqlcmd = self.utils.get_special_commands(sql)
sql = sqlcmd.pop('sql', '')
df = self.utils.open_sql(conn, sql, skip = skip_dbs) # <--------------------- Run SQL
csvfile=''
csvfile_exists=False
if len(df) != 0: # Save non-empty returns to .csv
if len(sqlcmd) == 0:
self.utils.log('no special commands found')
if 'save' not in sqlcmd:
sqlcmd['save'] = '%s.%s--%s' % (
sysname, setname, coasqlfile) + '%04d' % sqlcnt + '.csv'
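                                                            # e.g. (illustrative): 'mysystem.myfileset--0001_dbfeatures.coa.sql0003.csv'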
# once built, append output folder, SiteID on the front, iterative counter if duplicates
# csvfile = os.path.join(outputfo, sqlcmd['save'])
csvfile = os.path.join(workpath, sqlcmd['save'])
i = 0
while os.path.isfile(csvfile):
i += 1
if i == 1:
csvfile = csvfile[:-4] + '.%03d' % i + csvfile[-4:]
else:
csvfile = csvfile[:-8] + '.%03d' % i + csvfile[-4:]
self.utils.log('CSV save location', csvfile)
self.utils.log('saving file...')
df.to_csv(csvfile, index=False) # <---------------------- Save to .csv
self.utils.log('file saved!')
csvfile_exists = os.path.exists(csvfile)
if 'vis' in sqlcmd: # run visualization py file
                                                        if not csvfile_exists:  # sql returned zero records, so no csv was written
                                                            self.utils.log(
                                                                'sql returned zero records and no csv file was generated, skipping the vis special command',
                                                                csvfile)
else:
self.utils.log('\nvis cmd', 'found')
vis_file = os.path.join(workpath, sqlcmd['vis'].replace('.csv', '.py'))
self.utils.log('vis py file', vis_file)
self.utils.log('running vis file..')
subprocess.run([sys.executable, vis_file])
self.utils.log('Vis file complete!')
if 'pptx' in sqlcmd: # insert to pptx file
from .pptx import replace_placeholders
self.utils.log('\npptx cmd', 'found')
pptx_file = Path(workpath) / sqlcmd['pptx']
self.utils.log('pptx file', str(pptx_file))
self.utils.log('inserting to pptx file..')
replace_placeholders(pptx_file, Path(workpath))
self.utils.log('pptx file complete!')
if 'load' in sqlcmd: # add to manifest
                                                        if not csvfile_exists:  # sql returned zero records, so no csv was written
                                                            self.utils.log('sql returned zero records and no csv file was generated, skipping the manifest entry',
                                                                           csvfile)
else:
self.utils.log(
'file marked for loading to Transcend, adding to upload-manifest.json')
if 'call' not in sqlcmd:
sqlcmd['call'] = ''
manifest_entry = '%s{"file": "%s", "table": "%s", "call": "%s"}' % (
manifestdelim, sqlcmd['save'], sqlcmd['load'],
sqlcmd['call'])
manifestdelim = '\n,'
with open(os.path.join(outputfo, 'upload-manifest.json'),
'a') as manifest:
manifest.write(manifest_entry)
self.utils.log('Manifest updated',
str(manifest_entry).replace(',', ',\n'))
# archive file we just processed (for re-run-ability)
self.utils.log('Moving coa.sql file to Output folder', coasqlfile)
src = os.path.join(workpath, coasqlfile)
dst = os.path.join(outputfo, coasqlfile)
shutil.move(src, dst)
self.utils.log('')
# close JSON object
with open(os.path.join(outputfo, 'upload-manifest.json'), 'a') as manifest:
manifest.write("\n ]}")
self.utils.log('closing out upload-manifest.json')
# Move all files from run folder to output, for posterity:
self.utils.log('moving all other run artifacts to output folder, for archiving')
self.utils.recursive_copy(workpath, outputfo, replace_existing=False)
self.utils.recursive_delete(workpath)
# also COPY a few other operational files to output folder, for ease of use:
self.utils.log('-' * self.utils.logspace)
self.utils.log('post-processing')
for srcpath in [os.path.join(self.approot, '.last_run_output_path.txt'),
self.configpath, self.filesetpath]:
self.utils.log('copy to output folder root, for ease of use: \n %s' % srcpath)
dstpath = os.path.join(outputpath, os.path.basename(srcpath))
shutil.copyfile(srcpath, dstpath)
self.utils.log('\ndone!')
self.utils.log('time', str(dt.datetime.now()))
# after logging is done, move the log file too...
runlogsrc = os.path.join(self.approot, self.folders['run'], 'runlog.txt')
runlogdst = os.path.join(outputpath, 'runlog.txt')
if os.path.isfile(runlogsrc): shutil.move(runlogsrc, runlogdst)
def collect_data(self, name=''):
self.utils.log('collect_data started', header=True)
self.utils.log('time', str(dt.datetime.now()))
# at this point, we make the assumption that everything in the "run" directory is valid
# make output directory for execution output and other collateral
runpath = os.path.join(self.approot, self.folders['run'])
outputpath = self.make_output_folder(name)
# create hidden file containing last run's output -- to remain in the root folder
with open(os.path.join(self.approot, '.last_run_output_path.txt'), 'w') as lastoutput:
# convert to relative path in case of system change (i.e. switching laptops)
lastoutput.write(outputpath[outputpath.find(self.folders['output']):])
self.utils.log('save location of last-run output folder to hidden file')
self.utils.log('last-run output', outputpath)
# loop through systems
for sysname in os.listdir(runpath):
sysfolder = os.path.join(runpath, sysname)
if os.path.isdir(sysfolder):
# iterate system folders -- must exist in config.yaml!
if sysname not in self.systems:
self.utils.log('SYSTEM NOT FOUND IN CONFIG.YAML', sysname, warning=True)
else:
# iterate file set folders -- ok to NOT exist, depending on setting
for setname in os.listdir(sysfolder):
setfolder = os.path.join(sysfolder, setname)
if os.path.isdir(setfolder):
if setname not in self.systems[sysname]['filesets'] and str(
self.settings['run_non_fileset_folders']).strip().lower() != 'true':
self.utils.log('-' * self.utils.logspace)
self.utils.log('WARNING!!!\nfileset does not exist', setname)
self.utils.log(' AND setting "run_non_fileset_folders" is not "True"')
self.utils.log(' Skipping folder', setname)
else:
self.utils.log('SYSTEM: %s FILESET: %s' % (sysname, setname), header=True)
workpath = setfolder
outputfo = os.path.join(outputpath, sysname, setname)
self.utils.log('work (sql) path', workpath)
self.utils.log('output path', outputfo)
# collect all prepared sql files, place in alpha order
coasqlfiles = []
for coafile in os.listdir(workpath):
if coafile[:1] != '.' and coafile[-8:] == '.coa.sql':
self.utils.log('found prepared sql file', coafile)
coasqlfiles.append(coafile)
coasqlfiles.sort()
if len(coasqlfiles) == 0:
self.utils.log('no .coa.sql files found in\n %s' % workpath, warning=True)
else:
                                    self.utils.log('all sql files alpha-sorted for execution consistency')
self.utils.log('sql files found', str(len(coasqlfiles)))
# create output folder:
self.utils.log('output folder', outputfo)
if not os.path.exists(outputfo):
os.makedirs(outputfo)
self.outputpath = outputfo
# create our upload-manifest, 1 manifest per fileset
self.utils.log('creating upload manifest file')
with open(os.path.join(outputfo, 'upload-manifest.json'), 'w') as manifest:
manifest.write('{"entries":[ ')
manifestdelim = '\n '
# connect to customer system:
conn = self.utils.open_connection(
conntype=self.systems[sysname]['driver'],
encryption=self.systems[sysname]['encryption'],
system=self.systems[sysname],
skip = self.skip_dbs) # <------------------------------- Connect to the database
# loop thru all sql files:
for coasqlfile in sorted(coasqlfiles):
self.utils.log('\nOPENING SQL FILE', coasqlfile)
with open(os.path.join(workpath, coasqlfile), 'r') as coasqlfilehdlr:
sqls = coasqlfilehdlr.read() # all sql statements in a sql file
sqlcnt = 0
for sql in sqls.split(';'): # loop thru the individual sql statements
sqlcnt += 1
if sql.strip() == '':
self.utils.log('null statement, skipping')
else:
self.utils.log('\n---- SQL #%i' % sqlcnt)
# pull out any embedded SQLcommands:
sqlcmd = self.utils.get_special_commands(sql)
sql = sqlcmd.pop('sql', '')
df = self.utils.open_sql(conn, sql, skip = self.skip_dbs) # <--------------------- Run SQL
csvfile=''
csvfile_exists=False
if len(df) != 0: # Save non-empty returns to .csv
if len(sqlcmd) == 0:
self.utils.log('no special commands found')
if 'save' not in sqlcmd:
sqlcmd['save'] = '%s.%s--%s' % (
sysname, setname, coasqlfile) + '%04d' % sqlcnt + '.csv'
# once built, append output folder, SiteID on the front, iterative counter if duplicates
# csvfile = os.path.join(outputfo, sqlcmd['save'])
csvfile = os.path.join(workpath, sqlcmd['save'])
i = 0
while os.path.isfile(csvfile):
i += 1
if i == 1:
csvfile = csvfile[:-4] + '.%03d' % i + csvfile[-4:]
else:
csvfile = csvfile[:-8] + '.%03d' % i + csvfile[-4:]
self.utils.log('CSV save location', csvfile)
self.utils.log('saving file...')
df.to_csv(csvfile, index=False) # <---------------------- Save to .csv
self.utils.log('file saved!')
csvfile_exists = os.path.exists(csvfile)
if 'load' in sqlcmd: # add to manifest
                                                        if not csvfile_exists:  # sql returned zero records, so no csv was written
                                                            self.utils.log('sql returned zero records and no csv file was generated, skipping the manifest entry',
                                                                           csvfile)
else:
self.utils.log(
'file marked for loading to Transcend, adding to upload-manifest.json')
if 'call' not in sqlcmd:
sqlcmd['call'] = ''
manifest_entry = '%s{"file": "%s", "table": "%s", "call": "%s"}' % (
manifestdelim, sqlcmd['save'], sqlcmd['load'],
sqlcmd['call'])
manifestdelim = '\n,'
with open(os.path.join(outputfo, 'upload-manifest.json'),
'a') as manifest:
manifest.write(manifest_entry)
self.utils.log('Manifest updated',
str(manifest_entry).replace(',', ',\n'))
# archive file we just processed (for re-run-ability)
self.utils.log('Moving coa.sql file to Output folder', coasqlfile)
src = os.path.join(workpath, coasqlfile)
dst = os.path.join(outputfo, coasqlfile)
shutil.move(src, dst)
self.utils.log('')
# close JSON object
with open(os.path.join(outputfo, 'upload-manifest.json'), 'a') as manifest:
manifest.write("\n ]}")
self.utils.log('closing out upload-manifest.json')
# Move all files from run folder to output, for posterity:
self.utils.log('moving all other run artifacts to output folder, for archiving')
self.utils.recursive_copy(workpath, outputfo, replace_existing=False)
self.utils.recursive_delete(workpath)
# also COPY a few other operational files to output folder, for ease of use:
self.utils.log('-' * self.utils.logspace)
self.utils.log('post-processing')
for srcpath in [os.path.join(self.approot, '.last_run_output_path.txt'),
self.configpath, self.filesetpath]:
self.utils.log('copy to output folder root, for ease of use: \n %s' % srcpath)
dstpath = os.path.join(outputpath, os.path.basename(srcpath))
shutil.copyfile(srcpath, dstpath)
self.utils.log('\ndone!')
self.utils.log('time', str(dt.datetime.now()))
# after logging is done, move the log file too...
runlogsrc = os.path.join(self.approot, self.folders['run'], 'runlog.txt')
runlogdst = os.path.join(outputpath, 'runlog.txt')
        if os.path.isfile(runlogsrc): shutil.move(runlogsrc, runlogdst)
def process_data(self, _outputpath=''):
self.utils.log('process_data started', header=True)
self.utils.log('time', str(dt.datetime.now()))
#
# Find the latest output path where the output of collect_data resides
if _outputpath != '': # use supplied path
outputpath = _outputpath
self.utils.log('output folder = manual param', outputpath)
elif os.path.isfile(os.path.join(self.approot,
'.last_run_output_path.txt')): # get path from hidden .last_run_output_path.txt
# .last_run_output_path.txt in approot
with open(os.path.join(self.approot, '.last_run_output_path.txt'), 'r') as fh:
outputpath = fh.read().strip().split('\n')[0]
outputpath = os.path.join(self.approot, outputpath) # create absolute path from relative path in file
self.utils.log('output folder', outputpath)
elif self.outputpath != '':
# local variable set
outputpath = self.outputpath
self.utils.log('output folder = class variable: coa.outputpath', outputpath)
else:
outputpath = ''
self.utils.log('no output path defined')
# now that outputfo is defined, let's make sure the dir actually exists:
if not os.path.isdir(outputpath):
self.utils.log('\nERROR = invalid path', outputpath)
raise NotADirectoryError('Invalid Path: %s' % outputpath)
else:
self.outputpath = outputpath
# update log file to correct location
self.utils.log('updating runlog.txt location')
self.utils.logpath = os.path.join(outputpath, 'runlog.txt')
self.utils.bufferlogs = False
self.utils.log('unbuffer logs')
# at this point, we make the assumption that everything in the "run" directory is valid
# create hidden file containing last run's output -- to remain in the root folder
with open(os.path.join(self.approot, '.last_run_output_path.txt'), 'w') as lastoutput:
# convert to relative path in case of system change (i.e. switching laptops)
lastoutput.write(outputpath[outputpath.find(self.folders['output']):])
self.utils.log('save location of last-run output folder to hidden file')
self.utils.log('last-run output', outputpath)
# loop through systems
for sysname in os.listdir(outputpath):
sysfolder = os.path.join(outputpath, sysname)
if os.path.isdir(sysfolder):
# iterate system folders -- must exist in config.yaml!
if sysname not in self.systems:
self.utils.log('SYSTEM NOT FOUND IN CONFIG.YAML', sysname, warning=True)
else:
# iterate file set folders -- ok to NOT exist, depending on setting
for setname in os.listdir(sysfolder):
setfolder = os.path.join(sysfolder, setname)
if os.path.isdir(setfolder):
if setname not in self.systems[sysname]['filesets'] and str(
self.settings['run_non_fileset_folders']).strip().lower() != 'true':
self.utils.log('-' * self.utils.logspace)
self.utils.log('WARNING!!!\nfileset does not exist', setname)
self.utils.log(' AND setting "run_non_fileset_folders" is not "True"')
self.utils.log(' Skipping folder', setname)
else:
self.utils.log('SYSTEM: %s FILESET: %s' % (sysname, setname), header=True)
workpath = setfolder
outputfo = os.path.join(outputpath, sysname, setname)
self.utils.log('work (sql) path', workpath)
self.utils.log('output path', outputfo)
# collect all prepared sql files, place in alpha order
coasqlfiles = []
for coafile in os.listdir(workpath):
if coafile[:1] != '.' and coafile[-8:] == '.coa.sql':
self.utils.log('found prepared sql file', coafile)
coasqlfiles.append(coafile)
coasqlfiles.sort()
if len(coasqlfiles) == 0:
self.utils.log('no .coa.sql files found in\n %s' % workpath, warning=True)
else:
                                    self.utils.log('all sql files alpha-sorted for execution consistency')
self.utils.log('sql files found', str(len(coasqlfiles)))
# loop thru all sql files:
for coasqlfile in sorted(coasqlfiles):
self.utils.log('\nOPENING SQL FILE', coasqlfile)
with open(os.path.join(workpath, coasqlfile), 'r') as coasqlfilehdlr:
sqls = coasqlfilehdlr.read() # all sql statements in a sql file
sqlcnt = 0
for sql in sqls.split(';'): # loop thru the individual sql statements
sqlcnt += 1
if sql.strip() == '':
self.utils.log('null statement, skipping')
else:
self.utils.log('\n---- SQL #%i' % sqlcnt)
# pull out any embedded SQLcommands:
sqlcmd = self.utils.get_special_commands(sql)
sql = sqlcmd.pop('sql', '')
csvfile=''
csvfile_exists=False
if len(sqlcmd) == 0:
self.utils.log('no special commands found')
if 'save' in sqlcmd:
csvfile = os.path.join(workpath, sqlcmd['save'])
csvfile_exists = os.path.exists(csvfile)
if 'vis' in sqlcmd: # run visualization py file
if csvfile_exists == False: # skip the vis special command when the SQL returned zero records (no csv was generated)
self.utils.log(
'The SQL returned zero records so the csv file was not generated; skipping the vis special command',
csvfile)
else:
self.utils.log('\nvis cmd', 'found')
vis_file = os.path.join(workpath, sqlcmd['vis'].replace('.csv', '.py'))
self.utils.log('vis py file', vis_file)
self.utils.log('running vis file..')
os.system('python %s' % vis_file)
self.utils.log('Vis file complete!')
if 'pptx' in sqlcmd: # insert to pptx file
from .pptx import replace_placeholders
self.utils.log('\npptx cmd', 'found')
pptx_file = Path(workpath) / sqlcmd['pptx']
self.utils.log('pptx file', str(pptx_file))
self.utils.log('inserting to pptx file..')
replace_placeholders(pptx_file, Path(workpath))
self.utils.log('pptx file complete!')
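# Illustrative note (assumption, not taken verbatim from this module): get_special_commands(sql)
# is assumed to return a dict whose 'sql' key holds the statement itself plus any special-command
# keys found in it, e.g. something like:
#   {'sql': 'SELECT ...',
#    'save': 'cpu_summary.csv',       # hypothetical file name
#    'vis':  'cpu_summary.csv',       # hypothetical; the .csv is swapped for .py to find the vis script
#    'pptx': 'executive_deck.pptx'}   # hypothetical file name
# Only the key names ('save', 'vis', 'pptx', plus 'load'/'call' used elsewhere) come from this code.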
self.utils.log('\ndone!')
self.utils.log('time', str(dt.datetime.now()))
def process_manual_files(self,_outputpath=''):
# This function assumes that the manual csv files are placed in the latest output folder
self.utils.log('process_manual_files started', header=True)
self.utils.log('time', str(dt.datetime.now()))
self.utils.log('Running Prepare SQL step to get the required files for manual data processing')
self.prepare_sql()
runpath = os.path.join(self.approot, self.folders['run'])
# Find the latest output path
if _outputpath != '': # use supplied path
outputpath = _outputpath
self.utils.log('output folder = manual param', outputpath)
elif os.path.isfile(os.path.join(self.approot,
'.last_run_output_path.txt')): # get path from hidden .last_run_output_path.txt
# .last_run_output_path.txt in approot
with open(os.path.join(self.approot, '.last_run_output_path.txt'), 'r') as fh:
outputpath = fh.read().strip().split('\n')[0]
outputpath = os.path.join(self.approot, outputpath) # create absolute path from relative path in file
self.utils.log('output folder', outputpath)
elif self.outputpath != '':
# local variable set
outputpath = self.outputpath
self.utils.log('output folder = class variable: coa.outputpath', outputpath)
else:
outputpath = ''
self.utils.log('no output path defined')
# now that outputpath is defined, let's make sure the dir actually exists:
if not os.path.isdir(outputpath):
self.utils.log('\nERROR = invalid path', outputpath)
raise NotADirectoryError('Invalid Path: %s' % outputpath)
else:
self.outputpath = outputpath
# update log file to correct location
self.utils.log('updating runlog.txt location')
self.utils.logpath = os.path.join(outputpath, 'runlog.txt')
self.utils.bufferlogs = False
self.utils.log('unbuffer logs')
for dirs in os.listdir(runpath):
if os.path.isdir(os.path.join(runpath,dirs)):
shutil.move(os.path.join(runpath,dirs),outputpath)
manual_files = []
for files in os.listdir(outputpath):
file_path=os.path.join(outputpath,files)
files=os.path.basename(files)
filename, extension = os.path.splitext(files)
if str(extension).lower() == '.csv':
manual_files.append(files)
system_name = files.split('.')[0]
fileset = files.split('.')[1]
system_dir=os.path.join(outputpath,system_name)
fileset_dir = os.path.join(system_dir, fileset)
if os.path.isdir(fileset_dir):
shutil.copy(file_path, fileset_dir)
os.rename(os.path.join(fileset_dir, files),
os.path.join(fileset_dir, str(files.split('.')[2]) + '.' + str(files.split('.')[3])))
shutil.copy(os.path.join(outputpath,'upload-manifest.json'), os.path.join(fileset_dir, 'upload-manifest.json'))
else:
os.makedirs(fileset_dir)  # create the system folder too if it is missing
shutil.move(file_path, fileset_dir)  # move by full path, not bare filename
# Move the runlog created by the prepare_sql function
os.rename(os.path.join(runpath,'runlog.txt'),os.path.join(runpath,'prepare_sql_runlog.txt'))
shutil.move(os.path.join(runpath,'prepare_sql_runlog.txt'), outputpath)
def make_customer_files(self, name=''):
self.utils.log('make_customer_files started', header=True)
self.utils.log('time', str(dt.datetime.now()))
self.bteq_prefix
# at this point, we make the assumption that everything in the "run" directory is valid
# make output directory for execution output and other collateral
runpath = os.path.join(self.approot, self.folders['run'])
outputpath = self.make_output_folder(name)
# create hidden file containing last run's output -- to remain in the root folder
with open(os.path.join(self.approot, '.last_run_output_path.txt'), 'w') as lastoutput:
# convert to relative path in case of system change (i.e. switching laptops)
lastoutput.write(outputpath[outputpath.find(self.folders['output']):])
self.utils.log('save location of last-run output folder to hidden file')
self.utils.log('last-run output', outputpath)
self.utils.log('creating upload manifest file')
with open(os.path.join(outputpath, 'upload-manifest.json'), 'a') as manifest:
manifest.write('{"entries":[ ')
# loop through systems
for sysname in os.listdir(runpath):
sysfolder = os.path.join(runpath, sysname)
if os.path.isdir(sysfolder):
# iterate system folders -- must exist in config.yaml!
if sysname not in self.systems or self.utils.dict_active(self.systems[sysname]) == False: # ADDED to ensure ACTIVE systems only :
self.utils.log('SYSTEM NOT FOUND IN CONFIG.YAML', sysname, warning=True)
else:
# iterate file set folders -- ok to NOT exist, depending on setting
for setname in os.listdir(sysfolder):
setfolder = os.path.join(sysfolder, setname)
if os.path.isdir(setfolder):
# ACTIVE ONLY, within loop a few lines above
if setname not in self.systems[sysname]['filesets'] and str(
self.settings['run_non_fileset_folders']).strip().lower() != 'true':
self.utils.log('-' * self.utils.logspace)
self.utils.log('WARNING!!!\nfileset does not exist', setname)
self.utils.log(' AND setting "run_non_fileset_folders" is not "True"')
self.utils.log(' Skipping folder', setname)
else:
self.utils.log('SYSTEM: %s FILESET: %s' % (sysname, setname), header=True)
workpath = setfolder
#outputfo = os.path.join(outputpath, sysname, setname)
self.utils.log('work (sql) path', workpath)
#self.utils.log('output path', outputfo)
# collect all prepared sql files, place in alpha order
coasqlfiles = []
for coafile in os.listdir(workpath):
if coafile[:1] != '.' and coafile[-8:] == '.coa.sql':
self.utils.log('found prepared sql file', coafile)
coasqlfiles.append(coafile)
coasqlfiles.sort()
if len(coasqlfiles) == 0:
self.utils.log('no .coa.sql files found in\n %s' % workpath, warning=True)
else:
self.utils.log('all sql files alpha-sorted for execution consistency')
self.utils.log('sql files found', str(len(coasqlfiles)))
# create our upload-manifest, 1 manifest per fileset
manifestdelim = '\n '
# loop thru all sql files:
for coasqlfile in sorted(coasqlfiles):
self.utils.log('\nOPENING SQL FILE', coasqlfile)
with open(os.path.join(workpath, coasqlfile), 'r') as coasqlfilehdlr:
sqls = coasqlfilehdlr.read() # all sql statements in a sql file
sqlcnt = 0
for sql in sqls.split(';'): # loop thru the individual sql statements
sqlcnt += 1
if sql.strip() == '':
self.utils.log('null statement, skipping')
else:
self.utils.log('\n---- SQL #%i' % sqlcnt)
# pull out any embedded SQLcommands:
sqlcmd = self.utils.get_special_commands(sql)
if len(sqlcmd) == 0:
self.utils.log('no special commands found')
file_ext = ".manual.coa.sql"
manual_sqlfile_name = sysname + "." + setname + file_ext
manual_sqlfile_path= os.path.join(outputpath,manual_sqlfile_name)
manual_save_file_name = ""
if 'save' in sqlcmd or 'load' in sqlcmd or 'call' in sqlcmd:
manual_save_file_name = sysname + "." + setname + "." + sqlcmd.get('save', '')
with open(manual_sqlfile_path, 'a') as manual_file:
manual_file.write(
"------------------------------------------------------------- \n")
manual_file.write(
"/* Save the result of the below sql as a csv file with name: " +
manual_save_file_name + "*/ \n")
manual_file.write(
"------------------------------------------------------------- \n")
manual_file.write(sql + ";")
manual_file.write("\n\n")
else:
with open(manual_sqlfile_path, 'a') as manual_file:
manual_file.write(sql + ";")
manual_file.write("\n\n")
if 'load' in sqlcmd: # add to manifest
self.utils.log(
'file marked for loading to Transcend, adding to upload-manifest.json')
if 'call' not in sqlcmd:
sqlcmd['call'] = ''
manifestdelim = '\n,'
manifest_entry = '{"file": "%s", "table": "%s", "call": "%s"}%s' % (
manual_save_file_name, sqlcmd['load'],
sqlcmd['call'],manifestdelim)
manifest_filepath=os.path.join(outputpath, 'upload-manifest.json')
manifest=open(manifest_filepath,'a')
manifest.write(manifest_entry)
self.utils.log('Manifest updated',str(manifest_entry).replace(',', ',\n'))
manifest.close()
self.utils.recursive_delete(workpath)
manifest = open(os.path.join(outputpath, 'upload-manifest.json'), 'a')
manifest.write("\n ]}")
self.utils.log('closing out upload-manifest.json')
manifest.close()
# Below logic is to remove the comma (,) at the end of the last element in the manifest json file.
with open(os.path.join(outputpath, 'upload-manifest.json'),'r') as manifest_file:
manifest_content=manifest_file.read()
manifest_content=manifest_content.replace(",\n ]}","\n ]}")
with open(os.path.join(outputpath, 'upload-manifest.json'), 'w') as manifest_file:
manifest_file.write(manifest_content)
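# Illustrative sketch of the resulting upload-manifest.json (file/table values below are
# hypothetical; the structure follows the writes above, with the trailing comma removed):
#   {"entries":[
#    {"file": "Transcend.demo.example.csv", "table": "adlste_coa_stg.example_tbl", "call": ""}
#    ]}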
# also COPY a few other operational files to output folder, for ease of use:
self.utils.log('-' * self.utils.logspace)
self.utils.log('post-processing')
for srcpath in [os.path.join(self.approot, '.last_run_output_path.txt'),
self.configpath, self.filesetpath]:
self.utils.log('copy to output folder root, for ease of use: \n %s' % srcpath)
dstpath = os.path.join(outputpath, os.path.basename(srcpath))
shutil.copyfile(srcpath, dstpath)
self.utils.log('\ndone!')
self.utils.log('time', str(dt.datetime.now()))
# after logging is done, move the log file too...
runlogsrc = os.path.join(self.approot, self.folders['run'], 'runlog.txt')
runlogdst = os.path.join(outputpath, 'runlog.txt')
shutil.move(runlogsrc, runlogdst)
self.utils.log('make_customer_files Completed', header=True)
def upload_to_transcend(self, _outputpath=''):
self.utils.bufferlogs = True
self.utils.log('upload_to_transcend started', header=True)
self.utils.log('time', str(dt.datetime.now()))
skip_dbs = self.utils.validate_boolean(self.settings['skip_dbs'],'bool')
# process 3 ways to get output path
if _outputpath != '': # use supplied path
outputpath = _outputpath
self.utils.log('output folder = manual param', outputpath)
elif os.path.isfile(os.path.join(self.approot, '.last_run_output_path.txt')): # get path from hidden .last_run_output_path.txt
# .last_run_output_path.txt in approot
with open(os.path.join(self.approot, '.last_run_output_path.txt'), 'r') as fh:
outputpath = fh.read().strip().split('\n')[0]
outputpath = os.path.join(self.approot, outputpath) # create absolute path from relative path in file
self.utils.log('output folder', outputpath)
elif self.outputpath != '':
# local variable set
outputpath = self.outputpath
self.utils.log('output folder = class variable: coa.outputpath', outputpath)
else:
outputpath = ''
self.utils.log('no output path defined')
# now that outputpath is defined, let's make sure the dir actually exists:
if not os.path.isdir(outputpath):
self.utils.log('\nERROR = invalid path', outputpath)
raise NotADirectoryError('Invalid Path: %s' % outputpath)
else:
self.outputpath = outputpath
# update log file to correct location
self.utils.log('updating runlog.txt location')
self.utils.logpath = os.path.join(outputpath, 'runlog.txt')
self.utils.bufferlogs = False
self.utils.log('unbuffer logs')
# connect to Transcend
self.utils.log('connecting to transcend')
self.utils.log(' host', self.transcend['host'])
self.utils.log(' logmech', self.transcend['logmech'])
self.utils.log(' username', self.transcend['username'])
self.utils.log(' password', self.transcend['password'])
self.utils.log(' db_coa', self.transcend['db_coa'])
self.utils.log("\nNOTE: if you happen to see a scary WARNING below, DON'T PANIC!")
self.utils.log(" it just means you already had an active connection that was replaced.\n")
transcend = self.utils.open_connection(
'teradataml',
system=self.transcend,
skip = skip_dbs) # <--------------------------------- Connect
# Walk the directory structure looking for upload_manifest.json files
for workpath, subfo, files in os.walk(outputpath):
self.utils.log('\nexamining folder', str(workpath).strip())
workname = os.path.split(workpath)[1]
if str(workname)[:1] != '.': # no hidden folders
if 'upload-manifest.json' in files:
self.utils.log('FOUND upload-manifest.json')
with open(os.path.join(workpath, 'upload-manifest.json'), 'r') as fh:
manifestjson = fh.read()
manifest = json.loads(manifestjson)
self.utils.log('upload count found', str(len(manifest['entries'])))
self.utils.log('manifest file', '\n%s' % str(manifest))
if len(manifest['entries']) == 0:
self.utils.log('nothing to upload, skipping', workpath)
else:
for entry in manifest['entries']:
successful_load = False # flag to track if sp should be called after load attempt
# define database and table names
if '.' in entry['table']:
entry['schema'] = entry['table'].split('.')[0]
entry['table'] = entry['table'].split('.')[1]
else:
entry['schema'] = 'adlste_coa_stg'
self.utils.log('\nPROCESSING NEW ENTRY')
self.utils.log(' load file', entry['file'])
self.utils.log(' into table', entry['table'])
self.utils.log(' of schema', entry['schema'])
self.utils.log(' then call', entry['call'])
self.utils.log('-' * 10)
# open CSV and prepare for appending
csvfilepath = os.path.join(workpath, entry['file'])
self.utils.log('opening csv', csvfilepath)
dfcsv = pd.read_csv(csvfilepath)
dfcsv = dfcsv.where(pd.notnull(dfcsv), None)
self.utils.log('records found', str(len(dfcsv)))
# strip out any unnamed columns
for col in dfcsv.columns:
if col[:8] == 'Unnamed:':
self.utils.log('unnamed column dropped', col)
self.utils.log(' (usually the pandas index as a column, "Unnamed: 0")')
dfcsv = dfcsv.drop(columns=[col])
self.utils.log('final column count', str(len(dfcsv.columns)))
# APPEND data to database
self.utils.log('\nuploading', str(dt.datetime.now()))
try:
# write_to_perm = True
# steps:
# 1. load to staging table (perm)
# 2. load staging data --> global temp table (GTT)
# 3. call sp on GTT to merge to final table
# 4. delete staging table (perm)
if self.settings['write_to_perm'].lower() == 'true':
self.utils.log('write_to_perm', 'True')
self.utils.log('perm table', entry['schema'] + '.' + entry['table'] + '_%s' % self.unique_id)
# create staging table (perm) with unique id
if not skip_dbs:
df_to_sql(transcend['connection'], dfcsv, entry['table'], entry['schema'], copy_sfx=self.unique_id)
self.utils.log('load to PERM and GTT complete', str(dt.datetime.now()))
successful_load = True
# write_to_perm = False
# steps:
# 1. load into pre-created GTT table
# 1a. (will auto-create perm table if GTT doesn't exist)
# 2. call sp on GTT to merge to final table
else:
self.utils.log('write_to_perm', 'False')
if not skip_dbs:
df_to_sql(transcend['connection'], dfcsv, entry['table'], entry['schema'])
self.utils.log('load to GTT complete', str(dt.datetime.now()))
successful_load = True
except Exception as err:
from textwrap import dedent
self.utils.log('\nERROR during UPLOAD', error=True)
self.utils.log(str(err))
self.utils.log(' (error repeated below)')
self.utils.log('\n first 10 records of what was being uploaded (dataframe):')
self.utils.log(dfcsv[0:10])
self.utils.log('')
sql = dedent(f"""\
Select ColumnName, ColumnType, ColumnFormat, ColumnLength, ColumnId
from dbc.columns
where databasename = '{entry['schema']}'
and tablename = '{entry['table']}'
order by ColumnId""")
self.utils.log(sql)
df = sql_to_df(transcend['connection'], sql)
self.utils.log('\n\n structure of destination table:')
print(df)
self.utils.log('\n')
exit() # todo: possibly remove so that the whole process doesn't stop on error?
# CALL any specified SPs only if data loaded successfully:
if str(entry['call']).strip() != "" and successful_load:
self.utils.log('\nStored Proc', str(entry['call']))
try:
if not skip_dbs:
with transcend['connection'].cursor() as csr:
csr.execute('call %s ;' % str(entry['call']))
self.utils.log('complete', str(dt.datetime.now()))
# if write_to_perm == true, drop unique perm table after successful sp call
if self.settings['write_to_perm'].lower() == 'true':
self.utils.log('\ndrop unique perm table', entry['schema'] + '.' + entry['table'] + '_%s' % self.unique_id)
if not skip_dbs:
with transcend['connection'].cursor() as csr:
csr.execute("""
DROP TABLE {db}.{unique_table}
""".format(db=entry['schema'],
unique_table=entry['table'] + '_%s' % self.unique_id))
self.utils.log('complete', str(dt.datetime.now()))
except OperationalError as err: # raise exception if database execution error (e.g. permissions issue)
self.utils.log('\n\n')
self.utils.log(str(err).partition('\n')[0], error=True)
exit()
self.utils.log('\ndone!')
self.utils.log('time', str(dt.datetime.now()))
def deactivate_all(self):
"""Sets all systems and system-filesets to False"""
for sysname, sysobject in self.systems.items():
sysobject['active'] = 'False'
for setname, setobject in sysobject['filesets'].items():
setobject['active'] = 'False'
def display_motd(self):
webbrowser.open(self.motd_url)
def yaml_config(self):
tmp = []
tmp.append('substitutions:')
tmp.append(' account: "Demo Customer"')
tmp.append(' startdate: "Current_Date - 7"')
tmp.append(' enddate: "Current_Date - 1"')
tmp.append('')
tmp.append('transcend:')
tmp.append(' host: "tdprdcop3.td.teradata.com"')
tmp.append(' username: "{td_quicklook}"')
tmp.append(' password: "{td_password}"')
tmp.append(' logmech: "LDAP"')
tmp.append(' db_coa: "adlste_coa"')
tmp.append(' db_region: "adlste_westcomm"')
tmp.append(' db_stg: "adlste_coa_stg"')
tmp.append('')
tmp.append('folders:')
tmp.append(' override: "0_override"')
tmp.append(' download: "1_download"')
tmp.append(' sql: "2_sql_store"')
tmp.append(' run: "3_ready_to_run"')
tmp.append(' output: "4_output"')
tmp.append('')
tmp.append('settings:')
tmp.append(' githost: "https://raw.githubusercontent.com/tdcoa/sql/master/"')
tmp.append(' gitfileset: "filesets/filesets.yaml"')
tmp.append(' gitmotd: "motd.html"')
tmp.append(' localfilesets: "./{download}/filesets.yaml"')
tmp.append(' secrets: "secrets.yaml"')
tmp.append(' systems: "source_systems.yaml"')
tmp.append(' text_format_extensions: [".sql", ".yaml", ".txt", ".csv", ".py"]')
tmp.append(' gui_show_dev_filesets: "False"')
tmp.append(' run_non_fileset_folders: "True"')
tmp.append(' skip_dbs: "False"')
tmp.append(' write_to_perm: "True"')
return '\n'.join(tmp)
def yaml_systems(self):
tmp = []
tmp.append('systems:')
tmp.append(' Transcend:')
tmp.append(' siteid: "TRANSCEND02" ')
tmp.append(' active: "True"')
tmp.append(' host: "tdprdcop3.td.teradata.com"')
tmp.append(' username: "{td_quicklook}"')
tmp.append(' password: "{td_password}"')
tmp.append(' logmech: "ldap"')
tmp.append(' driver: "sqlalchemy" ')
tmp.append(' encryption: "false"')
tmp.append(' use: "test" ')
tmp.append(' dbsversion: "16.20"')
tmp.append(' collection: "dbc"')
tmp.append(' filesets:')
tmp.append(' demo:')
tmp.append(' active: "True"')
return '\n'.join(tmp)
# ------------- everything below here is new /
# ------------- trying to reduce repetitive "file iteration" code
def make_customer_files2(self, name=''):
self.utils.log('generating manual customer files', header=True)
info = os.path.join(self.approot, self.folders['run'])
outfo = self.make_output_folder('assisted_run', make_hidden_file=True)
self.iterate_coa('generate bteq file', info, outfo, {'all_make_bteq': self.coasql_assist_bteq}, file_filter_regex="\.coa\.sql$", sqlfile=True)
self.iterate_coa('move run files to output', info, outfo, {'move_files': self.coafile_move}) # default is all files, sqlfile=False
self.iterate_coa('combine .coa files', outfo, outfo, {'combine_files': self.coafile_combine}, file_filter_regex="\.coa\.(bteq|sql)$")
def process_return_data2(self, folderpath):
self.utils.log('processing completed run files: %s' %folderpath, header=True)
info = outfo = folderpath
funcs = {'convert_psv_to_csv': self.coafile_convert_psv2csv}
self.iterate_coa('convert any psv to csv', info, outfo, funcs, file_filter_regex="\.(csv|psv)$")
funcs = {'delete_json_manifests': self.coafile_delete}
self.iterate_coa('remove any json manifests', info, outfo, funcs, file_filter_regex="manifest\.json$")
funcs = {'vis': self.coasql_visualize,
'pptx': self.coasql_pptx,
'load': self.coasql_make_uploadmanifest}
self.iterate_coa('process special commands', info, outfo, funcs, file_filter_regex="\.coa\.sql$", sqlfile=True)
# ------------- iteration engine:
def iterate_coa(self, proc_name='', folderpath_in='', folderpath_out='', functions={'do nothing': lambda *args: args[0]}, file_filter_regex='(.*?)', sqlfile=False):
"""iterate the folder_in path, recursively, pattern match files, and execute supplied function.
"""
# define fully qualified in/out paths (well, down to approot level)
self.utils.log('iteration started!', proc_name, header=True)
fin = folderpath_in if folderpath_in !='' else os.path.join(self.approot, self.folders['run'])
fout = folderpath_out if folderpath_out !='' else fin
self.utils.log('folder in: %s' %fin, '\nfolder out: %s' %fout)
self.utils.log('regex qualifier for files: %s' %file_filter_regex)
self.utils.log('%i functions to be applied at %s level %s' %(len(functions), 'SQL' if sqlfile else 'FILE', tuple(functions.keys()) ))
# iterate all subfolders and files (aka /fin/System/FileSet)
self.utils.log('\nfiles:')
for root, dirs, files in os.walk(fin):
for filename in sorted(files):
ind = 2
srcpath = os.path.join(root, filename)
dstpath = os.path.join(root.replace(fin,fout), filename)
fileset = os.path.dirname(srcpath).split(os.sep)[-1]
system = os.path.dirname(srcpath).split(os.sep)[-2]
if system not in self.systems: break # must be in a real system
self.utils.log('%s' %filename, indent=ind)
ind += 2
# only process if files match supplied regex pattern:
if len(re.findall(file_filter_regex, srcpath)) == 0: # NOT FOUND
self.utils.log('filter NOT satisfied', file_filter_regex, indent=ind)
else: # FOUND:
self.utils.log('filter IS satisfied', file_filter_regex, indent=ind)
self.utils.log('filepath in', srcpath, indent=ind)
self.utils.log('filepath out', dstpath, indent=ind)
# create dest folder if missing:
if not os.path.exists(os.path.dirname(dstpath)):
self.utils.log('creating filepath out', os.path.dirname(dstpath), indent=ind)
os.makedirs(os.path.dirname(dstpath))
# throw a few assumptions in the trunk, and let's go!
trunk = {'filepath_in':srcpath, 'filepath_out':dstpath, 'filename':filename,
'folderpath_in':os.path.dirname(srcpath), 'folderpath_out':os.path.dirname(dstpath),
'fileset':fileset, 'system':system, 'index':0, 'phase':'execute', 'postwork':{}, 'sql':{}}
# EXECUTE functions against entire file:
if not sqlfile:
for nm, func in functions.items():
trunk['function_name'] = nm
trunk['log_indent'] = ind
trunk = func(trunk)
self.utils.log('done!', indent=ind)
# Split sqlfile on ';' and EXECUTE function against each sql statement
elif sqlfile:
with open(srcpath, 'r') as fh:
sqls_text = fh.read()
sqls = sqls_text.split(";")
self.utils.log('sql file contains %i statements' %len(sqls), indent=ind)
trunk['sql'] = {}
trunk['index'] = 0
trunk['phase'] = 'execute'
# iterate sqls
for sql in sqls:
trunk['index'] +=1
self.utils.log('processing sql #%i' %trunk['index'], indent=6)
ind=8
trunk['special_commands'] = self.utils.get_special_commands(sql, indent=ind)
trunk['sql']['original'] = sql
trunk['sql']['formatted'] = self.utils.format_sql(sql)
trunk['sql']['special_command_out'] = trunk['special_commands']['sql']
trunk['log_indent'] = ind+2
# for any "all" functions, or any special commands, execute:
for name, func in functions.items():
if name in trunk['special_commands'] or name[:3]=='all':
trunk['function_name'] = name
self.utils.log('qualifies for function: %s' %name, indent=ind)
trunk = func(trunk)
# post work per file -- added to the trunk as needed by above func()
trunk['phase'] = 'postwork'
for postname, postfunc in trunk['postwork'].items():
trunk = postfunc(trunk)
# ------------- subprocesses designed to be plugged into the above engine
def coasql_do_nothing(self, trunk):
"""literally, does nothing. just a stub for testing/future work."""
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
return trunk
def coafile_combine(self, trunk):
"""Combines all files supplied into one master file, by extension type.
i.e., all .sql files will be one file, all .bteq files another.
Combined master file will be named "!System_Fileset.ext"
Note: special handling exists for bteq files, to ensure only one "bteq_prefix"
section exists per master file, regardless of how many sub-bteq files there are"""
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
fileext = str(trunk['filepath_in'].split('.')[-1]).lower().strip()
combinedfile = '!_%s_%s.combined.%s' %(trunk['system'], trunk['fileset'], fileext)
combinedpath = os.path.join(trunk['folderpath_out'], combinedfile)
combinedtext = ''
self.utils.log('reading file content %s into file %s' %(trunk['filename'], combinedfile), indent=trunk['log_indent'])
with open(trunk['filepath_in'], 'r') as fh:
filetext = fh.read()
# determine if file is BTEQ, and if so, ensure only one logon credential on top:
if self.bteq_prefix in filetext or fileext in ['btq','bteq']:
filetext = filetext.replace(self.bteq_prefix, '')
if not os.path.isfile(combinedpath):
with open(combinedpath, 'w') as fh:
fh.write(self.bteq_prefix)
# at this point, filetype doesn't matter... always append to master
with open(combinedpath, 'a') as fh:
fh.write(filetext)
return trunk
def coafile_move(self, trunk):
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
shutil.move(trunk['filepath_in'], trunk['filepath_out'])
self.utils.log('file moved from', trunk['filepath_in'], indent=trunk['log_indent']+2)
self.utils.log('file moved to', trunk['filepath_out'], indent=trunk['log_indent']+2)
trunk['filepath_in'] = trunk['filepath_out']
return trunk
def coafile_delete(self, trunk):
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
try:
self.utils.log('deleting file', trunk['filepath_in'], indent=trunk['log_indent']+2)
os.remove(trunk['filepath_in'])
self.utils.log('file deleted ', trunk['filepath_in'], indent=trunk['log_indent']+2)
except Exception as ex:
self.utils.log(ex, error=True, indent=trunk['log_indent']+2)
return trunk
def coafile_convert_psv2csv(self, trunk):
"""Tests csv files for pipe-delimited or self.bteq_delim, and if true, convert to comma delimited"""
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
# open file and determine whether .csv or .psv:
self.utils.log('opening file', trunk['filepath_in'], indent=trunk['log_indent']+2)
filetext = open(trunk['filepath_in'],'r').read()
# iterate to the best-fit delimiter amongst candidates, with a bias towards earlier-listed candidates
self.utils.log('testing for best-fit delimiter candidate...')
sep = self.bteq_delim
for sepc in [',,', ',', '::', ':', ';;', ';', '||', '|']:
if (filetext.count(sepc)*len(sepc)*1.1) > (filetext.count(sep)*len(sep)): sep = sepc
self.utils.log('delimiter %s wins with %i instances found' %(str(sep), filetext.count(sep)))
filetext = None # be kind
self.utils.log('file delimiter determined as', sep, indent=trunk['log_indent']+4)
# if sep is greater than 1 character, it's treated as regex... let's undo that
if len(sep) > 1: sep = ''.join(['\\'+ c for c in sep])
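# e.g. a two-character delimiter like '||' becomes '\|\|' so pandas treats it literally
# rather than as a regex alternation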
if sep != ',':
df = pd.read_csv(trunk['filepath_in'], sep=sep)
df = df.applymap(lambda x: x.strip() if isinstance(x, str) else x) # trim whitespace from data
df = df.rename(columns=lambda x: x.strip()) # trim whitespace from column headers
df = df.where(pd.notnull(df), None) # handle nulls
self.utils.log('records found', str(len(df)), indent=trunk['log_indent']+4)
self.utils.log('columns', str(list(df.columns)), indent=trunk['log_indent']+4)
if trunk['filepath_in'][-4:] == '.psv': trunk['filepath_in'] = trunk['filepath_in'][:-4] + '.csv'
df.to_csv(trunk['filepath_in'], index=False, quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
self.utils.log('file converted to .csv', indent=trunk['log_indent']+4)
else:
self.utils.log('file already .csv, no change', indent=trunk['log_indent']+4)
return trunk
def coasql_assist_bteq(self, trunk):
"""turns sql into bteq commands. If trunk['phase']=='postwork' the processed
will add prefix/suffix commands and save the .coa.bteq file."""
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
if 'bteq_sql' not in trunk: trunk['bteq_sql'] = []
if trunk['phase'] == 'postwork':
# wrap bteq with begin/end logic, and save file:
trunk['bteq_sql'].insert(0, '-----> file: %s \n' %os.path.basename(trunk['filepath_in']) )
trunk['bteq_sql'].insert(0, self.bteq_prefix)
trunk['bteq_sql'].append('.export reset')
trunk['bteq_filepath'] = trunk['filepath_out'].replace('.coa.sql','.coa.bteq')
self.utils.log('file complete, saving...', trunk['bteq_filepath'], indent=trunk['log_indent'])
with open(trunk['bteq_filepath'], 'w') as fh:
fh.write('\n\n'.join(trunk['bteq_sql']))
self.utils.log('complete!', indent=trunk['log_indent'])
else: # still processing sql statements:
self.utils.log('translating to bteq...', indent=trunk['log_indent'])
if 'save' in trunk['special_commands']:
trunk['bteq_sql'].append('.export reset')
trunk['bteq_sql'].append('.export report file="%s" , close' %str(trunk['special_commands']['save']))
self.utils.log('adding export commands', indent=trunk['log_indent'])
trunk['bteq_sql'].append(trunk['sql']['formatted'])
# register postwork function, so this is called after file is complete
trunk['postwork']['assist_bteq'] = self.coasql_assist_bteq
return trunk
def coasql_visualize(self, trunk):
"""runs any external python script, specifically for visualizations"""
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
pypath = os.path.join(trunk['folderpath_in'], trunk['special_commands']['vis'].replace('.csv','.py'))
self.utils.log('executing visualization', indent=trunk['log_indent']+2)
self.utils.log('on file', trunk['special_commands']['vis'], indent=trunk['log_indent']+4)
self.utils.log('using script', pypath, indent=trunk['log_indent']+4)
subprocess.run([sys.executable, pypath])
return trunk
def coasql_pptx(self, trunk):
"""process any visualizations"""
from .pptx import replace_placeholders
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
pptxpath = os.path.join(trunk['folderpath_in'], trunk['special_commands']['pptx'])
self.utils.log('performing powerpoint substitutions on %s' %pptxpath, indent=trunk['log_indent']+2)
pptx_file = Path(pptxpath)
replace_placeholders(pptx_file, Path(trunk['folderpath_out']))
self.utils.log('pptx file complete!', indent=trunk['log_indent']+2)
return trunk
def coasql_make_uploadmanifest(self, trunk):
"""creates upload_manifest.json (hopefully a legacy process soon)"""
self.utils.log('subfunction called', 'make upload manifest', indent=trunk['log_indent'])
s = l = c = ''
umfile = 'upload-manifest.json'
umpath = os.path.join(trunk['folderpath_out'], umfile)
# POSTWORK: perform all substitutions (file level)
if trunk['phase'] == 'postwork':
subs = [self.transcend, self.substitutions, self.settings, self.systems[trunk['system']], self.filesets[trunk['fileset']], self.systems[trunk['system']]['filesets'][trunk['fileset']] ]
self.utils.log('applying substitutions to upload manifest file', indent=trunk['log_indent']+4)
with open(umpath, 'r+') as fh:
umtext = fh.read()
for sub in subs:
umtext = self.utils.substitute(umtext, sub)
fh.seek(0)
fh.write(umtext)
fh.truncate()
else: # not post-work, (per sql level):
if 'save' in trunk['special_commands']: s = trunk['special_commands']['save']
if 'load' in trunk['special_commands']: l = trunk['special_commands']['load']
if 'call' in trunk['special_commands']: c = trunk['special_commands']['call']
line = '\n{ "file": "%s",\n "table": "%s",\n "call": "%s"}' %(s,l,c)
# no file = new run = lay in preamble + line
if not os.path.isfile(umpath):
with open(umpath, 'w') as fh:
fh.write('{"entries":[\n %s \n]}' %line)
self.utils.log('created new upload manifest file', indent=trunk['log_indent']+4)
else: # file exists, just add comma + line (json close happens in postwork)
with open(umpath, 'r') as fh:
lines = fh.readlines()
if lines[-1].strip() == ']}': lines = lines[:-1]
lines.append(',' + line)
lines.append('\n]}')
with open(umpath, 'w') as fh:
fh.write(''.join(lines))
self.utils.log('added to upload manifest', line.replace('\n',''), indent=trunk['log_indent']+4)
trunk['postwork']['upload_manifest'] = self.coasql_make_uploadmanifest # register for postwork
return trunk
# ------------- subprocesses - stubs and partial work
def coasql_upload(self, trunk):
"""process any visualizations"""
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
return trunk
def coasql_execute_sql(self, trunk):
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
return trunk
def coasql_save_csv(self, trunk):
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
return trunk
def coasql_load(self, trunk):
"""when handed a sql statement that qualifes for loading, loads to Transcend."""
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
# find name of .csv to load:
csvfilepath = os.path.join(trunk['folderpath_in'], trunk['special_commands']['save'])
self.utils.log('csvfile',csvfilepath, indent=trunk['log_indent'])
if not os.path.isfile(csvfilepath):
self.utils.log('cannot find file', csvfilepath, warning=True)
return trunk
# build connection object
if 'connection' not in trunk or trunk['connection'] is None:
trunk['connection'] = self.utils.open_connection(
conntype=self.systems[trunk['system']]['driver'],
encryption=self.systems[trunk['system']]['encryption'],
system=self.systems[trunk['system']],
skip = self.utils.validate_boolean(self.settings['skip_dbs'], 'bool')) # <------------------------------- Connect to the database
self.utils.log('UPLOADING file', os.path.basename(csvfilepath), indent=trunk['log_indent']+2)
self.utils.log(' TO system', trunk['system'], indent=trunk['log_indent']+2)
# open csv as dataframe
dfcsv = pd.read_csv(csvfilepath)
dfcsv = dfcsv.where(pd.notnull(dfcsv), None)
self.utils.log('records found', str(len(dfcsv)))
# strip out any unnamed columns
for col in dfcsv.columns:
if col[:8] == 'Unnamed:':
self.utils.log('unnamed column dropped', col)
self.utils.log(' (usually the pandas index as a column, "Unnamed: 0")')
dfcsv = dfcsv.drop(columns=[col])
self.utils.log('final column count', str(len(dfcsv.columns)))
return trunk
def coasql_call(self, trunk):
self.utils.log('subfunction called', trunk['function_name'], indent=trunk['log_indent'])
return trunk
|
import plotly.offline as py
import plotly.graph_objs as go
data = [go.Bar(x=['JSON', 'XML'], y=[26.2, 77.3])]
layout = go.Layout(
title='JSON vs XML',
yaxis=dict(
title='KB',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
width=1000,
height=642
)
fig = go.Figure(data=data, layout=layout)
py.plot(fig, filename='JSON-vs-XML')
|
# coding=utf-8
"""
© 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import gc
import os
import re
import logging
from naarad.metrics.metric import Metric
import naarad.utils
logger = logging.getLogger('naarad.metrics.top_metric')
class TopMetric(Metric):
def __init__ (self, metric_type, infile, hostname, output_directory, resource_path, label, ts_start, ts_end,
rule_strings, important_sub_metrics, anomaly_detection_metrics, **other_options):
Metric.__init__(self, metric_type, infile, hostname, output_directory, resource_path, label, ts_start, ts_end,
rule_strings, important_sub_metrics, anomaly_detection_metrics)
# Allow user to specify interested processes; in the format of 'PID=11 22' and 'COMMAND=firefox top'
# It will search for any processes that match the PIDs listed or the commands listed. It's not an intersection of the PIDs and commands.
self.PID = []
self.COMMAND = []
self.ts_valid_lines = True
for (key, val) in other_options.iteritems():
setattr(self, key, val.split())
self.sub_metrics = None
self.process_headers = []
self.ts = ''
self.ts_date = ''
self.ts_time = ''
self.saw_pid = False # Controls when to process individual commands;
self.data = {} # Stores all data to be written out
self.sub_metric_description = {
'uptime_minute' : 'uptime of the machine',
'num_users' : 'users sessions logged in',
'load_aver_1_minute' : 'average load on the system (last 1 minute)',
'load_aver_5_minute' : 'average load on the system (last 5 minutes)',
'load_aver_15_minute' : 'average load on the system (last 15 minutes)',
'tasks_total' : 'total processes',
'tasks_running' : 'processes running',
'tasks_sleeping' : 'processes sleeping',
'tasks_stopped' : 'processes stopped',
'tasks_zombie' : 'zombies',
'cpu_us' : 'cpu percentage of running user processes',
'cpu_sy' : 'cpu percentage of running system processes',
'cpu_id' : 'cpu percentage of idle time',
'cpu_ni' : 'cpu percentage of running niced processes',
'cpu_wa' : 'cpu percentage of waiting for IO',
'cpu_hi' : 'cpu percentage of serving hardware IRQ',
'cpu_si' : 'cpu percentage of serving software IRQ',
'cpu_st' : 'cpu percentage of being stolen',
'mem_total' : 'total memory in GB',
'mem_used' : 'total memory in use in GB',
'mem_free' : 'total free memory in GB',
'mem_buffers' : 'total buffers in GB',
'swap_total' : 'total swap size in GB',
'swap_used' : 'total swap in use in GB',
'swap_free' : 'total free swap in GB',
'swap_cached' : 'total swap cache in GB',
}
def put_values_into_data(self, values):
"""
Take the (col, value) in 'values', append value into 'col' in self.data[]
"""
for col, value in values.items():
if col in self.column_csv_map:
out_csv = self.column_csv_map[col]
else:
out_csv = self.get_csv(col) # column_csv_map[] is assigned in get_csv()
self.data[out_csv] = []
self.data[out_csv].append(self.ts + "," + value)
def process_top_line(self, words):
"""
Process the line starting with "top"
Example log: top - 00:00:02 up 32 days, 7:08, 19 users, load average: 0.00, 0.00, 0.00
"""
self.ts_time = words[2]
self.ts = self.ts_date + ' ' + self.ts_time
self.ts = naarad.utils.get_standardized_timestamp(self.ts, None)
if self.ts_out_of_range(self.ts):
self.ts_valid_lines = False
else:
self.ts_valid_lines = True
up_days = int(words[4])
up_hour_minute = words[6].split(':') # E.g. '4:02,'
up_minutes = int(up_hour_minute[0]) * 60 + int(up_hour_minute[1].split(',')[0])
uptime_minute = up_days * 24 * 60 + up_minutes # Converting days to minutes
values = {}
values['uptime_minute'] = str(uptime_minute)
values['num_users'] = words[7]
values['load_aver_1_minute'] = words[11][:-1]
values['load_aver_5_minute'] = words[12][:-1]
values['load_aver_15_minute'] = words[13]
self.put_values_into_data(values)
def process_tasks_line(self,words):
"""
Process the line starting with "Tasks:"
Example log: Tasks: 446 total, 1 running, 442 sleeping, 2 stopped, 1 zombie
"""
words = words[1:]
length = len(words) / 2 # The number of pairs
values = {}
for offset in range(length):
k = words[2 * offset + 1].strip(',')
v = words[2 * offset]
values['tasks_' + k] = v
self.put_values_into_data(values)
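# For the example line above, words[1:] pairs up as value/key and yields:
#   tasks_total=446, tasks_running=1, tasks_sleeping=442, tasks_stopped=2, tasks_zombie=1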
def process_cpu_line(self, words):
"""
Process the line starting with "Cpu(s):"
Example log: Cpu(s): 1.3%us, 0.5%sy, 0.0%ni, 98.2%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
"""
values = {}
for word in words[1:]:
val, key = word.split('%')
values['cpu_' + key.strip(',')] = val
self.put_values_into_data(values)
def convert_to_G(self, word):
"""
Given a size such as '2333M', return the converted value in G
"""
value = 0.0
if word[-1] == 'G' or word[-1] == 'g':
value = float(word[:-1])
elif word[-1] == 'M' or word[-1] == 'm':
value = float(word[:-1]) / 1000.0
elif word[-1] == 'K' or word[-1] == 'k':
value = float(word[:-1]) / 1000.0 / 1000.0
else: # No unit
value = float(word) / 1000.0 / 1000.0 / 1000.0
return str(value)
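# Illustrative conversions (decimal, 1000-based, as implemented above):
#   convert_to_G('62.841G') -> '62.841'
#   convert_to_G('2333M')   -> '2.333'
#   convert_to_G('0.000k')  -> '0.0'
#   convert_to_G('650')     -> '6.5e-07'   (no unit: value assumed to be in bytes)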
def process_mem_line(self, words):
"""
Process the line starting with "Mem:"
Example log: Mem: 62.841G total, 16.038G used, 46.803G free, 650.312M buffers
For each value, needs to convert to 'G' (needs to handle cases of K, M)
"""
words = words[1:]
length = len(words) / 2 # The number of pairs
values = {}
for offset in range(length):
k = words[2 * offset + 1].strip(',')
v = self.convert_to_G(words[2 * offset])
values['mem_' + k] = v
self.put_values_into_data(values)
def process_swap_line(self, words):
"""
Process the line starting with "Swap:"
Example log: Swap: 63.998G total, 0.000k used, 63.998G free, 11.324G cached
For each value, needs to convert to 'G' (needs to handle cases of K, M)
"""
words = words[1:]
length = len(words) / 2 # The number of pairs
values = {}
for offset in range(length):
k = words[2 * offset + 1].strip(',')
v = self.convert_to_G(words[2 * offset])
values['swap_' + k] = v
self.put_values_into_data(values)
def process_individual_command(self, words):
"""
process the individual lines like this:
#PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
29303 root 20 0 35300 2580 1664 R 3.9 0.0 0:00.02 top
11 root RT 0 0 0 0 S 1.9 0.0 0:18.87 migration/2
3702 root 20 0 34884 4192 1692 S 1.9 0.0 31:40.47 cf-serverd
It does not record all processes due to memory concerns; it only records the processes of interest (based on user input of PID and COMMAND)
"""
pid_index = self.process_headers.index('PID')
process_index = self.process_headers.index('COMMAND')
pid = words[pid_index]
process = words[process_index]
if pid in self.PID or process in self.COMMAND:
process_name = process.split('/')[0]
values = {}
for word_col in self.process_headers:
word_index = self.process_headers.index(word_col)
if word_col in ['VIRT', 'RES', 'SHR']: # These values need to convert to 'G'
values[process_name + '_' + pid + '_' + word_col] = self.convert_to_G(words[word_index])
elif word_col in ['PR', 'NI', '%CPU', '%MEM']: # These values will be assigned later or ignored
values[process_name + '_' + pid + '_' + word_col.strip('%')] = words[word_index]
uptime_index = self.process_headers.index('TIME+')
uptime = words[uptime_index].split(':')
uptime_sec = float(uptime[0]) * 60 + float(uptime[1])
values[process_name + '_' + pid + '_' + 'TIME'] = str(uptime_sec)
self.put_values_into_data(values)
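# TIME+ is 'minutes:seconds.hundredths'; e.g. '31:40.47' -> 31*60 + 40.47 = 1900.47 seconds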
def parse(self):
"""
Parse the top output file
Return status of the metric parse
The raw log file is like the following:
2014-06-23
top - 00:00:02 up 18 days, 7:08, 19 users, load average: 0.05, 0.03, 0.00
Tasks: 447 total, 1 running, 443 sleeping, 2 stopped, 1 zombie
Cpu(s): 1.6%us, 0.5%sy, 0.0%ni, 97.9%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
Mem: 62.841G total, 15.167G used, 47.675G free, 643.434M buffers
Swap: 63.998G total, 0.000k used, 63.998G free, 11.324G cached
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
1730 root 20 0 4457m 10m 3328 S 1.9 0.0 80:13.45 lwregd
The log lines can be generated by echo $t >> $RESULT/top.out &; top -b -n $COUNT -d $INTERVAL | grep -A 40 '^top' >> $RESULT/top.out &
"""
for infile in self.infile_list:
logger.info('Processing : %s', infile)
status = True
file_status = naarad.utils.is_valid_file(infile)
if not file_status:
return False
with open(infile) as fh:
for line in fh:
words = line.split()
if not words:
continue
# Pattern matches line of '2014-02-03'
if re.match('^\d\d\d\d-\d\d-\d\d$', line):
self.ts_date = words[0]
continue
prefix_word = words[0].strip()
if prefix_word == 'top':
self.process_top_line(words)
self.saw_pid = False # Turn off the processing of individual process line
elif self.ts_valid_lines:
if prefix_word == 'Tasks:':
self.process_tasks_line(words)
elif prefix_word == 'Cpu(s):':
self.process_cpu_line(words)
elif prefix_word == 'Mem:':
self.process_mem_line(words)
elif prefix_word == 'Swap:':
self.process_swap_line(words)
elif prefix_word == 'PID':
self.saw_pid = True # Turn on the processing of individual process line
self.process_headers = words
else: # Each individual process line
if self.saw_pid and len(words) >= len(self.process_headers): # Only valid process lines
self.process_individual_command(words)
# Putting data in csv files;
for out_csv in self.data.keys(): # All sub_metrics
self.csv_files.append(out_csv)
with open(out_csv, 'w') as fh:
fh.write('\n'.join(self.data[out_csv]))
gc.collect()
return status
|
import os
import sys
from pip._vendor.distlib import scripts
#specs = 'nova = novaclient.shell:main'
specs = sys.argv[1]
scripts_path = os.path.join(os.path.dirname(sys.executable), 'Scripts')
m = scripts.ScriptMaker(None, scripts_path)
m.executable = sys.executable
m.make(specs)
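# Illustrative usage (the spec format matches the commented example above; the script file name
# 'make_script.py' is hypothetical):
#   python make_script.py "nova = novaclient.shell:main"
# ScriptMaker.make() then generates a 'nova' launcher (an .exe wrapper on Windows) in the
# interpreter's Scripts directory.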
|
"""controlling.py controller deed module
"""
#print("module {0}".format(__name__))
import math
import time
import struct
from collections import deque
import inspect
from ....aid.sixing import *
from ....aid.odicting import odict
from ....aid import aiding, navigating, blending
from ....base import storing
from ....base import doing
from ....aid.consoling import getConsole
console = getConsole()
class ControllerBase(doing.DoerLapse):
"""
Base class to provide backwards compatible ._initio interface
"""
def _initio(self, ioinits):
"""
Initialize Actor data store interface from ioinits odict
Wrapper for backwards compatibility to new ._initio signature
"""
self._prepio(**ioinits)
return odict()
#Class definitions
class ControllerPid(ControllerBase):
"""PIDController DeedLapse Deed Class
PID Controller Class
"""
def __init__(self, **kw):
"""Initialize instance
inherited instance attributes
.stamp = time stamp
.lapse = time lapse between updates of controller
.name
.store
"""
#call super class method
super(ControllerPid,self).__init__(**kw)
self.lapse = 0.0 #time lapse in seconds calculated on update
def _prepio(self, group, output, input, rate, rsp, parms = None, **kw):
""" Override default since legacy deed interface
group is path name of group in store, group has following subgroups or shares:
group.parm = share for data structure of fixed parameters or coefficients
parm has the following fields:
wrap = where setpoint wraps around must be positive
drsp = delta rsp needed to indicate change in rsp avoids rounding error
calcRate = True rate is time difference, False rate is rate sensor input
ger = error rate to rate conversion gain
gff = feedforward reference to controller gain
gpe = proportional error gain
gde = derivative error gain
gie = integral error gain
esmax = maximum error sum
esmin = minimum error sum
ovmax = maximum controller output value
ovmin = minimum controller output value
group.elapsed = share copy of lapse for logging
group.prsp = share of prior reference set point needed to compute if changed
group.e = share of error between rsp and input value appropriately scaled
group.er = share of rate of change of error
group.es = share of summation of error
output is path name of share for output/truth of arbiter
input = path name of input controlled variable
rate = path name to input sensed rate of change of controlled variable
rsp = path name of reference set point for controlled variable
parms is optional dictionary of initial values for group.parm fields
instance attributes
.output = reference to output share
.group = copy of group name
.parm = reference to input parameter share
.elapsed = reference to lapse share
.prsp = reference to prior ref set point share
.e = reference to error share
.er = reference to error rate share
.es = reference to error sum share
.input = reference to input share
.rate = reference to input rate share
.rsp = reference to input reference set point
"""
self.group = group
self.parm = self.store.create(group + '.parm')#create if not exist
if not parms:
parms = dict(wrap = 0.0, drsp = 0.01, calcRate = True,
ger = 1.0, gff = 0.0, gpe = 0.0, gde = 0.0, gie = 0.0,
esmax = 0.0, esmin = 0.0, ovmax = 0.0, ovmin = 0.0)
self.parm.create(**parms)
self.elapsed = self.store.create(group + '.elapsed').create(value = 0.0)#create if not exist
self.prsp = self.store.create(group + '.prsp').create(value = 0.0)#create if not exist
self.e = self.store.create(group + '.error').create(value = 0.0)#create if not exist
self.er = self.store.create(group + '.errorRate').create(value = 0.0)#create if not exist
self.es = self.store.create(group + '.errorSum').create(value = 0.0)#create if not exist
self.output = self.store.create(output).update(value = 0.0) #force update not just create
self.input = self.store.create(input).create(value = 0.0) #create if not exist
self.rate = self.store.create(rate).create(value = 0.0)
self.rsp = self.store.create(rsp).create(value = 0.0)
def restart(self):
"""Restart controller """
self.es.value = 0.0 #reset integrator error sum to zero
def action(self, **kw):
"""update will use inputs from store
assumes all inputs come from deeds that use value as their output attribute name
"""
super(ControllerPid,self).action(**kw) #computes lapse here
self.elapsed.value = self.lapse #update share
if self.lapse <= 0.0: #only evaluate controller if lapse positive so rate calc good
return
input = self.input.value #get from store
rate = self.rate.value #get from store
rsp = self.rsp.value #get from store
prsp = self.prsp.value #get from store
if abs(rsp - prsp) > self.parm.data.drsp: #rsp changed
self.prsp.value = rsp
self.es.value = 0.0 #reset integrator
else: #to minimize noise use prsp in case changed a little
rsp = prsp
pe = self.e.value #prior error
# update error, unwrap error if setpoint wraps
#error = input - rsp, but may wrap, so we want the shortest error if it wraps
e = navigating.wrap2(angle = (input - rsp), wrap = self.parm.data.wrap)
self.e.value = e
if self.parm.data.calcRate: #calculate error rate with time derivative
er = (e - pe)/self.lapse
else: #get error rate from rate sensor
er= self.parm.data.ger * rate #convert measured rate sign and units
self.er.value = er
es = self.es.value
ae = self.lapse * (e + pe)/2.0 #use average error over time lapse
#update errorSum filter only when small error and small error rate
es += ae * blending.blend0(ae,0.0, 3.0) * blending.blend0(er,0.0, 0.1)
es = min(self.parm.data.esmax,max(self.parm.data.esmin,es)) #hard limit so no windup
self.es.value = es
out = self.parm.data.gff * rsp +\
self.parm.data.gpe * e +\
self.parm.data.gde * er +\
self.parm.data.gie * es
out = min(self.parm.data.ovmax,max(self.parm.data.ovmin,out))
self.output.value = out
return None
def _expose(self):
"""
prints out controller state
"""
print("Controller PID %s stamp = %s lapse = %0.3f input = %0.3f set point = %0.3f " %\
(self.name, self.stamp, self.lapse, self.input.value, self.rsp.value))
print(" error = %0.3f errorRate = %0.3f errorSum = %0.3f output = %0.3f truth = %s" %\
(self.e.value, self.er.value, self.es.value, self.output.value, self.output.truth))
ControllerPid.__register__('ControllerPidSpeed', ioinits=odict(
group = 'controller.pid.speed', output = 'goal.rpm',
input = 'state.speed', rate = 'state.speedRate', rsp = 'goal.speed',
parms = dict(wrap = 0.0, drsp = 0.01, calcRate = True,
ger = 1.0, gff = 400.0, gpe = 0.0, gde = 0.0, gie = 0.0,
esmax = 0.0, esmin = 0.0, ovmax = 1500.0, ovmin = 0.0)) )
ControllerPid.__register__('ControllerPidHeading', ioinits=odict(
group = 'controller.pid.heading', output = 'goal.rudder',
input = 'state.heading', rate = 'state.headingRate', rsp = 'goal.heading',
parms = dict(wrap = 180.0, drsp = 0.01, calcRate = True,
ger = 1.0, gff = 0.0, gpe = 3.0, gde = 0.0, gie = 0.0,
esmax = 0.0, esmin = 0.0, ovmax = 20.0, ovmin = -20.0)) )
ControllerPid.__register__('ControllerPidDepth', ioinits=odict(
group = 'controller.pid.depth', output = 'goal.pitch',
input = 'state.depth', rate = 'state.depthRate', rsp = 'goal.depth',
parms = dict(wrap = 0.0, drsp = 0.01, calcRate = True,
ger = 1.0, gff = 0.0, gpe = 8.0, gde = 0.0, gie = 1.0,
esmax = 5.0, esmin = -5.0, ovmax = 10.0, ovmin = -10.0)) )
ControllerPid.__register__('ControllerPidPitch', ioinits=odict(
group = 'controller.pid.pitch', output = 'goal.stern',
input = 'state.pitch', rate = 'state.pitchRate', rsp = 'goal.pitch',
parms = dict(wrap = 180.0, drsp = 0.01, calcRate = True,
ger = 1.0, gff = 0.0, gpe = 2.0, gde = 0.0, gie = 0.0,
esmax = 0.0, esmin = 0.0, ovmax = 20.0, ovmin = -20.0)) )
|
"""Tests for fileUtils.py"""
from typing import List, Sequence
import pytest
import csv
import pandas as pd
from pathlib import Path
from nna import fileUtils
from testparams import INPUTS_OUTPUTS_PATH
IO_fileUtils_path = INPUTS_OUTPUTS_PATH / "fileUtils"
test_data_save_to_csv = [
(
(IO_fileUtils_path / "save_to_csv" / "outputs" / "test"),
[
("V1_firstLine-FirstItem", "firstLine-SecondItem"),
("V1_secondLine-FirstItem", "secondLine-SecondItem"),
],
[
["V1_firstLine-FirstItem", "firstLine-SecondItem"],
["V1_secondLine-FirstItem", "secondLine-SecondItem"],
], # result should be previous lines and this one
),
(
(IO_fileUtils_path / "save_to_csv" / "outputs" / "test.csv"),
[
("V2_firstLine-FirstItem", "firstLine-SecondItem"),
("V2_secondLine-FirstItem", "secondLine-SecondItem"),
],
[
["V2_firstLine-FirstItem", "firstLine-SecondItem"],
["V2_secondLine-FirstItem", "secondLine-SecondItem"],
], # result should be previous lines and this one
),
(
(IO_fileUtils_path / "save_to_csv" / "outputs" / "test2"),
[
("V3_firstLine-FirstItem", "firstLine-SecondItem"),
("V3_secondLine-FirstItem", "secondLine-SecondItem"),
],
[
["V3_firstLine-FirstItem", "firstLine-SecondItem"],
["V3_secondLine-FirstItem", "secondLine-SecondItem"],
], # result should be previous lines and this one
),
]
@pytest.fixture(scope="function")
def output_folder(request):
# print("setup")
file_name, lines, expected = request.param
# print(file_name.exists())
file_name = Path(file_name).with_suffix(".csv")
file_name.parent.mkdir(parents=True, exist_ok=True)
yield (file_name, lines, expected)
print("teardown")
print(file_name)
file_name.unlink(missing_ok=True)
@pytest.mark.parametrize(
"output_folder",
test_data_save_to_csv,
indirect=True,
)
def test_save_to_csv(
# file_name,
# lines,
# expected,
output_folder): #pylint:disable=W0621
file_name, lines, expected = output_folder
fileUtils.save_to_csv(file_name, lines)
rows: List[Sequence] = []
with open(file_name, newline="", encoding="utf-8") as f:
reader = csv.reader(f)
for row in reader:
rows.append(row)
assert expected == rows
test_data_standard_path_style = [
("/folder1/folder2/",
pd.Series({
"locationId": 11,
"year": 2019,
"region": "Anwr"
}), "", "", Path("/folder1/folder2/Anwr/11/2019/")),
("/folder1/folder2/",
pd.Series({
"locationId": "11",
"year": "2019",
"region": "Anwr"
}), "", "", Path("/folder1/folder2/Anwr/11/2019/")),
("/folder1/folder2/",
pd.Series({
"locationId": "11",
"year": "2019",
"region": "Anwr"
}), "", "", Path("/folder1/folder2/Anwr/11/2019/")),
("folder1", pd.Series({
"locationId": "11",
"year": "2019",
"region": "Anwr"
}), "", "", Path("folder1/Anwr/11/2019/")),
("/folder1/folder2/",
pd.Series({
"locationId": 11,
"year": "2019",
"region": "Anwr"
},
name="S4A10292_20190615_094602.wav"), "_vgg", "_vgg",
Path("/folder1/folder2/Anwr/11/" +
"2019/S4A10292_20190615_094602__vgg/S4A10292_20190615_094602__vgg")),
("/folder1/folder2/",
pd.Series({
"locationId": "11",
"year": "2019",
"region": "Prudhoe"
},
name="S4A10292_20190615_094602.wav"), "", "vgg",
Path("/folder1/folder2/Prudhoe/11/" +
"2019/S4A10292_20190615_094602_vgg")),
("/folder1/folder2/",
pd.Series({
"locationId": "11",
"year": "2019",
"region": "Prudhoe"
},
name="S4A10292_20190615_094602.wav"), "XXX", "YYY",
Path("/folder1/folder2/Prudhoe/11/" +
"2019/S4A10292_20190615_094602_XXX/S4A10292_20190615_094602_YYY")),
("folder1/folder2",
pd.Series({
"locationId": "11",
"year": "2019",
"region": "Prudhoe"
},
name="S4A10292_20190615_094602.wav"), "XXX", "",
Path("folder1/folder2/Prudhoe/11/" +
"2019/S4A10292_20190615_094602_XXX/")),
("/folder1/folder2/", {
"locationId": 11,
"year": 2019,
"region": "Anwr",
"name": "S4A10292_20190615_094602.wav",
}, "", "", Path("/folder1/folder2/Anwr/11/2019/")),
]
@pytest.mark.parametrize(
"parent_path, row, sub_directory_addon, file_name_addon,expected",
test_data_standard_path_style)
def test_standard_path_style(parent_path, row, sub_directory_addon,
file_name_addon, expected):
output_path = fileUtils.standard_path_style(parent_path, row,
sub_directory_addon,
file_name_addon)
assert output_path == expected
# TEST/XX/2018/
test_data_parse_file_path = [
("S4A10327_20190531_060000_embeddings000.npy", 0, {
"timestamp": "20190531_060000",
"region": "",
"locationId": "",
"year": "",
"part_index": 0,
}),
("/S4A10307_20190731_221602_XXX11.flac", 0, {
"timestamp": "20190731_221602",
"region": "",
"locationId": "",
"year": "",
"part_index": 11,
}),
("/2019/S4A10307_20190731_221602_XXX010.flac", 0, {
"timestamp": "20190731_221602",
"region": "",
"locationId": "",
"year": "2019",
"part_index": 10,
}),
("2019/S4A10307_20190731_221602_XXX010.flac", 0, {
"timestamp": "20190731_221602",
"region": "",
"locationId": "",
"year": "2019",
"part_index": 10,
}),
("/17/2019/S4A10307_20190731_221602_XXX010.flac", 0, {
"timestamp": "20190731_221602",
"region": "",
"locationId": "17",
"year": "2019",
"part_index": 10,
}),
("/tank/data/nna/real/prudhoe/17/2019/S4A10307_20190731_221602.flac", 0, {
"timestamp": "20190731_221602",
"region": "prudhoe",
"locationId": "17",
"year": "2019",
"part_index": None,
}),
("/tank/data/nna/real/prudhoe/17/2019/S4A10307_20190731_221602_XXX000.flac",
0, {
"timestamp": "20190731_221602",
"region": "prudhoe",
"locationId": "17",
"year": "2019",
"part_index": 0,
}),
("/tank/data/nna/real/prudhoe/17/2019/S4A10307_20190731_221602_XXX010.flac",
0, {
"timestamp": "20190731_221602",
"region": "prudhoe",
"locationId": "17",
"year": "2019",
"part_index": 10,
}),
("YY/XX/tank/nna/real/prudhoe/17/2019/S4A10307_20190731_221602_XXX010.flac",
0, {
"timestamp": "20190731_221602",
"region": "prudhoe",
"locationId": "17",
"year": "2019",
"part_index": 10,
}),
]
@pytest.mark.parametrize("file_path, debug,expected", test_data_parse_file_path)
def test_parse_file_path(file_path, debug, expected):
output = fileUtils.parse_file_path(file_path, debug=debug)
assert output == expected
prudhoeAndAnwr4photoExp_dataV1 = pd.read_pickle(
"./data/database/prudhoeAndAnwr4photoExp_dataV1.pkl")
#
test_data_match_path_info2row = [
({
"timestamp": "20190811_103000",
"region": "prudhoe",
"locationId": "12",
"year": "2019",
}, prudhoeAndAnwr4photoExp_dataV1, 0,
prudhoeAndAnwr4photoExp_dataV1.loc[Path(
"/tank/data/nna/real/prudhoe/12/2019/S4A10274_20190811_103000.flac")]),
({
"timestamp": "20190621_000000",
"region": "anwr",
"locationId": "35",
"year": "2019",
}, prudhoeAndAnwr4photoExp_dataV1, 0,
prudhoeAndAnwr4photoExp_dataV1.loc[Path(
"/tank/data/nna/real/anwr/35/2019/S4A10272_20190621_000000.flac")]),
]
@pytest.mark.parametrize("path_info, file_properties_df, debug,expected",
test_data_match_path_info2row)
def test_match_path_info2row(path_info, file_properties_df, debug, expected):
output = fileUtils.match_path_info2row(path_info, file_properties_df, debug)
assert list(output[1].items()) == list(expected.items())
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-02-05 08:47
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0416_auto_20200205_1603'),
]
operations = [
migrations.CreateModel(
name='BriefOfEvidence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('statement_of_facts', models.TextField(blank=True, null=True)),
('victim_impact_statement_taken', models.BooleanField(default=False)),
('statements_pending', models.BooleanField(default=False)),
('vulnerable_hostile_witnesses', models.BooleanField(default=False)),
('witness_refusing_statement', models.BooleanField(default=False)),
('problems_needs_prosecution_witnesses', models.BooleanField(default=False)),
('accused_bad_character', models.BooleanField(default=False)),
('further_persons_interviews_pending', models.BooleanField(default=False)),
('other_interviews', models.BooleanField(default=False)),
('relevant_persons_pending_charges', models.BooleanField(default=False)),
('other_persons_receiving_sanction_outcome', models.BooleanField(default=False)),
('local_public_interest', models.BooleanField(default=False)),
('applications_orders_requests', models.BooleanField(default=False)),
('applications_orders_required', models.BooleanField(default=False)),
('other_legal_matters', models.BooleanField(default=False)),
('victim_impact_statement_taken_details', models.TextField(blank=True, null=True)),
('statements_pending_details', models.TextField(blank=True, null=True)),
('vulnerable_hostile_witnesses_details', models.TextField(blank=True, null=True)),
('witness_refusing_statement_details', models.TextField(blank=True, null=True)),
('problems_needs_prosecution_witnesses_details', models.TextField(blank=True, null=True)),
('accused_bad_character_details', models.TextField(blank=True, null=True)),
('further_persons_interviews_pending_details', models.TextField(blank=True, null=True)),
('other_interviews_details', models.TextField(blank=True, null=True)),
('relevant_persons_pending_charges_details', models.TextField(blank=True, null=True)),
('other_persons_receiving_sanction_outcome_details', models.TextField(blank=True, null=True)),
('local_public_interest_details', models.TextField(blank=True, null=True)),
('applications_orders_requests_details', models.TextField(blank=True, null=True)),
('applications_orders_required_details', models.TextField(blank=True, null=True)),
('other_legal_matters_details', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='ProsecutionBrief',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('statement_of_facts', models.TextField(blank=True, null=True)),
('victim_impact_statement_taken', models.BooleanField(default=False)),
('statements_pending', models.BooleanField(default=False)),
('vulnerable_hostile_witnesses', models.BooleanField(default=False)),
('witness_refusing_statement', models.BooleanField(default=False)),
('problems_needs_prosecution_witnesses', models.BooleanField(default=False)),
('accused_bad_character', models.BooleanField(default=False)),
('further_persons_interviews_pending', models.BooleanField(default=False)),
('other_interviews', models.BooleanField(default=False)),
('relevant_persons_pending_charges', models.BooleanField(default=False)),
('other_persons_receiving_sanction_outcome', models.BooleanField(default=False)),
('local_public_interest', models.BooleanField(default=False)),
('applications_orders_requests', models.BooleanField(default=False)),
('applications_orders_required', models.BooleanField(default=False)),
('other_legal_matters', models.BooleanField(default=False)),
('victim_impact_statement_taken_details', models.TextField(blank=True, null=True)),
('statements_pending_details', models.TextField(blank=True, null=True)),
('vulnerable_hostile_witnesses_details', models.TextField(blank=True, null=True)),
('witness_refusing_statement_details', models.TextField(blank=True, null=True)),
('problems_needs_prosecution_witnesses_details', models.TextField(blank=True, null=True)),
('accused_bad_character_details', models.TextField(blank=True, null=True)),
('further_persons_interviews_pending_details', models.TextField(blank=True, null=True)),
('other_interviews_details', models.TextField(blank=True, null=True)),
('relevant_persons_pending_charges_details', models.TextField(blank=True, null=True)),
('other_persons_receiving_sanction_outcome_details', models.TextField(blank=True, null=True)),
('local_public_interest_details', models.TextField(blank=True, null=True)),
('applications_orders_requests_details', models.TextField(blank=True, null=True)),
('applications_orders_required_details', models.TextField(blank=True, null=True)),
('other_legal_matters_details', models.TextField(blank=True, null=True)),
],
),
migrations.RemoveField(
model_name='legalcase',
name='accused_bad_character',
),
migrations.RemoveField(
model_name='legalcase',
name='accused_bad_character_details',
),
migrations.RemoveField(
model_name='legalcase',
name='applications_orders_requests',
),
migrations.RemoveField(
model_name='legalcase',
name='applications_orders_requests_details',
),
migrations.RemoveField(
model_name='legalcase',
name='applications_orders_required',
),
migrations.RemoveField(
model_name='legalcase',
name='applications_orders_required_details',
),
migrations.RemoveField(
model_name='legalcase',
name='further_persons_interviews_pending',
),
migrations.RemoveField(
model_name='legalcase',
name='further_persons_interviews_pending_details',
),
migrations.RemoveField(
model_name='legalcase',
name='local_public_interest',
),
migrations.RemoveField(
model_name='legalcase',
name='local_public_interest_details',
),
migrations.RemoveField(
model_name='legalcase',
name='other_interviews',
),
migrations.RemoveField(
model_name='legalcase',
name='other_interviews_details',
),
migrations.RemoveField(
model_name='legalcase',
name='other_legal_matters',
),
migrations.RemoveField(
model_name='legalcase',
name='other_legal_matters_details',
),
migrations.RemoveField(
model_name='legalcase',
name='other_persons_receiving_sanction_outcome',
),
migrations.RemoveField(
model_name='legalcase',
name='other_persons_receiving_sanction_outcome_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_accused_bad_character',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_accused_bad_character_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_applications_orders_requests',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_applications_orders_requests_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_applications_orders_required',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_applications_orders_required_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_further_persons_interviews_pending',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_further_persons_interviews_pending_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_local_public_interest',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_local_public_interest_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_other_interviews',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_other_interviews_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_other_legal_matters',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_other_legal_matters_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_other_persons_receiving_sanction_outcome',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_other_persons_receiving_sanction_outcome_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_problems_needs_prosecution_witnesses',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_problems_needs_prosecution_witnesses_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_relevant_persons_pending_charges',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_relevant_persons_pending_charges_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_statement_of_facts',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_statements_pending',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_statements_pending_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_victim_impact_statement_taken',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_victim_impact_statement_taken_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_vulnerable_hostile_witnesses',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_vulnerable_hostile_witnesses_details',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_witness_refusing_statement',
),
migrations.RemoveField(
model_name='legalcase',
name='pb_witness_refusing_statement_details',
),
migrations.RemoveField(
model_name='legalcase',
name='problems_needs_prosecution_witnesses',
),
migrations.RemoveField(
model_name='legalcase',
name='problems_needs_prosecution_witnesses_details',
),
migrations.RemoveField(
model_name='legalcase',
name='relevant_persons_pending_charges',
),
migrations.RemoveField(
model_name='legalcase',
name='relevant_persons_pending_charges_details',
),
migrations.RemoveField(
model_name='legalcase',
name='statement_of_facts',
),
migrations.RemoveField(
model_name='legalcase',
name='statements_pending',
),
migrations.RemoveField(
model_name='legalcase',
name='statements_pending_details',
),
migrations.RemoveField(
model_name='legalcase',
name='victim_impact_statement_taken',
),
migrations.RemoveField(
model_name='legalcase',
name='victim_impact_statement_taken_details',
),
migrations.RemoveField(
model_name='legalcase',
name='vulnerable_hostile_witnesses',
),
migrations.RemoveField(
model_name='legalcase',
name='vulnerable_hostile_witnesses_details',
),
migrations.RemoveField(
model_name='legalcase',
name='witness_refusing_statement',
),
migrations.RemoveField(
model_name='legalcase',
name='witness_refusing_statement_details',
),
migrations.AddField(
model_name='legalcase',
name='brief_of_evidence',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='legal_case', to='wildlifecompliance.BriefOfEvidence'),
),
migrations.AddField(
model_name='legalcase',
name='prosecution_brief',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='legal_case', to='wildlifecompliance.ProsecutionBrief'),
),
]
|
remoteapi_CUSTOM_ENVIRONMENT_AUTHENTICATION = ('HTTP_X_APPENGINE_INBOUND_APPID',
['test-brainstormcracy'])
|
from Statistics.Stats.mean import mean
from Statistics.Stats.standard_deviation import standard_deviation
def Z_score(my_population):
    """Return the z-score of each value in my_population, rounded to 2 decimal places."""
mean_result = mean(my_population)
stdev_result = standard_deviation(my_population)
zscore = list()
for x in my_population:
my_score = (x - mean_result) / stdev_result
zscore.append(round(my_score, 2))
return zscore
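# A minimal usage sketch added for illustration (not part of the original
# module). It assumes mean() and standard_deviation() accept a plain list of
# numbers; whether standard_deviation uses the sample or population convention
# is not stated here, so the exact values are only indicative.
if __name__ == "__main__":
    sample = [2, 4, 4, 4, 5, 5, 7, 9]
    # Each entry is (x - mean) / stdev, rounded to 2 decimal places.
    print(Z_score(sample))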
|
import unittest
import carbonate.cluster
import carbonate.config
import carbonate.sieve
class FilterTest(unittest.TestCase):
def setUp(self):
config_file = "tests/conf/simple.conf"
config = carbonate.config.Config(config_file)
self.cluster = carbonate.cluster.Cluster(config)
def test_sieve(self):
inputs = ['metric.100',
'metric.101',
'metric.102',
'metric.103',
'metric.104',
'metric.105',
'metric.106',
'metric.107',
'metric.108',
'metric.109']
node = '1.1.1.1'
node_long = '1.1.1.1:2003:0'
output = ['metric.101',
'metric.102',
'metric.103',
'metric.105',
'metric.107',
'metric.108']
node2 = '2.2.2.2'
node2_long = '2.2.2.2:2003:0'
output2 = ['metric.100',
'metric.104',
'metric.106',
'metric.109']
f = list(carbonate.sieve.filter_metrics(inputs, node, self.cluster))
self.assertEqual(f, output)
f = list(carbonate.sieve.filter_metrics(inputs, node_long, self.cluster))
self.assertEqual(f, output)
f = list(carbonate.sieve.filter_metrics(inputs, node2, self.cluster))
self.assertEqual(f, output2)
f = list(carbonate.sieve.filter_metrics(inputs, node2_long, self.cluster))
self.assertEqual(f, output2)
f = list(carbonate.sieve.filter_metrics(inputs, node, self.cluster, True))
self.assertEqual(f, output2)
f = list(carbonate.sieve.filter_metrics(inputs, node2, self.cluster, True))
self.assertEqual(f, output)
|
from unittest.mock import Mock, PropertyMock
import events.events # type: ignore
from systemlink.clients.tag._core._manual_reset_timer import ManualResetTimer
def MockManualResetTimer(): # noqa: N802
"""Construct a mock ManualResetTimer"""
m = Mock(ManualResetTimer)
type(m).elapsed = PropertyMock(return_value=events.events._EventSlot("elapsed"))
type(m).__aenter__ = ManualResetTimer.__aenter__
type(m).__aexit__ = ManualResetTimer.__aexit__
type(m).__enter__ = ManualResetTimer.__enter__
type(m).__exit__ = ManualResetTimer.__exit__
return m
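# Rough usage sketch, added for illustration and hedged: it assumes that an
# events.events._EventSlot supports "+=" to register a handler and that calling
# the slot fires its handlers, which is how a test would simulate the timer
# elapsing without waiting on a real ManualResetTimer.
#
#     timer = MockManualResetTimer()
#     timer.elapsed += lambda: print("timer fired")  # subscribe a handler
#     timer.elapsed()                                # simulate the elapsed event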
|
import numpy as np
import time
from toolbox.estimator.gcmi_estimator import copnorm, GCMIEstimator
## Check timing for entropy computation
from toolbox.estimator.linear_estimator import LinearEstimator
N = 10000 # 817
# M = 3 to 6 variables
M = 100
X = np.random.randn(M, N)  # note the difference from the MATLAB code,
# which uses shape (N, M); it's just the way this function expects its input
# ts = scipy.io.loadmat(os.path.join('data','ts.mat'))
# ts = np.array(ts['ts'])
# X = ts[:,:].T
t = time.time()
estimator = LinearEstimator()
e = estimator.estimate_entropy(X)
elapsed = time.time() - t
print("Covariance matrix based: Elapsed time is ", elapsed, " seconds.")
print(e)
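# Hedged cross-check added for illustration (not part of the original script):
# for Gaussian data the differential entropy has the closed form
# H = 0.5 * log((2*pi*e)**M * det(Sigma)). Whether the estimators above report
# nats or bits is not stated here, so compare magnitudes rather than exact values.
cov = np.cov(X)                        # (M, M) sample covariance across variables
sign, logdet = np.linalg.slogdet(cov)  # numerically stable log-determinant
h_gauss = 0.5 * (M * np.log(2 * np.pi * np.e) + logdet)
print("Closed-form Gaussian entropy (nats):", h_gauss)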
t = time.time()
X = copnorm(X)
estimator = GCMIEstimator()
entropy = estimator.estimate_entropy(X)
elapsed = time.time() - t
print("GCMI based: Elapsed time is ", elapsed, " seconds.")
print(entropy)
#####################
## Check timing for conditional mutual information computation
# N = 653
# A = np.random.randn(1,N)
# B = np.random.randn(6,N)
# C = np.random.randn(3,N)
# t = time.time()
# mi = lin_cmi_ccc(A.T, B.T, C.T)
# elapsed = time.time() - t
# print("Covariance matrix based: Elapsed time is ", elapsed, " seconds.")
# print(mi)
# t = time.time()
# A = copnorm(A)
# B = copnorm(B)
# C = copnorm(C)
# mi = gccmi_ccc_nocopnorm(A,B,C)
# elapsed = time.time() - t
# print("GCMI based: Elapsed time is ", elapsed, " seconds.")
# print(mi)
|
# Example for "arrow functions". To run this example:
#
# python3 -m imphook -i mod_arrow_func -m example_arrow_func
f = (a, b) => a + b
print(f(1, 2))
res = ((a, b) => a + b)(3, 4)
print(res)
print(list(map((x) => x * 2, [1, 2, 3, 4])))
curry = (a) => (b) => a + b
print(curry(3)(100))
# Confirm there's no crashing on bare tuple at the end of file.
(1, 2)
|