content
stringlengths 5
1.05M
|
|---|
import pdb
import json
import copy
import inspect
import pandas as pd
import numpy as np
import uuid
from sqlalchemy import create_engine
from sqlalchemy import select, and_
from sqlalchemy import create_engine, select, and_, or_
from sqlalchemy.pool import NullPool
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from PyFin.api import advanceDateByCalendar, bizDatesList
from alphax.alpha191 import Alpha191
from alphax.model import Market
import short_uuid
class CustomClient(object):
    """Computes Alpha191 factor values over a date range and upserts them
    into a destination MySQL database (table ``train_factors``).

    NOTE(review): both engine URLs below are empty strings -- they must be
    filled in before this client can actually connect.
    """

    def __init__(self):
        # Destination database engine / session factory. MySQL is expected,
        # given the INSERT ... ON DUPLICATE KEY UPDATE syntax used below.
        # (Removed the original's no-op local `__str__ = 'CustomClient'`.)
        destination = sa.create_engine("")
        self.destsession = sessionmaker(bind=destination, autocommit=False, autoflush=True)

    def update_destdb(self, table_name, sets):
        """Upsert each row of DataFrame `sets` into `table_name`.

        Uses MySQL's INSERT ... SET ... ON DUPLICATE KEY UPDATE so re-runs
        for the same key overwrite instead of failing.
        """
        # Replace NaN with None so the driver writes SQL NULLs.
        sets = sets.where(pd.notnull(sets), None)
        sql_pe = 'INSERT INTO {0} SET'.format(table_name)
        updates = ",".join("{0} = :{0}".format(x) for x in list(sets))
        sql_pe = sql_pe + '\n' + updates
        sql_pe = sql_pe + '\n' + 'ON DUPLICATE KEY UPDATE'
        sql_pe = sql_pe + '\n' + updates
        session = self.destsession()
        print('update_destdb:' + str(table_name))
        try:
            for index, row in sets.iterrows():
                dict_input = dict(row)
                # Bind parameters need a native datetime, not a pandas Timestamp.
                dict_input['trade_date'] = dict_input['trade_date'].to_pydatetime()
                session.execute(sql_pe, dict_input)
            session.commit()
        finally:
            # Always release the connection, even if an execute fails.
            session.close()

    def get_datasets(self, begin_date, end_date):
        """Load market data between begin_date and end_date (inclusive).

        Returns (panel, trade_date_list) where trade_date_list is sorted
        newest-first.
        """
        engine = create_engine('')
        query = select([Market]).where(
            and_(Market.trade_date >= begin_date, Market.trade_date <= end_date, ))
        mkt_df = pd.read_sql(query, engine)
        mkt_df.rename(columns={'preClosePrice': 'pre_close', 'openPrice': 'open_price',
                               'highestPrice': 'highest_price', 'lowestPrice': 'lowest_price',
                               'closePrice': 'close_price', 'turnoverVol': 'turnover_vol',
                               'turnoverValue': 'turnover_value', 'accumAdjFactor': 'accum_adj',
                               'vwap': 'vwap'}, inplace=True)
        # Zero-pad codes to 6 digits and keep those whose first digit is
        # 0, 3 or 6.
        mkt_df = mkt_df[[('000000' + str(code))[-6:][0] in '036' for code in mkt_df['code']]]
        trade_date_list = list(set(mkt_df.trade_date))
        trade_date_list.sort(reverse=True)
        mkt_df = mkt_df.set_index(['trade_date', 'code'])
        # Drop rows with no traded volume (suspended stocks).
        mkt_df = mkt_df[mkt_df['turnover_vol'] > 0]
        # Backward adjustment of stock prices by the accumulated adjustment
        # factor.
        # BUG FIX: the original list contained 'lowestPrice' (the pre-rename
        # column name), so lowest_price was silently never adjusted.
        for p in mkt_df.columns:
            if p in ['open_price', 'highest_price', 'lowest_price', 'close_price', 'vwap']:
                mkt_df[p] = mkt_df[p] * mkt_df['accum_adj']
        # NOTE(review): DataFrame.to_panel() was removed in pandas >= 0.25;
        # this code requires an old pandas version.
        total_data = mkt_df.to_panel()
        return total_data, trade_date_list

    def create_func(self, setting_file):
        """Read the JSON parameter file and build the list of factor calls.

        Each JSON entry maps an alpha number to a list of parameter dicts;
        for every combination we record the call-expression string, its data
        dependencies, and the lookback window it needs.
        """
        func_list = []
        with open(setting_file, 'rb') as f:
            content = f.read()
        json_ob = json.loads(content)
        for key, values in json_ob.items():
            alpha_fun_name = 'Alpha191().alpha_' + str(key)
            # SECURITY NOTE: eval() on strings built from the settings file --
            # only run this with trusted configuration files.
            alpha_fun = eval(alpha_fun_name)
            fun_param = inspect.signature(alpha_fun).parameters
            dependencies = fun_param['dependencies'].default
            max_window = fun_param['max_window'].default
            alpha_fun_name += '(data=data'
            for params in values:
                alpha_fun1 = copy.deepcopy(alpha_fun_name)
                max_windows = 0
                for pkey, pvalue in params.items():
                    alpha_fun1 += ',{0}={1}'.format(pkey, pvalue)
                    if 'param' in pkey:
                        # Custom window parameters can extend the lookback.
                        max_windows += abs(pvalue)
                alpha_fun1 += ')'
                # Use the larger of the default and the summed custom windows.
                windows = max_windows if max_windows > max_window else max_window
                func_list.append({'func': alpha_fun1, 'dependencies': dependencies,
                                  'max_window': windows})
        return func_list

    ## Evaluate factors with custom parameters.
    def custom_func(self, setting_file, begin_date, end_date):
        """Evaluate every configured factor for every trade date and upsert
        the results into train_factors."""
        func_list = self.create_func(setting_file)
        total_data, trade_date_list = self.get_datasets(begin_date, end_date)
        for func_info in func_list:
            # Id derived from the call-expression string (see short_uuid).
            session_id = str(short_uuid.decode(short_uuid.uuid(func_info['func'])))
            dependencies = func_info['dependencies']
            max_window = func_info['max_window']
            func = func_info['func']
            for date in trade_date_list:
                # Window start: max_window - 1 business days before `date`.
                begin = advanceDateByCalendar('china.sse', date, '-%sb' % (max_window - 1))
                data = {}
                for dep in dependencies:
                    data[dep] = total_data[dep].loc[begin:date]
                print(func, date)
                # `data` is referenced by name inside the eval'd expression.
                result = pd.DataFrame(eval(func), columns=['value'])
                result['session'] = session_id
                result['params'] = func_info['func']
                result['trade_date'] = date
                result = result.reset_index()
                self.update_destdb('train_factors', result)
if __name__ == "__main__":
    # Script entry point: compute all configured Alpha191 factors for the
    # 2017-2018 range and write them to the destination database.
    client = CustomClient()
    client.custom_func('Alpha191_param.json', '2017-01-01', '2018-12-28')
|
import tkinter as tk
from tkinter import ttk
from collections import deque
class Timer(ttk.Frame):
    """Pomodoro countdown frame.

    `parent` is the frame which contains the timer frame, and `controller`
    is the object providing the shared tk variables (pomodoro / short_break /
    long_break durations and the timer_schedule deque). tk.Frame properties
    are inherited.
    """
    def __init__(self, parent, controller, show_settings):
        super().__init__(parent)
        self['style'] = 'Background.TFrame'
        # setting the object as the controller
        self.controller = controller
        pomodoro_time = int(controller.pomodoro.get())
        # variable to hold the current time with default value
        self.current_time = tk.StringVar(value=f'{pomodoro_time:02d}:00')
        # variable to hold the current phase of the timer schedule
        self.current_timer_label = tk.StringVar(value=controller.timer_schedule[0])
        # timer_running is False initially; the timer starts only after the
        # Start button is clicked
        self.timer_running = False
        # handle of the pending after() callback, used to cancel ticking
        self._timer_decrement_job = None
        # label showing the current phase
        timer_description = ttk.Label(
            self,
            textvariable=self.current_timer_label,
            style='LightText.TLabel'
        )
        timer_description.grid(row=0, column=0, sticky='W', padx=(10, 0), pady=(10, 0))
        # button to switch from the timer frame to the settings frame
        settings_button = ttk.Button(
            self,
            text='Settings',
            command=show_settings,
            style='PomodoroButton.TButton',
            cursor='hand2'
        )
        settings_button.grid(row=0, column=1, sticky='E', padx=10, pady=10)
        timer_frame = ttk.Frame(self, height='100', style='Timer.TFrame')
        timer_frame.grid(row=1, column=0, columnspan=2, pady=(10, 0), sticky='NSEW')
        # counter label in timer_frame
        timer_counter = ttk.Label(timer_frame,
                                  textvariable=self.current_time,
                                  style='TimerText.TLabel',
                                  )
        timer_counter.place(relx=0.5, rely=0.5, anchor='center')  # positioning method like grid
        # Button containing frame
        button_container = ttk.Frame(self, padding=100, style='Background.TFrame')
        button_container.grid(row=2, column=0, columnspan=2, sticky='EW')
        button_container.columnconfigure((0, 1, 2), weight=1)
        self.start_button = ttk.Button(
            button_container,
            text='Start',
            command=self.start_timer,
            style='PomodoroButton.TButton',
            cursor='hand2'  # change the appearance of cursor on the button
        )
        self.start_button.grid(row=0, column=0, sticky='EW')
        self.stop_button = ttk.Button(
            button_container,
            text='Stop',
            state='disabled',  # initially off
            command=self.stop_timer,
            style='PomodoroButton.TButton',
            cursor='hand2'
        )
        self.stop_button.grid(row=0, column=1, sticky='EW', padx=5)
        # reset_button / reset_timer are not stored on self because they are
        # not needed outside this class
        reset_button = ttk.Button(
            button_container,
            text='Reset',
            command=self.reset_timer,
            style='PomodoroButton.TButton',
            cursor='hand2'
        )
        reset_button.grid(row=0, column=2, sticky='EW')

    def start_timer(self):
        # Begin (or resume) the countdown.
        self.timer_running = True  # mark the timer as running
        self.start_button['state'] = 'disabled'  # disable Start while running
        self.stop_button['state'] = 'enabled'  # enable Stop (initially disabled)
        self.decrement_time()

    def stop_timer(self):
        # Pause the countdown and cancel the pending tick, if any.
        self.timer_running = False  # on click of stop, turn the timer off
        self.stop_button['state'] = 'disabled'  # disable Stop after the click
        self.start_button['state'] = 'enabled'  # re-enable Start
        if self._timer_decrement_job:  # a tick is currently scheduled
            self.after_cancel(self._timer_decrement_job)  # cancel further execution
            self._timer_decrement_job = None  # clear the stale job handle

    def reset_timer(self):
        # Stop the timer and restore the initial schedule and displayed time.
        self.stop_timer()
        pomodoro_time = int(self.controller.pomodoro.get())  # configured pomodoro length
        self.current_time.set(f'{pomodoro_time:02d}:00')  # reset displayed time
        self.controller.timer_schedule = deque(self.controller.timer_order)  # schedule back to initial state
        self.current_timer_label.set(self.controller.timer_schedule[0])  # label shows first phase again

    def decrement_time(self):
        """Tick once per second: update the label, and on 00:00 advance to
        the next phase in the schedule."""
        current_time = self.current_time.get()
        if self.timer_running and current_time != '00:00':  # timer is running
            minutes, seconds = current_time.split(':')  # split "MM:SS" into parts
            if int(seconds) > 0:  # never let seconds go negative
                seconds = int(seconds)-1
                minutes = int(minutes)
            else:  # roll over to the next minute
                seconds = 59
                minutes = int(minutes)-1
            # setting the label value
            self.current_time.set(f'{minutes:02d}:{seconds:02d}')
            # schedule the next tick one second from now
            self._timer_decrement_job = self.after(1000, self.decrement_time)
        elif self.timer_running and current_time == '00:00':
            self.controller.timer_schedule.rotate(-1)  # rotate left: first element moves to the end
            next_up = self.controller.timer_schedule[0]  # the new current phase
            # update the displayed phase name
            self.current_timer_label.set(next_up)
            # set the current time according to which phase is now first
            if next_up == 'Pomodoro':
                pomodoro_time = int(self.controller.pomodoro.get())
                self.current_time.set(f'{pomodoro_time:02d}:00')
            elif next_up == 'Short Break':
                short_break_time = int(self.controller.short_break.get())
                self.current_time.set(f'{short_break_time:02d}:00')
            elif next_up == 'Long Break':
                long_break_time = int(self.controller.long_break.get())
                self.current_time.set(f'{long_break_time:02d}:00')
            self._timer_decrement_job = self.after(1000, self.decrement_time)
|
# The World's Most Annoying E-Book
# Concepts Used:
#   print(), input(), newline, tab, comments, loops
print("\n\t\tThe World's Most Annoying E-Book")
print("\n\n\nBy Rich Williams")
input("\nPress Enter to Begin")

# Title
print("\n\nA Tale of Two Cities")
print("By Charles Dickens")
input("\n\nPress Enter to Continue")

# The Really Annoying Part: print the opening sentence one word per line.
# (Replaces ~120 identical print() calls with a single loop over the text;
# the output is byte-for-byte the same.)
OPENING = (
    "It was the best of times, it was the worst of times, "
    "it was the age of wisdom, it was the age of foolishness, "
    "it was the epoch of belief, it was the epoch of incredulity, "
    "it was the season of Light, it was the season of Darkness, "
    "it was the spring of hope, it was the winter of despair, "
    "we had everything before us, we had nothing before us, "
    "we were all going direct to heaven, "
    "we were all going direct the other way - "
    "in short the period was so far like the present period, "
    "that some of its noisiest authorities insisted on its being "
    "received, for good or for evil, in the superlative degree "
    "of comparison only."
)
print("\n")  # two blank lines before the passage (was print("\n\nIt"))
for word in OPENING.split():
    print(word)

# Conclusion
input("\nDo you really want to continue?")
print("\nThen go buy a Kindle")
input("\n\nPress Enter to Exit")
|
################################################################################
##
## 新浪微博热点事件发现与脉络生成系统
##
## @Filename ./HotspotsAnalysis/CorrelationAnalysis.py
## @Author 李林峰, 刘臻, 徐润邦, 马伯乐, 朱杰, 瞿凤业
## @Version 3.1
## @Date 2019/09/06
## @Copyright Copyright (c) 2019. All rights reserved.
## @Description 本文件实现热点事件相关性分析模块
##
################################################################################
import numpy as np
import os
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
MYDIR = os.path.dirname(__file__)
## 模块内部接口
# <summary> 计算两个向量之间的余弦相似度 </summary>
# <param>
# vector_a (一维列表): [num1, num2, ...]
# vector_b (一维列表): [num1, num2, ...]
# </param>
# <return>
# sim (float): 相关性系数
# </return>
def cos_sim(vector_a, vector_b):
    """Cosine similarity of two 1-D vectors, rescaled from [-1, 1] to [0, 1].

    Args:
        vector_a: 1-D sequence of numbers.
        vector_b: 1-D sequence of numbers.

    Returns:
        float similarity in [0, 1]; 0.0 when either vector has zero norm.
    """
    # np.mat is deprecated; use plain arrays and a dot product instead.
    a = np.asarray(vector_a, dtype=float).ravel()
    b = np.asarray(vector_b, dtype=float).ravel()
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0:
        # Degenerate case: at least one zero vector.
        return 0.0
    cos = float(np.dot(a, b)) / denom
    # Map cosine from [-1, 1] into [0, 1] so downstream code gets a
    # non-negative coefficient.
    return 0.5 * cos + 0.5
## 模块对外接口
# <summary> 生成相关性矩阵 </summary>
# <param>
# input_file_path (str) 热点事件的文件路径
# output_filepath (str) 输出关联性矩阵的文件路径
# </param>
# <io>
# file input: 从 input_file_path (str) 路径读入热点事件
# file output:向 output_filepath (str) 路径写入事件关联性矩阵 "num11 num12 ...\n num21 num22 ...\n ..."
# </io>
def generate_correlation_matrics(input_file_path, output_file_path):
    """Build the pairwise similarity matrix for hotspot events.

    Reads one event per line from input_file_path (comma-separated; the
    third field is the blog text), computes TF-IDF vectors, and writes the
    n x n similarity matrix to output_file_path as space-separated rows.
    """
    blogs = []
    with open(os.path.join(MYDIR, input_file_path), 'r', encoding='utf-8') as f:
        for line in f:
            # The third comma-separated field is the event text.
            blogs.append(line.split(',')[2])
    # Build TF-IDF weight vectors for every blog.
    vectorizer = CountVectorizer()
    count = vectorizer.fit_transform(blogs)
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(count)
    weight = tfidf.toarray()
    # cos_sim(a, b) == cos_sim(b, a), so compute each pair once and mirror
    # it (the original recomputed both halves).
    n = len(blogs)
    correlation_matrics = [[0.0] * n for _ in range(n)]
    for i in range(n):
        for j in range(i, n):
            sim = cos_sim(weight[i], weight[j])
            correlation_matrics[i][j] = sim
            correlation_matrics[j][i] = sim
    # (Removed the redundant f.close() -- `with` already closes the file.)
    with open(os.path.join(MYDIR, output_file_path), 'w', encoding='utf-8') as f:
        for row in correlation_matrics:
            f.write(" ".join('%s' % num for num in row) + '\n')
|
from tensorflow.python.keras.applications.inception_v3 import InceptionV3
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Dense, GlobalAveragePooling2D, Dropout
from tensorflow.python.keras.layers import Input
from shared.utils import setup_trainable_layers
def InceptionV3WithCustomLayers(nb_classes, input_shape, fc_size):
    """
    Build an InceptionV3 backbone (imagenet weights, no top) with a fresh
    classification head on top.

    Args:
        nb_classes: number of output classes
        input_shape: input shape of the images
        fc_size: the new fully-connected layer has fc_size * 2 units

    Returns:
        (model, base_model): the full model with the new head, and the
        backbone it was built from (useful for freezing layers later).
    """
    base_model = InceptionV3(input_tensor=Input(shape=input_shape),
                             weights='imagenet', include_top=False)
    # New head: global pooling -> FC (random init) -> dropout -> softmax.
    head = GlobalAveragePooling2D()(base_model.output)
    head = Dense(fc_size * 2, activation='relu')(head)
    head = Dropout(0.3)(head)
    predictions = Dense(nb_classes, activation='softmax')(head)
    full_model = Model(outputs=predictions, inputs=base_model.input)
    return full_model, base_model
def build_finetuned_model(args, input_shape, fc_size):
    """
    Build and compile a finetuned InceptionV3 model (tensorflow Keras
    implementation) with imagenet weights and fresh prediction layers.

    Args:
        args: training args (needs args.nb_classes and args.layers_to_freeze)
        input_shape: shape of the input tensor
        fc_size: base width for the new final layers (multiples may be used)

    Returns:
        the compiled, finetuned InceptionV3 model
    """
    model, _base = InceptionV3WithCustomLayers(args.nb_classes, input_shape, fc_size)
    # Freeze the requested number of backbone layers before compiling.
    setup_trainable_layers(model, args.layers_to_freeze)
    model.compile(optimizer='RMSprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
|
import matplotlib.pyplot as plt
def plot_training_history(history, show=True):
    """Plot training vs. validation accuracy per epoch from a Keras History."""
    for series in ('accuracy', 'val_accuracy'):
        plt.plot(history.history[series], label=series)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.ylim([0.5, 1])
    plt.legend(loc='lower right')
    if show:
        plt.show()
def split_name(name):
    """Split on dots: return (second-to-last dotted piece, stripped of any
    directory prefix, last dotted piece) -- e.g. 'd/m.ckpt' -> ('m', 'ckpt')."""
    pieces = name.split('.')
    stem = pieces[-2].rsplit('/', 1)[-1]
    return stem, pieces[-1]
def extract_name(name):
    """Return the last non-empty '/'-separated component of *name*
    ('a/b/' -> 'b')."""
    parts = name.split('/')
    return parts[-1] or parts[-2]
|
import funcs as func
from flask import Flask
from flask_restful import Api, Resource

# Flask application and its flask-restful wrapper; the Resource classes
# below register their routes on `api`.
app = Flask(__name__)
api = Api(app)
import docker

# Connect to the local Docker daemon; the whole API is useless without it,
# so bail out immediately if the connection fails.
try:
    client = docker.from_env()
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit are
    # no longer swallowed; behavior on daemon failure is unchanged (exit).
    exit()
def create():
    # NOTE(review): dead code -- the `create` Resource class defined right
    # below immediately shadows this name, and the route handler calls
    # func.create() instead. Consider removing or renaming this function.
    # Runs a detached, interactive container from the fixed image and
    # returns the container object.
    output = client.containers.run("quay.io/infoupgraders/images:final", detach=True, stdin_open=True)
    return output
class create(Resource):
    """POST /create/<lang>: provision a new container for *lang*."""

    def post(self, lang):
        print(lang)
        info = func.create(lang)
        payload = {
            'uuid': info['uuid'],
            'uuid_short': info['uuid_short'],
            'startup': info['startup'],
        }
        return {"data": payload}


api.add_resource(create, "/create/<string:lang>")
class start(Resource):
    """POST /start/<uuid>/<startup>: start an existing container."""

    def post(self, uuid, startup):
        return {"data": func.start(uuid, startup)}


api.add_resource(start, "/start/<string:uuid>/<string:startup>")
class restart(Resource):
    """POST /restart/<uuid>: restart a container."""

    def post(self, uuid):
        return {"data": func.restart(uuid)}


api.add_resource(restart, "/restart/<string:uuid>")
class stop(Resource):
    """POST /stop/<uuid>: stop a container."""

    def post(self, uuid):
        return {"data": func.stop(uuid)}


api.add_resource(stop, "/stop/<string:uuid>")
class kill(Resource):
    """POST /kill/<uuid>: force-kill a container."""

    def post(self, uuid):
        return {"data": func.kill(uuid)}


api.add_resource(kill, "/kill/<string:uuid>")
if __name__ == "__main__":
    # Development server only; use a proper WSGI server in production.
    app.run(debug=True, host="127.0.0.1", port=5000)
|
import math
from future.utils import with_metaclass
class EventRegistry(object):
    """Maps MIDI status bytes and meta-command bytes to their event classes.

    Populated automatically by RegisterEventMeta as event subclasses are
    defined.
    """

    Events = {}
    MetaEvents = {}

    @classmethod
    def register_event(cls, event, bases):
        """Record *event* in the table matching its base classes."""
        if Event in bases or NoteEvent in bases:
            # Channel events are keyed by status byte.
            assert event.statusmsg not in cls.Events, \
                "Event %s already registered" % event.name
            cls.Events[event.statusmsg] = event
        elif MetaEvent in bases or MetaEventWithText in bases:
            # Meta events are keyed by metacommand; abstract ones (None)
            # are skipped.
            if event.metacommand is not None:
                assert event.metacommand not in cls.MetaEvents, \
                    "Event %s already registered" % event.name
                cls.MetaEvents[event.metacommand] = event
        else:
            raise ValueError("Unknown bases class in event type: ", event.name)
class RegisterEventMeta(type):
    """Metaclass that auto-registers concrete event classes with
    EventRegistry.

    The abstract base classes listed below are excluded from registration.
    """

    def __init__(cls, name, bases, namespace):
        # Renamed the third parameter from `dict` -- it shadowed the
        # builtin. It is passed positionally by the type machinery, so the
        # rename is interface-safe.
        if name not in ['AbstractEvent', 'Event', 'MetaEvent', 'NoteEvent',
                        'MetaEventWithText']:
            EventRegistry.register_event(cls, bases)
class AbstractEvent(with_metaclass(RegisterEventMeta, object)):
    """Base class for all MIDI events: a tick offset plus a data byte list.

    Subclasses set `name`, `length` (an int, or 'varlen' for variable-length
    payloads) and `statusmsg`.
    """

    __slots__ = ['tick', 'data']
    name = "Generic MIDI Event"
    length = 0
    statusmsg = 0x0

    def __init__(self, **kw):
        # Integer lengths get a zero-filled payload; variable-length events
        # ('varlen') start empty. (isinstance replaces the original
        # `type(...) == int` check -- same result for the values used here.)
        if isinstance(self.length, int):
            defdata = [0] * self.length
        else:
            defdata = []
        self.tick = 0
        self.data = defdata
        # Any attribute (tick, data, subclass properties) may be set via kwargs.
        for key in kw:
            setattr(self, key, kw[key])

    def __lt__(self, other):
        # Order primarily by tick, then by payload.
        if self.tick < other.tick:
            return True
        return self.data < other.data

    def __eq__(self, other):
        return (self.__class__ is other.__class__ and
                self.tick == other.tick and
                self.data == other.data)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __baserepr__(self, keys=None):
        """repr helper: render tick, the given extra keys, then data.

        `keys` defaults to None instead of the original mutable `[]` default.
        """
        keys = ['tick'] + (keys or []) + ['data']
        body = []
        for key in keys:
            val = getattr(self, key)
            keyval = "%s=%r" % (key, val)
            body.append(keyval)
        body = str.join(', ', body)
        return "midi.%s(%s)" % (self.__class__.__name__, body)

    def __repr__(self):
        return self.__baserepr__()
class Event(AbstractEvent):
    """Base class for channel events; adds a MIDI channel (default 0)."""

    __slots__ = ['channel']
    name = 'Event'

    def __init__(self, **kw):
        # Default the channel to 0 without mutating the caller's dict.
        if 'channel' not in kw:
            kw = kw.copy()
            kw['channel'] = 0
        super(Event, self).__init__(**kw)

    def copy(self, **kw):
        # Return a copy of this event, with any kwargs applied as overrides.
        _kw = {'channel': self.channel, 'tick': self.tick, 'data': self.data}
        _kw.update(kw)
        return self.__class__(**_kw)

    def __lt__(self, other):
        # Order by (tick, data) first; break ties on channel.
        return (super(Event, self).__lt__(other) or
                (super(Event, self).__eq__(other) and
                 self.channel < other.channel))

    def __eq__(self, other):
        # Equal when the base comparison (class, tick, data) passes and the
        # channels match.
        return super(Event, self).__eq__(other) and \
            self.channel == other.channel

    def __repr__(self):
        return self.__baserepr__(['channel'])

    def is_event(cls, statusmsg):
        # True when the high nibble of statusmsg matches this event type
        # (the low nibble carries the channel).
        return (cls.statusmsg == (statusmsg & 0xF0))
    is_event = classmethod(is_event)
"""
MetaEvent is a special subclass of Event that is not meant to
be used as a concrete class. It defines a subset of Events known
as the Meta events.
"""
class MetaEvent(AbstractEvent):
    """Abstract base for meta events (status byte 0xFF); not used directly.

    Concrete subclasses set `metacommand` to their meta-command byte.
    """
    statusmsg = 0xFF
    metacommand = 0x0
    name = 'Meta Event'

    def is_event(cls, statusmsg):
        # All meta events share the full 0xFF status byte.
        return (statusmsg == 0xFF)
    is_event = classmethod(is_event)
"""
NoteEvent is a special subclass of Event that is not meant to
be used as a concrete class. It defines the generalities of NoteOn
and NoteOff events.
"""
class NoteEvent(Event):
    """Abstract base for NoteOn/NoteOff; data = [pitch, velocity]."""
    length = 2

    def get_pitch(self):
        return self.data[0]

    def set_pitch(self, val):
        self.data[0] = val
    # Note number (first data byte).
    pitch = property(get_pitch, set_pitch)

    def get_velocity(self):
        return self.data[1]

    def set_velocity(self, val):
        self.data[1] = val
    # Key velocity (second data byte).
    velocity = property(get_velocity, set_velocity)
class NoteOnEvent(NoteEvent):
    """Note On channel event (status 0x9n)."""
    statusmsg = 0x90
    name = 'Note On'


class NoteOffEvent(NoteEvent):
    """Note Off channel event (status 0x8n)."""
    statusmsg = 0x80
    name = 'Note Off'
class AfterTouchEvent(Event):
    """Polyphonic key pressure (aftertouch) event; data = [pitch, value]."""
    statusmsg = 0xA0
    length = 2
    name = 'After Touch'

    def set_pitch(self, new_pitch):
        self.data[0] = new_pitch

    def get_pitch(self):
        return self.data[0]
    pitch = property(get_pitch, set_pitch)

    def set_value(self, new_value):
        self.data[1] = new_value

    def get_value(self):
        return self.data[1]
    value = property(get_value, set_value)
class ControlChangeEvent(Event):
    """Control Change event; data = [control number, control value]."""
    statusmsg = 0xB0
    length = 2
    name = 'Control Change'

    def get_control(self):
        return self.data[0]

    def set_control(self, new_control):
        self.data[0] = new_control
    control = property(get_control, set_control)

    def get_value(self):
        return self.data[1]

    def set_value(self, new_value):
        self.data[1] = new_value
    value = property(get_value, set_value)
class ProgramChangeEvent(Event):
    """Program (patch) Change event; data = [program number]."""
    statusmsg = 0xC0
    length = 1
    name = 'Program Change'

    def get_value(self):
        return self.data[0]

    def set_value(self, new_value):
        self.data[0] = new_value
    value = property(get_value, set_value)
class ChannelAfterTouchEvent(Event):
    """Channel (mono) aftertouch event; data = [pressure value].

    BUG FIX: the original accessors indexed self.data[1], but with
    length == 1 the payload only has index 0, so getting or setting the
    value raised IndexError. Both accessors now use data[0].
    """
    statusmsg = 0xD0
    length = 1
    name = 'Channel After Touch'

    def set_value(self, val):
        self.data[0] = val

    def get_value(self):
        return self.data[0]
    value = property(get_value, set_value)
class PitchWheelEvent(Event):
    """Pitch Wheel (pitch bend) event; data = [LSB, MSB] of a 14-bit value."""
    statusmsg = 0xE0
    length = 2
    name = 'Pitch Wheel'

    def get_pitch(self):
        # Combine the two 7-bit data bytes and re-center on 0x2000, giving
        # a signed bend in [-8192, 8191].
        return ((self.data[1] << 7) | self.data[0]) - 0x2000

    def set_pitch(self, pitch):
        value = pitch + 0x2000
        self.data[0] = value & 0x7F  # low 7 bits
        self.data[1] = (value >> 7) & 0x7F  # high 7 bits
    pitch = property(get_pitch, set_pitch)
class SysexEvent(Event):
    """System Exclusive event; payload length is variable."""
    statusmsg = 0xF0
    name = 'SysEx'
    length = 'varlen'

    def is_event(cls, statusmsg):
        # Unlike channel events, SysEx matches the full status byte (there
        # is no channel nibble to mask off).
        return (cls.statusmsg == statusmsg)
    is_event = classmethod(is_event)
class SequenceNumberMetaEvent(MetaEvent):
    """Meta event 0x00: sequence number (2 data bytes)."""
    name = 'Sequence Number'
    metacommand = 0x00
    length = 2
class MetaEventWithText(MetaEvent):
    """Abstract base for meta events whose payload is text.

    When no explicit `text` kwarg is given, the text is decoded from the
    raw data bytes.
    """
    def __init__(self, **kw):
        super(MetaEventWithText, self).__init__(**kw)
        if 'text' not in kw:
            self.text = ''.join(chr(datum) for datum in self.data)

    def __repr__(self):
        return self.__baserepr__(['text'])
class TextMetaEvent(MetaEventWithText):
    """Meta event 0x01: free-form text."""
    name = 'Text'
    metacommand = 0x01
    length = 'varlen'


class CopyrightMetaEvent(MetaEventWithText):
    """Meta event 0x02: copyright notice."""
    name = 'Copyright Notice'
    metacommand = 0x02
    length = 'varlen'


class TrackNameEvent(MetaEventWithText):
    """Meta event 0x03: track name."""
    name = 'Track Name'
    metacommand = 0x03
    length = 'varlen'


class InstrumentNameEvent(MetaEventWithText):
    """Meta event 0x04: instrument name."""
    name = 'Instrument Name'
    metacommand = 0x04
    length = 'varlen'


class LyricsEvent(MetaEventWithText):
    """Meta event 0x05: lyric text."""
    name = 'Lyrics'
    metacommand = 0x05
    length = 'varlen'


class MarkerEvent(MetaEventWithText):
    """Meta event 0x06: marker text."""
    name = 'Marker'
    metacommand = 0x06
    length = 'varlen'


class CuePointEvent(MetaEventWithText):
    """Meta event 0x07: cue point text."""
    name = 'Cue Point'
    metacommand = 0x07
    length = 'varlen'


class ProgramNameEvent(MetaEventWithText):
    """Meta event 0x08: program name."""
    name = 'Program Name'
    metacommand = 0x08
    length = 'varlen'
class UnknownMetaEvent(MetaEvent):
    """Catch-all for meta commands with no dedicated class.

    `metacommand` is supplied per instance via a required constructor kwarg,
    shadowing the class attribute.
    """
    name = 'Unknown'
    # Overridden per instance; the kwarg below shadows this class attribute.
    metacommand = None

    def __init__(self, **kw):
        # (Starts the MRO at this class, as convention dictates -- the
        # original's super(MetaEvent, self) resolved to the same
        # AbstractEvent.__init__ since MetaEvent defines no __init__.)
        super(UnknownMetaEvent, self).__init__(**kw)
        self.metacommand = kw['metacommand']

    def copy(self, **kw):
        """Copy this event, preserving its metacommand.

        BUG FIX: the original called super().copy(kw), passing the dict as
        a positional argument, which raises TypeError -- it must be unpacked
        with **kw.
        """
        kw['metacommand'] = self.metacommand
        return super(UnknownMetaEvent, self).copy(**kw)
class ChannelPrefixEvent(MetaEvent):
    """Meta event 0x20: MIDI channel prefix for subsequent events."""
    name = 'Channel Prefix'
    metacommand = 0x20
    length = 1


class PortEvent(MetaEvent):
    """Meta event 0x21: MIDI port/cable number.

    NOTE(review): no `length` is set, so it inherits 0 and `data` defaults
    to [0]*0 == [] -- confirm that is intended (the similar ChannelPrefix
    event declares length = 1).
    """
    name = 'MIDI Port/Cable'
    metacommand = 0x21


class TrackLoopEvent(MetaEvent):
    """Meta event 0x2E: track loop marker."""
    name = 'Track Loop'
    metacommand = 0x2E


class EndOfTrackEvent(MetaEvent):
    """Meta event 0x2F: marks the end of a track."""
    name = 'End of Track'
    metacommand = 0x2F
class SetTempoEvent(MetaEvent):
    """Meta event 0x51: tempo, stored as mpqn (microseconds per quarter
    note) in 3 big-endian data bytes."""
    name = 'Set Tempo'
    metacommand = 0x51
    length = 3

    def set_bpm(self, bpm):
        self.mpqn = int(float(6e7) / bpm)

    def get_bpm(self):
        return float(6e7) / self.mpqn
    # Beats per minute, derived from mpqn (6e7 microseconds per minute).
    bpm = property(get_bpm, set_bpm)

    def get_mpqn(self):
        assert(len(self.data) == 3)
        # Big-endian: data[0] is the most significant byte.
        vals = [self.data[x] << (16 - (8 * x)) for x in range(3)]
        return sum(vals)

    def set_mpqn(self, val):
        # Split the value back into 3 big-endian bytes.
        self.data = [(val >> (16 - (8 * x)) & 0xFF) for x in range(3)]
    mpqn = property(get_mpqn, set_mpqn)
class SmpteOffsetEvent(MetaEvent):
    """Meta event 0x54: SMPTE offset (no fixed length declared here)."""
    name = 'SMPTE Offset'
    metacommand = 0x54
class TimeSignatureEvent(MetaEvent):
    """Meta event 0x58: time signature.

    data = [numerator, log2(denominator), metronome, thirtyseconds].
    """
    name = 'Time Signature'
    metacommand = 0x58
    length = 4

    def get_numerator(self):
        return self.data[0]

    def set_numerator(self, value):
        self.data[0] = value
    numerator = property(get_numerator, set_numerator)

    def get_denominator(self):
        # Stored as a power of two.
        return 2 ** self.data[1]

    def set_denominator(self, value):
        self.data[1] = int(math.log(value, 2))
    denominator = property(get_denominator, set_denominator)

    def get_metronome(self):
        return self.data[2]

    def set_metronome(self, value):
        self.data[2] = value
    metronome = property(get_metronome, set_metronome)

    def get_thirtyseconds(self):
        return self.data[3]

    def set_thirtyseconds(self, value):
        self.data[3] = value
    thirtyseconds = property(get_thirtyseconds, set_thirtyseconds)
class KeySignatureEvent(MetaEvent):
    """Meta event 0x59: key signature; data = [alternatives, minor]."""
    name = 'Key Signature'
    metacommand = 0x59
    length = 2

    def get_alternatives(self):
        # Stored as an unsigned byte; decode two's complement so values
        # above 127 come back negative.
        d = self.data[0]
        return d - 256 if d > 127 else d

    def set_alternatives(self, val):
        # Encode negative counts back into an unsigned byte.
        self.data[0] = 256 + val if val < 0 else val
    alternatives = property(get_alternatives, set_alternatives)

    def get_minor(self):
        return self.data[1]

    def set_minor(self, val):
        self.data[1] = val
    # Presumably 0 = major, 1 = minor -- not enforced here.
    minor = property(get_minor, set_minor)
class SequencerSpecificEvent(MetaEvent):
    """Meta event 0x7F: sequencer-specific (vendor) data."""
    name = 'Sequencer Specific'
    metacommand = 0x7F
|
import argparse
import io
import logging
import os
import tokenize
import ctypeslib.codegen.codegenerator
import ctypeslib.codegen.typedesc
def rewrite_ctypes_little_endian(readline):
    """Token-stream filter: rewrite `ctypes.Structure` / `ctypes.Union`
    into their explicit little-endian variants.

    Yields tokenize tokens from *readline*; only a name immediately
    following a literal `ctypes.` is ever rewritten.
    """
    replacements = {'Structure': 'LittleEndianStructure',
                    'Union': 'LittleEndianUnion'}
    before_prev = prev = None
    for tok in tokenize.tokenize(readline):
        follows_ctypes_dot = (
            prev is not None and before_prev is not None
            and prev.string == '.' and before_prev.string == 'ctypes'
        )
        if follows_ctypes_dot and tok.string in replacements:
            # Keep the original token positions; only the string changes.
            yield (tok.type, replacements[tok.string]) + tok[2:]
        else:
            yield tok
        before_prev, prev = prev, tok
def generate_ctypes(header_file, py_file, cpp_flags):
    """Generate a ctypes binding module from *header_file* into *py_file*.

    The generated source is post-processed so every `ctypes.Structure` /
    `ctypes.Union` base becomes its explicit little-endian variant (see
    rewrite_ctypes_little_endian).
    """
    logging.info("Generating %s from %s", py_file, header_file)
    buffer = io.StringIO()
    ctypeslib.codegen.codegenerator.generate_code([header_file], buffer,
                                                  types=(ctypeslib.codegen.typedesc.Alias,
                                                         ctypeslib.codegen.typedesc.Structure,
                                                         ctypeslib.codegen.typedesc.Variable,
                                                         ctypeslib.codegen.typedesc.Enumeration,
                                                         ctypeslib.codegen.typedesc.Function,
                                                         ctypeslib.codegen.typedesc.Macro,
                                                         ctypeslib.codegen.typedesc.Typedef,
                                                         ctypeslib.codegen.typedesc.Union),
                                                  filter_location=True,
                                                  flags=cpp_flags)
    # Re-tokenize the generated source and rewrite the base classes.
    bytes_buffer = io.BytesIO(buffer.getvalue().encode())
    # Renamed from `bytes`, which shadowed the builtin type.
    rewritten = tokenize.untokenize(rewrite_ctypes_little_endian(bytes_buffer.readline))
    with open(py_file, 'wb') as outfile:
        outfile.write(rewritten)
def _cpp_flags_with_includes(includes):
    """Build the cpp flag list: always force-include stdint.h, then each
    extra header in order."""
    flags = ['-include', 'stdint.h']
    for inc in includes:
        flags.extend(('-include', inc))
    return flags


def run(kernel_dir, log_level):
    """Generate all pptable ctypes modules from a kernel source tree.

    Handles both the old (drivers/gpu/drm/amd/powerplay) and new
    (drivers/gpu/drm/amd/pm/...) directory layouts. The three generation
    steps of the original are folded into a data-driven loop.
    """
    logging.basicConfig(level=log_level)
    kernel_dir = os.path.abspath(kernel_dir)
    out_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'src', 'amdgpu_pptable', 'generated')
    powerplay_old_dir = os.path.join(kernel_dir, 'drivers/gpu/drm/amd/powerplay')
    powerplay_new_dir = os.path.join(kernel_dir, 'drivers/gpu/drm/amd/pm/powerplay')
    powerplay_dir = powerplay_old_dir if os.path.exists(powerplay_old_dir) else powerplay_new_dir
    powerplay_inc_old_dir = os.path.join(powerplay_old_dir, 'inc')
    powerplay_inc_new_dir = os.path.join(kernel_dir, 'drivers/gpu/drm/amd/pm/inc')
    powerplay_inc_dir = powerplay_inc_old_dir if os.path.exists(powerplay_inc_old_dir) else powerplay_inc_new_dir
    atom_types = os.path.join(kernel_dir, 'drivers/gpu/drm/amd/include/atom-types.h')
    atomfirmware = os.path.join(kernel_dir, 'drivers/gpu/drm/amd/include/atomfirmware.h')
    atombios = os.path.join(kernel_dir, 'drivers/gpu/drm/amd/include/atombios.h')

    # (input header, output module name, force-included headers) per format.
    jobs = [
        (os.path.join(powerplay_dir, 'hwmgr/vega10_pptable.h'),
         'vega10_pptable.py',
         [atom_types, atomfirmware]),
        (os.path.join(powerplay_dir, 'hwmgr/pptable_v1_0.h'),
         'pptable_v1_0.py',
         [atom_types, atombios]),
        (os.path.join(powerplay_inc_dir, 'smu_v11_0_pptable.h'),
         'smu_v11_0_pptable_navi10.py',
         [atom_types, atomfirmware,
          os.path.join(powerplay_inc_dir, 'smu11_driver_if_navi10.h')]),
    ]
    for header_file, py_name, includes in jobs:
        generate_ctypes(header_file, os.path.join(out_dir, py_name),
                        _cpp_flags_with_includes(includes))
def main():
    """CLI entry point: parse arguments and run the generator."""
    parser = argparse.ArgumentParser(
        description="Generate Python modules from kernel headers")
    parser.add_argument("-k", "--kernel-dir", required=True,
                        help="kernel source directory")
    parser.add_argument("--log-level", type=logging.getLevelName,
                        default=logging.INFO)
    args = parser.parse_args()
    run(kernel_dir=args.kernel_dir, log_level=args.log_level)


if __name__ == '__main__':
    main()
|
import argparse
from gitlab_api_client import GitlabApi
from user_config import get_gitlab_api_client
from subprocess import check_call
def create_project_action(main_args, progname: str):
    """Handle the `create` subcommand: parse its own args and create the
    project on the configured GitLab instance."""
    instance = main_args.gitlab_instance
    parser = argparse.ArgumentParser(
        description='Create new project',
        prog=f'{progname} gitlab_instance create')
    parser.add_argument('path', help='path of project to create')
    sub_args = parser.parse_args(main_args.args)
    __create_project(get_gitlab_api_client(instance), sub_args.path)
def __create_project(gitlab_api_client: GitlabApi, path: str):
    """Create the project inside its namespace, print its URLs, and add it
    as the local git remote `origin`."""
    *parent_groups, project_name = path.split("/")
    namespace = gitlab_api_client.get_namespace("/".join(parent_groups))
    repo = gitlab_api_client.create_project(project_name, namespace['id'])
    print(f"Created repo with url {repo['ssh_url_to_repo']}")
    print(f"Gitlab link: {repo['web_url']}")
    check_call(args=['git', 'remote', 'add', 'origin', repo['ssh_url_to_repo']])
# git remote add github git@github.com:Unity-Group/hipchat-download-emoji.git
|
from django.shortcuts import render
from .models import Story
from . models import Product
from django.views import generic
from django.views.generic import TemplateView
from django.contrib.auth.models import User
class storyListView(generic.ListView):
    """Generic list view over all Story objects."""
    model = Story


class storyDetailView(generic.DetailView):
    """Generic detail view for a single Story."""
    model = Story
class profile(storyListView):
    """The logged-in user's profile: lists only their own stories."""
    template_name = "catalog/profile.html"

    def get_queryset(self):
        # `Author` is the (capitalized) FK field name on the Story model.
        return Story.objects.filter(Author=self.request.user)
class instructionsView(storyListView):
    """Instructions page, listing only featured stories."""
    template_name = "instructions.html"

    def get_queryset(self):
        return Story.objects.filter(featured = True )
class productListView(generic.ListView):
model = Product
class productDetailView(generic.DetailView):
    """Show a single Product (Django default template: product_detail.html)."""
    model = Product
# All the functions needed to create, update and delete stories
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from PIL import Image #imports Pillow library
import request
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.core.files import File
import datetime
from .forms import NewStoryForm
def NewStory(request):
    """Create a new Story from a submitted NewStoryForm.

    GET renders an empty form; POST validates the submission (including the
    uploaded image), attaches the logged-in user as author when available,
    and redirects to the user's profile (authenticated) or the story list
    (anonymous). An invalid POST re-renders the bound form with its errors.
    """
    if request.method == 'POST':
        # request.FILES is required for the ImageField upload
        form = NewStoryForm(request.POST, request.FILES)
        if form.is_valid():
            # FIX: commit=False (as the original comment intended) lets us set
            # the author before the single database save; the old code saved
            # twice and bound the result to a local named `Story`, shadowing
            # the imported Story model class.
            story = form.save(commit=False)
            if request.user.is_authenticated:
                story.Author = request.user
            story.save()
            if request.user.is_authenticated:
                return HttpResponseRedirect(reverse('user-profile'))
            return HttpResponseRedirect(reverse('storyList'))
    else:
        form = NewStoryForm()
    return render(request, 'catalog/story_form.html', {'form': form})
class storyUpdate(UpdateView):
    """Edit an existing Story's title, photo and text."""
    model = Story
    fields = ['title', 'photo', 'story']
class storyDelete(DeleteView):
    """Delete a Story.

    NOTE(review): no success_url / get_success_url is defined here — Django's
    DeleteView raises ImproperlyConfigured after a successful delete unless
    one is provided somewhere; verify it is set (e.g. in a subclass or urls).
    """
    model = Story
|
import pytest
from torch.optim import SGD as _SGD
from neuralpy.optimizer import SGD
# Possible values that are invalid
# NOTE(review): these module-level lists are not referenced by the first
# parametrize table below (it repeats the invalid values as literals), and
# they are overwritten with valid values further down the file.
learning_rates = [-6, False, ""]
momentums = [-6, False, ""]
dampenings = ["asd", False, 3]
weight_decays = [-0.36, "asd", "", False]
nesteroves = [122, ""]
@pytest.mark.parametrize(
    "learning_rate, momentum, dampening, weight_decay, nesterov",
    [
        (-6, 0.33, 0.333, 0.333, False),
        ("invalid", 0.33, 0.333, 0.333, False),
        (0.00, 0.33, 0.333, 0.333, False),
        (0.001, -6, 0.333, 0.333, False),
        (0.001, False, 0.333, 0.333, False),
        (0.001, 0.1, False, 0.333, False),
        (0.001, 0.002, "invalid", 0.33, 122),
        (0.001, 0.002, 0.376, False, 122),
        (0.001, 0.002, 0.342, "test", 122),
        (0.001, 0.002, 0.342, 0.1, 122),
        (0.001, 0.002, 0.342, 0.1, "invalid"),
    ],
)
def test_sgd_should_throw_value_error(
    learning_rate, momentum, dampening, weight_decay, nesterov
):
    """Every row above contains at least one invalid argument, so the SGD
    constructor must reject it with ValueError."""
    kwargs = {
        "learning_rate": learning_rate,
        "momentum": momentum,
        "dampening": dampening,
        "weight_decay": weight_decay,
        "nesterov": nesterov,
    }
    with pytest.raises(ValueError):
        SGD(**kwargs)
# Possible values that are valid
# These lists feed the cartesian-product parametrize table of the next test.
learning_rates = [0.1, 0.002]
momentums = [0.1, 0.002]
dampenings = [0.35]
weight_decays = [0.1, 0.002]
nesteroves = [False, True]
@pytest.mark.parametrize(
    "learning_rate, momentum, dampening, weight_decay, nesterov",
    [
        (learning_rate, momentum, dampening, weight_decay, nesterov)
        for learning_rate in learning_rates
        for momentum in momentums
        for dampening in dampenings
        for weight_decay in weight_decays
        for nesterov in nesteroves
    ],
)
def test_sgd_get_layer_method(
    learning_rate, momentum, dampening, weight_decay, nesterov
):
    """get_optimizer() must wrap torch's SGD and echo back every argument."""
    details = SGD(
        learning_rate=learning_rate,
        momentum=momentum,
        dampening=dampening,
        weight_decay=weight_decay,
        nesterov=nesterov,
    ).get_optimizer()
    assert isinstance(details, dict)
    assert issubclass(details["optimizer"], _SGD)
    kwargs = details["keyword_arguments"]
    assert isinstance(kwargs, dict)
    assert kwargs["lr"] == learning_rate
    assert kwargs["momentum"] == momentum
    assert kwargs["dampening"] == dampening
    assert kwargs["weight_decay"] == weight_decay
    assert kwargs["nesterov"] == nesterov
def test_sgd_get_layer_method_without_parameter():
    """Default construction must yield lr=0.001, zero momentum/dampening/
    weight_decay and nesterov=False."""
    details = SGD().get_optimizer()
    assert isinstance(details, dict)
    assert issubclass(details["optimizer"], _SGD)
    kwargs = details["keyword_arguments"]
    assert isinstance(kwargs, dict)
    expected = {"lr": 0.001, "momentum": 0.0, "dampening": 0.0, "weight_decay": 0.0}
    for key, value in expected.items():
        assert kwargs[key] == value
    assert kwargs["nesterov"] is False
|
'''
Load full XML text (FDSys sourced) data files on disk into PostgreSQL table.
For XML files; TXT files are handled by a separate script.
Full text files should be downloaded like:
./run fdsys --collections=BILLS --congress=113 --store=xml,text --bulkdata=False
./run fdsys --collections=BILLS --congress=114 --store=xml,text --bulkdata=False
./run fdsys --collections=BILLS --congress=115 --store=xml,text --bulkdata=False
'''
import json
import os
import sqlalchemy
def main():
    """Connect to the local `congress` PostgreSQL database and load the
    full-text XML files into the congress_full_raw_xml table."""
    data_dir = '/home/ubuntu/andrew/congress-master/data'
    user = 'ubuntu'
    password = ''
    dbname = 'congress'
    host = 'localhost'
    local_port = '5432'
    # psycopg2 URL with host/port passed as query parameters
    es = (
        f"postgresql+psycopg2://{user}:{password}@/{dbname}"
        f"?host={host}&port={local_port}"
    )
    engine = sqlalchemy.create_engine(es)
    print(engine)
    table = 'congress_full_raw_xml'
    print(table)
    load(data_dir, engine, table)
def load(data_dir, engine, table):
    """(Re)create `table` and load every FDSys `document.xml` found under
    `data_dir` into it as (path, data) rows, then print row counts.

    The table name is interpolated into the SQL because identifiers cannot be
    bound parameters; it must come from trusted code, as it does in main().
    """
    with engine.connect() as conn:
        query = '''DROP TABLE IF EXISTS %s''' % (table)
        print(query)
        conn.execute(query)
        query = '''CREATE TABLE %s
                   (data_id SERIAL NOT NULL PRIMARY KEY,
                   path TEXT,
                   data TEXT)
                ''' % (table)
        print(query)
        conn.execute(query)
        # FIX: parameterized INSERT instead of splicing file contents into the
        # SQL string. The driver handles quoting, so documents containing
        # quotes, '%' or '$$' can no longer break (or inject into) the
        # statement, and the manual '%%' escaping hack becomes unnecessary.
        insert = sqlalchemy.text(
            'INSERT INTO %s (path, data) VALUES (:path, :data)' % (table,))
        for indx, (root, dirs, files) in enumerate(os.walk(os.path.expanduser(data_dir))):
            for f in files:
                fname = os.path.join(root, f)
                if fname.endswith("document.xml"):
                    # context manager closes each file promptly
                    with open(fname) as myfile:
                        data_str = myfile.read()
                    try:
                        conn.execute(insert, {'path': fname, 'data': data_str})
                    except Exception:
                        print('failed inserting %s' % fname)
                        raise
            if indx % 500 == 0:
                print('Completed %d.' % indx)
        query = '''select count(*) from %s;''' % (table)
        print(query)
        result = conn.execute(query)
        print('Rows in table: %d' % (result.fetchone()[0]))
        query = '''select count(*) from (select path from %s group by path) x;''' % (table)
        print(query)
        result = conn.execute(query)
        print('Unique path rows in table: %d' % (result.fetchone()[0]))
if __name__ == '__main__':
main()
|
# Copyright 2018 Saphetor S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jsonmodels import models, fields
__author__ = "ckopanos"
class CivicDetails(models.Base):
    """One CIViC evidence record: a variant, its supporting evidence item,
    associated drugs/disease, and the gene it belongs to.

    Every field is optional and nullable, mirroring upstream CIViC data where
    any attribute may be absent.
    """
    # variant-level attributes
    variant = fields.StringField(help_text="Variant", required=False, nullable=True)
    variant_summary = fields.StringField(help_text="Variant summary", required=False, nullable=True)
    variant_civic_url = fields.StringField(help_text="Variant CIViC URL", required=False, nullable=True)
    variant_origin = fields.StringField(help_text="Variant origin", required=False, nullable=True)
    pub_med_references = fields.ListField(items_types=(int, ), help_text="PubMed References", required=False, nullable=True)
    # evidence-item attributes
    clinical_significance = fields.StringField(help_text="Clinical significance", required=False, nullable=True)
    evidence_level = fields.StringField(help_text="Evidence level", required=False, nullable=True)
    evidence_statement = fields.StringField(help_text="Evidence statement", required=False, nullable=True)
    evidence_type = fields.StringField(help_text="Evidence type", required=False, nullable=True)
    evidence_status = fields.StringField(help_text="Evidence status", required=False, nullable=True)
    evidence_direction = fields.StringField(help_text="Evidence direction", required=False, nullable=True)
    evidence_civic_url = fields.StringField(help_text="Evidence CIViC URL", required=False, nullable=True)
    # clinical context
    drugs = fields.ListField(items_types=(str, ), help_text="Drugs", required=False, nullable=True)
    transcripts = fields.ListField(items_types=(str, ), help_text="Transcripts", required=False, nullable=True)
    representative_transcript = fields.StringField(help_text="Representative transcript", required=False, nullable=True)
    disease = fields.StringField(help_text="Disease", required=False, nullable=True)
    rating = fields.StringField(help_text="Rating", required=False, nullable=True)
    # gene-level attributes
    gene = fields.StringField(help_text="Gene", required=False, nullable=True)
    gene_civic_url = fields.StringField(help_text="Gene CIViC URL", required=False, nullable=True)
    entrez_id = fields.StringField(help_text="Entrez ID", required=False, nullable=True)
    doid = fields.StringField(help_text="DOID", required=False, nullable=True)
class Civic(models.Base):
    """Top-level container: the CIViC data version plus its detail records."""
    version = fields.StringField(help_text="Version")
    items = fields.ListField(help_text='Details', items_types=(CivicDetails, ))
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 17 16:10:58 2019
@author: pitonhik
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 27 15:45:06 2019
@author: pitonhik
"""
import cv2
import numpy as np
import tensorflow as tf
import sys
sys.path.append("..")
from utils import label_map_util
from utils import visualization_utils as vis_util
#import sql
import math
from imutils import paths
import numpy as np
import shutil
import imutils
import pickle
import cv2
import os
import time
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
#////////////////////
#///////////
#///////////Object_detection
#///////////
#////////////////////
import dlib
import cv2
import numpy as np
def reg():
    """Interactively register a person via stdin prompts.

    Returns [name, surname] when the user confirms with an answer containing
    'y', False when the answer contains 'exit', and re-prompts otherwise.
    Prompts are intentionally left in Russian (user-facing strings).
    """
    print('введите свои данные')
    name = input('введите ваше имя на Английском: ')
    fam = input('введите вашу фамилию на Английском: ')
    true = input('ваше имя: ' + str(name) + ' ваша фамилия: ' + str(fam) + ' y/n/exit')
    if 'y' in true:
        return [name, fam]
    elif 'exit' in true:
        return False
    else:
        # BUG FIX: propagate the retry's result. The original called reg()
        # without `return`, so after a re-prompt the function returned None
        # regardless of what the user entered.
        return reg()
def delet():
    """Interactively delete a person's dataset/<surname>_<name> directory.

    Returns 'not fail' if the removal raised (directory missing etc.), False
    when the user answers 'exit', and re-prompts on any other answer.
    Prompts are intentionally left in Russian (user-facing strings).
    """
    print('введите дааные удаляемого человека')
    name = input('введите имя на Английском: ')
    fam = input('введите фамилию на Английском: ')
    true = input('имя: ' + str(name) + ' фамилия: ' + str(fam) + ' y/n')
    if 'y' in true:
        try:
            sn = fam + '_' + name
            shutil.rmtree('dataset/' + sn)
        except Exception:
            # narrowed from a bare `except:` so Ctrl-C still interrupts;
            # best-effort behavior (swallow the error) is preserved
            return 'not fail'
    elif 'exit' in true:
        return False
    else:
        # BUG FIX: propagate the retry's result; the original recursed
        # without `return` and implicitly returned None.
        return delet()
def fase_save(name):
    """Capture 9 webcam snapshots of a person's face into dataset/<surname>_<name>/.

    `name` is the [first_name, surname] pair returned by reg(). A snapshot is
    written on every 80th frame in which the Haar cascade finds at least one
    face; pressing 'q' aborts early. User-facing prompts are in Russian.
    """
    print('программа снимает ваше лицо')
    sn=name[1]+'_'+name[0]
    # raises FileExistsError if the person was already registered
    os.mkdir('dataset/'+sn)
    # NOTE(review): os.getcwd() result is discarded — looks like leftover
    # debugging; confirm before removing.
    os.getcwd()
    faceCascade = cv2.CascadeClassifier('face_detection_model/haarcascade_frontalface_default.xml')
    video_capture = cv2.VideoCapture(0)
    i =0  # snapshot file-name counter
    foto = 0  # snapshots saved so far (target: 9)
    fram = 0  # frames seen so far; throttles saving to every 80th frame
    while foto < 9:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(100, 100),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        # Draw a rectangle around the faces
        print(fram)
        if fram > 0 and fram%80 ==0:
            # note: the full frame is written (not the face crop), once per
            # detected face, but the file-name counter advances only once
            for (x, y, w, h) in faces:
                cv2.imwrite("dataset/"+sn+"/0000"+ str(i) +".jpg", frame)
                foto +=1
            i += 1
        # Display the resulting frame
        fram+=1
        print('готово ' + str(foto)+ '/9 фото')
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
def build():
    """Extract a 128-d OpenFace embedding for every face image under
    dataset/ and serialize embeddings + person names to
    output/embeddings.pickle.

    Assumes each image contains at most one face: only the single
    highest-confidence detection per image is embedded. The person name is
    taken from the image's parent directory.
    """
    dataset = 'dataset'
    embeddings = 'output/embeddings.pickle'
    detetor = 'face_detection_model'
    em_model = 'openface_nn4.small2.v1.t7'
    # load our serialized face detector from disk
    print("[INFO] loading face detector...")
    protoPath = os.path.sep.join([detetor, "deploy.prototxt"])
    modelPath = os.path.sep.join([detetor,
                                  "res10_300x300_ssd_iter_140000.caffemodel"])
    detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
    # load our serialized face embedding model from disk
    print("[INFO] loading face recognizer...")
    embedder = cv2.dnn.readNetFromTorch(em_model)
    # grab the paths to the input images in our dataset
    print("[INFO] quantifying faces...")
    imagePaths = list(paths.list_images(dataset))
    # extracted facial embeddings and corresponding people names
    knownEmbeddings = []
    knownNames = []
    # total number of faces processed
    total = 0
    for (i, imagePath) in enumerate(imagePaths):
        try:
            # extract the person name from the image path
            print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
            name = imagePath.split(os.path.sep)[-2]
            # load the image, resize it to a width of 600 pixels (keeping the
            # aspect ratio), and grab the new dimensions
            image = cv2.imread(imagePath)
            image = imutils.resize(image, width=600)
            (h, w) = image.shape[:2]
            # localize faces with OpenCV's deep learning-based face detector
            imageBlob = cv2.dnn.blobFromImage(
                cv2.resize(image, (300, 300)), 1.0, (300, 300),
                (104.0, 177.0, 123.0), swapRB=False, crop=False)
            detector.setInput(imageBlob)
            detections = detector.forward()
            # ensure at least one face was found
            if len(detections) > 0:
                # assume each image has only ONE face: keep the detection
                # with the largest probability (renamed from `i`, which
                # shadowed the enumerate index in the original)
                best = np.argmax(detections[0, 0, :, 2])
                confidence = detections[0, 0, best, 2]
                if confidence > 0:
                    # compute the (x, y)-coordinates of the bounding box
                    box = detections[0, 0, best, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")
                    # extract the face ROI; skip faces that are too small
                    face = image[startY:endY, startX:endX]
                    (fH, fW) = face.shape[:2]
                    if fW < 20 or fH < 20:
                        continue
                    # obtain the 128-d quantification of the face
                    faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
                        (96, 96), (0, 0, 0), swapRB=True, crop=False)
                    embedder.setInput(faceBlob)
                    vec = embedder.forward()
                    # record the name + embedding pair
                    knownNames.append(name)
                    knownEmbeddings.append(vec.flatten())
                    total += 1
        except Exception:
            # BUG FIX: the original used a bare `except:` whose body was a
            # no-op `False` statement; narrow to Exception (Ctrl-C still
            # works) while keeping the best-effort skip of broken images.
            print('image incorrect')
    # dump the facial embeddings + names to disk
    print("[INFO] serializing {} encodings...".format(total))
    data = {"embeddings": knownEmbeddings, "names": knownNames}
    # context manager guarantees the file is closed even on a write error
    with open(embeddings, "wb") as f:
        f.write(pickle.dumps(data))
def train():
    """Train a linear SVM face recognizer on the serialized embeddings and
    write both the model and its label encoder to output/*.pickle."""
    embeddings = 'output/embeddings.pickle'
    # load the face embeddings (FIX: context manager instead of a leaked
    # open(...).read() handle)
    print("[INFO] loading face embeddings...")
    with open(embeddings, "rb") as f:
        data = pickle.loads(f.read())
    # encode the labels
    print("[INFO] encoding labels...")
    le = LabelEncoder()
    labels = le.fit_transform(data["names"])
    # train the model used to accept the 128-d embeddings of the face and
    # then produce the actual face recognition
    print("[INFO] training model...")
    recognizer = SVC(C=1.0, kernel="linear", probability=True)
    recognizer.fit(data["embeddings"], labels)
    # write the face recognition model to disk
    with open('output/recognizer.pickle', "wb") as f:
        f.write(pickle.dumps(recognizer))
    # write the label encoder to disk
    with open('output/le.pickle', "wb") as f:
        f.write(pickle.dumps(le))
# Module-level recognition models, loaded once at import time and shared by
# detect() below: the Caffe face detector, the OpenFace embedder, the trained
# SVM recognizer and its label encoder. Importing this module therefore
# requires the model files and output/*.pickle to exist on disk.
detetor ='face_detection_model'
em_model = 'openface_nn4.small2.v1.t7'
rec = 'output/recognizer.pickle'
lee ='output/le.pickle'
protoPath = os.path.sep.join([detetor, "deploy.prototxt"])
modelPath = os.path.sep.join([detetor,
    "res10_300x300_ssd_iter_140000.caffemodel"])
detectorm = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
embedder = cv2.dnn.readNetFromTorch(em_model)
# load the actual face recognition model along with the label encoder
recognizer = pickle.loads(open(rec, "rb").read())
le = pickle.loads(open(lee, "rb").read())
def detect(frame, detector):
    """Detect faces in a BGR frame and recognize them with the module-level
    embedder/recognizer/label-encoder models.

    Draws a labelled bounding box on the (resized) frame for every face whose
    detector confidence exceeds 0.2, and returns a list of dicts, one per
    accepted face:
      'name' -- recognized label, or 'unknown' when the classifier
                probability is <= 0.2
      'ver'  -- classifier probability of the chosen label
      'kord' -- [startX, startY, endX, endY] bounding box

    NOTE: the frame is resized to width 600 first, so returned coordinates
    refer to the resized image, not the caller's original frame.
    """
    global le
    global recognizer
    global embedder
    # resize the frame to have a width of 600 pixels (keeping the aspect
    # ratio) and grab the image dimensions
    frame = imutils.resize(frame, width=600)
    (h, w) = frame.shape[:2]
    # construct a blob and localize faces with the deep-learning detector
    imageBlob = cv2.dnn.blobFromImage(
        cv2.resize(frame, (300, 300)), 1.0, (300, 300),
        (104.0, 177.0, 123.0), swapRB=False, crop=False)
    detector.setInput(imageBlob)
    detections = detector.forward()
    inframe = []
    # loop over the detections
    for i in range(0, detections.shape[2]):
        # filter out weak detections
        confidence = detections[0, 0, i, 2]
        if confidence <= 0.2:
            continue
        # compute the (x, y)-coordinates of the bounding box for the face
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        # extract the face ROI; skip faces that are too small
        face = frame[startY:endY, startX:endX]
        (fH, fW) = face.shape[:2]
        if fW < 20 or fH < 20:
            continue
        # obtain the 128-d quantification of the face from the embedder
        faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,
            (96, 96), (0, 0, 0), swapRB=True, crop=False)
        embedder.setInput(faceBlob)
        vec = embedder.forward()
        # classify the embedding; low-probability matches become 'unknown'
        preds = recognizer.predict_proba(vec)[0]
        j = np.argmax(preds)
        proba = preds[j]
        name = le.classes_[j] if proba > 0.2 else 'unknown'
        # draw the bounding box of the face along with the associated
        # probability (both branches of the original drew identically)
        text = "{}: {:.2f}%".format(name, proba * 100)
        y = startY - 10 if startY - 10 > 10 else startY + 10
        cv2.rectangle(frame, (startX, startY), (endX, endY),
                      (0, 0, 255), 2)
        cv2.putText(frame, text, (startX, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
        # BUG FIX: the original created ONE dict outside the loop and
        # appended that same object for every face, so every entry in the
        # returned list aliased the last face's data. Build a fresh dict
        # per detection instead.
        obj = {
            'name': name,
            'ver': proba,
            'kord': [startX, startY, endX, endY],
        }
        inframe.append(obj)
    return inframe
def landmarks_to_np(landmarks, dtype="int"):
    """Convert a dlib landmarks object into an (N, 2) array of (x, y) points.

    `landmarks` only needs a `num_parts` attribute and a `part(i)` accessor
    returning objects with `.x`/`.y` (dlib's full_object_detection API).
    """
    pairs = [(landmarks.part(idx).x, landmarks.part(idx).y)
             for idx in range(landmarks.num_parts)]
    return np.array(pairs, dtype=dtype)
def get_centers(img, landmarks):
    """Estimate the two eye centers from 5-point landmarks and draw them.

    Fits a least-squares line through the four eye-corner landmarks, places
    each eye center at the mean x of its two corners on that line, draws the
    line and both centers on `img` in place, and returns
    (LEFT_EYE_CENTER, RIGHT_EYE_CENTER) as int32 arrays.
    """
    # linear regression through the four eye-corner landmarks
    EYE_LEFT_OUTTER = landmarks[2]
    EYE_LEFT_INNER = landmarks[3]
    EYE_RIGHT_OUTTER = landmarks[0]
    EYE_RIGHT_INNER = landmarks[1]
    x = ((landmarks[0:4]).T)[0]
    y = ((landmarks[0:4]).T)[1]
    A = np.vstack([x, np.ones(len(x))]).T
    k, b = np.linalg.lstsq(A, y, rcond=None)[0]
    x_left = (EYE_LEFT_OUTTER[0]+EYE_LEFT_INNER[0])/2
    x_right = (EYE_RIGHT_OUTTER[0]+EYE_RIGHT_INNER[0])/2
    LEFT_EYE_CENTER = np.array([np.int32(x_left), np.int32(x_left*k+b)])
    RIGHT_EYE_CENTER = np.array([np.int32(x_right), np.int32(x_right*k+b)])
    pts = np.vstack((LEFT_EYE_CENTER,RIGHT_EYE_CENTER))
    cv2.polylines(img, [pts], False, (255,0,0), 1) # draw the regression line
    cv2.circle(img, (LEFT_EYE_CENTER[0],LEFT_EYE_CENTER[1]), 3, (0, 0, 255), -1)
    cv2.circle(img, (RIGHT_EYE_CENTER[0],RIGHT_EYE_CENTER[1]), 3, (0, 0, 255), -1)
    return LEFT_EYE_CENTER, RIGHT_EYE_CENTER
def get_aligned_face(img, left, right):
    """Rotate/scale `img` so the eyes are level and a fixed distance apart,
    returning a 256x256 aligned face crop centered between the eyes."""
    desired_w = 256
    desired_h = 256
    desired_dist = desired_w * 0.5
    eyescenter = ((left[0]+right[0])*0.5 , (left[1]+right[1])*0.5)# midpoint between the eyes
    dx = right[0] - left[0]
    dy = right[1] - left[1]
    dist = np.sqrt(dx*dx + dy*dy)# inter-eye distance
    scale = desired_dist / dist # scale factor
    angle = np.degrees(np.arctan2(dy,dx)) # rotation angle
    M = cv2.getRotationMatrix2D(eyescenter,angle,scale)# compute the rotation matrix
    # update the translation component of the matrix
    tX = desired_w * 0.5
    tY = desired_h * 0.5
    M[0, 2] += (tX - eyescenter[0])
    M[1, 2] += (tY - eyescenter[1])
    aligned_face = cv2.warpAffine(img,M,(desired_w,desired_h))
    return aligned_face
def judge_eyeglass(img):
    """Heuristically decide whether an aligned 256x256 grayscale face wears
    glasses.

    Measures vertical-edge density (Sobel-y + Otsu threshold) in a
    nose-bridge ROI and two under-eye ROIs; returns True when the weighted
    edge measure exceeds 0.15.
    """
    img = cv2.GaussianBlur(img, (11,11), 0) # Gaussian blur
    sobel_y = cv2.Sobel(img, cv2.CV_64F, 0 ,1 , ksize=-1) # Sobel edge detection in the y direction
    sobel_y = cv2.convertScaleAbs(sobel_y) # convert back to uint8
    #cv2.imshow('sobel_y',sobel_y)
    edgeness = sobel_y
    # Otsu binarization
    retVal,thresh = cv2.threshold(edgeness,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    # ROI geometry, expressed as fractions of half the image height
    d = len(thresh) * 0.5
    x = np.int32(d * 6/7)
    y = np.int32(d * 3/4)
    w = np.int32(d * 2/7)
    h = np.int32(d * 2/4)
    x_2_1 = np.int32(d * 1/4)
    x_2_2 = np.int32(d * 5/4)
    w_2 = np.int32(d * 1/2)
    y_2 = np.int32(d * 8/7)
    h_2 = np.int32(d * 1/2)
    roi_1 = thresh[y:y+h, x:x+w] # extract the nose-bridge ROI
    roi_2_1 = thresh[y_2:y_2+h_2, x_2_1:x_2_1+w_2]
    roi_2_2 = thresh[y_2:y_2+h_2, x_2_2:x_2_2+w_2]
    roi_2 = np.hstack([roi_2_1,roi_2_2])
    measure_1 = sum(sum(roi_1/255)) / (np.shape(roi_1)[0] * np.shape(roi_1)[1])# edge-density measure
    measure_2 = sum(sum(roi_2/255)) / (np.shape(roi_2)[0] * np.shape(roi_2)[1])# edge-density measure
    measure = measure_1*0.3 + measure_2*0.7
    #cv2.imshow('roi_1',roi_1)
    #cv2.imshow('roi_2',roi_2)
    print(measure)
    if measure > 0.15:
        judge = True
    else:
        judge = False
    print(judge)
    return judge
# Module-level dlib models shared by detect_glass(): a frontal face detector
# plus a 5-point landmark predictor loaded from disk at import time.
predictor_path = "./data/shape_predictor_5_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
def detect_glass(img):
    """For every face dlib finds in `img`, decide whether it wears glasses.

    Returns a list of dicts with keys 'kord' ([x1, y1, x2, y2] face box) and
    'glass' (bool). Landmark dots, the eye regression line and eye centers
    are drawn on `img` in place as a side effect.
    """
    global detector
    global predictor
    if True:
        rez = []
        obg ={}
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # upsample once (second arg) to help find smaller faces
        rects = detector(gray, 1)
        for i, rect in enumerate(rects):
            obg ={}
            x_face = rect.left()
            y_face = rect.top()
            w_face = rect.right() - x_face
            h_face = rect.bottom() - y_face
            obg['kord']=[x_face,y_face,x_face+w_face,y_face+h_face]
            '''cv2.rectangle(img, (x_face,y_face), (x_face+w_face,y_face+h_face), (0,255,0), 2)
            cv2.putText(img, "Face #{}".format(i + 1), (x_face - 10, y_face - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2, cv2.LINE_AA)'''
            # 5-point landmarks -> numpy, then align the face on the eyes
            landmarks = predictor(gray, rect)
            landmarks = landmarks_to_np(landmarks)
            for (x, y) in landmarks:
                cv2.circle(img, (x, y), 2, (0, 0, 255), -1)
            LEFT_EYE_CENTER, RIGHT_EYE_CENTER = get_centers(img, landmarks)
            aligned_face = get_aligned_face(gray, LEFT_EYE_CENTER, RIGHT_EYE_CENTER)
            #cv2.imshow("aligned_face #{}".format(i + 1), aligned_face)
            judge = judge_eyeglass(aligned_face)
            if judge == True:
                obg['glass'] = True
                #cv2.putText(img, "With Glasses", (x_face + 100, y_face - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2, cv2.LINE_AA)
            else:
                obg['glass'] = False
                #cv2.putText(img, "No Glasses", (x_face + 100, y_face - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2, cv2.LINE_AA)'''
            rez.append(obg)
            del(obg)
        return rez
        #cv2.imshow("Result", img)
#cv2.imshow("Result", img)
# Previous frame's per-person result list; run_models_recognition() compares
# against it to count newly appearing people between frames.
last_f = []
def run_models_recognition(path):
    """Run the full safety-monitoring pipeline on a video.

    Per frame: detect glasses (dlib), recognize faces (detect()), find people
    (MobileNet-SSD Caffe), and detect helmets/vests (Faster R-CNN, TensorFlow).
    Detections are matched to each person by centroid distance (< 300 px),
    violations are printed (Russian, user-facing) and an annotated video is
    written. `path` == '0' selects the webcam; press 'q' to stop.

    Returns `kol`, the running count of distinct people seen.
    NOTE(review): `kol` is only assigned inside the `if len(rez)>0` branch —
    if no frame ever yields Faster R-CNN detections, the final `return kol`
    raises NameError. Likewise `frame` is used without checking `ret`, so a
    finished video stream crashes detect_glass(). Confirm and harden.
    """
    global detectorm
    global last_f
    # NOTE(review): kolvo, mail, files, iteration_Count, Col_pers, Sanded,
    # Helmet/Jacket/Person and centroids are assigned but never used — they
    # look like leftovers from an earlier version.
    kolvo = 0
    mail = '164123622@mail.ru'
    path = str(path)
    PATH_TO_CKPT = 'modelFaster/frozen_inference_graph.pb'
    PATH_TO_LABELS = 'modelFaster/Labelmap.pbtxt'
    NUM_CLASSES = 2
    files = ["C:\\tensorflow1\Done_Project_Faster_R-CNN_Caffe\Output\pdf"]
    # Load the label map.
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    print('PATH_TO_LABELS='+PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    # Load the Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.compat.v1.GraphDef()
        with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
        sess = tf.compat.v1.Session(graph=detection_graph)
    print('PATH_TO_CKPT='+PATH_TO_CKPT)
    # Input tensor is the image
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    # Output tensors are the detection boxes, scores, and classes
    # Each box represents a part of the image where a particular object was detected
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    # Each score represents level of confidence for each of the objects.
    # The score is shown on the result image, together with the class label.
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
    # Number of objects detected
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')
    #////////////////////
    #///////////
    #///////////Real_time_object_detection
    #///////////
    #////////////////////
    CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
        "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
        "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
        "sofa", "train", "tvmonitor"]
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
    net = cv2.dnn.readNetFromCaffe("modelCaffe/MobileNetSSD_deploy.prototxt.txt", "modelCaffe/MobileNetSSD_deploy.caffemodel")
    print("modelCaffe/MobileNetSSD_deploy.prototxt.txt")
    print("modelCaffe/MobileNetSSD_deploy.caffemodel")
    print("path: " + path)
    if path == '0':
        PATH_TO_VIDEO = 0
    else:
        PATH_TO_VIDEO = path
    # Open video file
    print(PATH_TO_VIDEO)
    video = cv2.VideoCapture(PATH_TO_VIDEO)
    w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print('(' + str(w) + ',' + str(h) + ')')
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # NOTE(review): '\T' in this Windows-style path is a literal backslash+T
    # only because it is not a recognized escape — fragile; confirm intended.
    out = cv2.VideoWriter('Output\TestVideo1.avi', fourcc, 5, (w, h))
    iteration_Count = 0
    Col_pers = 0
    Sanded = False
    sh = 0
    while(video.isOpened()):
        sh+=1
        ret , frame = video.read()
        glass = detect_glass(frame)
        det =detect(frame,detectorm)
        centroids = []
        myrez = []
        Helmet = False
        Jacket = False
        Person = False
        #Caffe model///////////////////////////////////////////////////////////////////////////////////////////////////////////////
        # MobileNet-SSD pass: keep only class 15 ("person") above 0.3
        Resized = cv2.resize(frame, (300, 300))
        blob = cv2.dnn.blobFromImage(Resized, 0.007843, (300, 300), 127.5)
        net.setInput(blob)
        detections = net.forward()
        for i in np.arange(0, detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > 0.3:
                idx = int(detections[0, 0, i, 1])
                if idx == 15:
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")
                    label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
                    cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
                    y = startY - 15 if startY - 15 > 15 else startY + 15
                    cv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
                    myrez.append([CLASSES[idx], startX, startY ,endX, endY])
        #Faster R-CNN//////////////////////////////////////////////////////////////////////////////////////////////////////////////
        frame_expanded = np.expand_dims(frame, axis=0)
        # Perform the actual detection by running the model with the image as input
        (boxes, scores, classes, num) = sess.run(
            [detection_boxes, detection_scores, detection_classes, num_detections],
            feed_dict={image_tensor: frame_expanded})
        # Draw the results of the detection (aka 'visulaize the results')
        myimg , rez= vis_util.visualize_boxes_and_labels_on_image_array(
            frame,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8,
            min_score_thresh=0.60)
        if len(rez)>0:
            lang = len(rez)
            #print(lang)
            for i in range(lang):
                widt = int(rez[i][1]) + ((int(rez[i][2]) - int(rez[i][1]))/2)
                heidt = int(rez[i][3]) + ((int(rez[i][4]) - int(rez[i][3]))/2)
                centroids.append([rez[i][0], widt, heidt])
            # split Faster R-CNN detections: 'Chartreuse' boxes are vests,
            # everything else is treated as a helmet
            helmet =[]
            jasket =[]
            for j in range(len(rez)):
                if rez[j][0]=='Chartreuse':
                    jasket.append(rez[j])
                else:
                    helmet.append(rez[j])
            leng = max(len(glass),len(det))
            lend = max(leng,len(myrez))
            # build one record per detected person, matching helmet/vest/
            # glasses/name detections by centroid distance (< 300 px)
            main_mas_obg =[]
            main_obg={}
            for i in range(len(myrez)):
                main_obg={}
                main_obg['person'] = myrez[i][1:]
                main_obg['helmet']=False
                main_obg['jasket']=False
                main_obg['glass']=False
                main_obg['name'] = 'unknown'
                senter = [main_obg['person'][0] + (main_obg['person'][2]-main_obg['person'][0])/2,main_obg['person'][1] + (main_obg['person'][3]-main_obg['person'][1])/2]
                #print('sender')
                ##print('llllllllllllllll')
                #print(len(helmet))
                #print(len(jasket))
                for j in range(len(helmet)):
                    hsender = [helmet[j][1] + (helmet[j][3]-helmet[j][1])/2,helmet[j][2] + (helmet[j][4]-helmet[j][2])/2]
                    #print('hsender')
                    #print(hsender)
                    lengt = math.sqrt((hsender[0]-senter[0])*(hsender[0]-senter[0])+(hsender[1]-senter[1])*(hsender[1]-senter[1]))
                    #print(lengt)
                    if lengt < 300:
                        main_obg['helmet']=True
                for j in range(len(jasket)):
                    hsender = [jasket[j][1] + (jasket[j][3]-jasket[j][1])/2,jasket[j][2] + (jasket[j][4]-jasket[j][2])/2]
                    #print('aaa')
                    #print(hsender)
                    lengt = math.sqrt((hsender[0]-senter[0])*(hsender[0]-senter[0])+(hsender[1]-senter[1])*(hsender[1]-senter[1]))
                    #print(lengt)
                    if lengt < 300:
                        main_obg['jasket']=True
                for j in range(len(glass)):
                    hsender = [glass[j]['kord'][0] + (glass[j]['kord'][2]-glass[j]['kord'][0])/2,glass[j]['kord'][1] + (glass[j]['kord'][3]-glass[j]['kord'][1])/2]
                    lengt = math.sqrt((hsender[0]-senter[0])*(hsender[0]-senter[0])+(hsender[1]-senter[1])*(hsender[1]-senter[1]))
                    if lengt < 300:
                        main_obg['glass'] = glass[j]['glass']
                        break
                for j in range(len(det)):
                    hsender = [det[j]['kord'][0] + (det[j]['kord'][2]-det[j]['kord'][0])/2,det[j]['kord'][1] + (det[j]['kord'][3]-det[j]['kord'][1])/2]
                    lengt = math.sqrt((hsender[0]-senter[0])*(hsender[0]-senter[0])+(hsender[1]-senter[1])*(hsender[1]-senter[1]))
                    if lengt < 300:
                        main_obg['name'] = det[j]['name']
                        break
                for j in range(len(det)):
                    cv2.rectangle(frame, (det[j]['kord'][0], det[j]['kord'][1]), (det[j]['kord'][2], det[j]['kord'][3]),(0, 0, 255), 2)
                    text = det[j]['name']
                    cv2.putText(frame, text, (det[j]['kord'][0], det[j]['kord'][1]-15), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
                main_mas_obg.append(main_obg)
                del(main_obg)
            #print('////////////////')
            print(main_mas_obg)
            # running count of distinct people: grows when more people are
            # present than in the previous processed frame
            if sh==1:
                kol = len(main_mas_obg)
            else:
                asd = len(main_mas_obg) - len(last_f)
                if asd > 0:
                    kol+=asd
            # violation checks; `line` is a hard-coded x-coordinate boundary
            line = 500
            for j in range(len(main_mas_obg)):
                nar = 0
                if main_mas_obg[j]['helmet']==False:
                    '''ЗАНОС В БД'''
                    print('нет каски у ' + str(main_mas_obg[j]['name']))
                    nar+=1
                if main_mas_obg[j]['jasket']==False:
                    '''ЗАНОС В БД'''
                    print('нет жилета у ' + str(main_mas_obg[j]['name']))
                    nar+=1
                if main_mas_obg[j]['glass']==False:
                    '''ЗАНОС В БД'''
                    print('нет очков у ' + str(main_mas_obg[j]['name']))
                    nar+=1
                if main_mas_obg[j]['person'][2]>line:
                    '''ЗАНОС В БД'''
                    print('пересечение линии или зоны '+ str(main_mas_obg[j]['name']))
                    nar+=1
                if nar > 0 and main_mas_obg[j]['name'] == 'kar_tima':
                    image = 'img.jpg'
                    cv2.imwrite(image, frame)
                    #sql.input_sql(image)
            last_f = main_mas_obg
        out.write(frame)
        cv2.imshow('Object detector', frame)
        #print('--------------------')
        # Press 'q' to quit
        if cv2.waitKey(1) == ord('q'):
            break
    video.release()
    out.release()
    cv2.destroyAllWindows()
    return kol
|
from Components.Language import language
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
import gettext
PluginLanguageDomain = "NetworkBrowser"
PluginLanguagePath = "SystemPlugins/NetworkBrowser/locale"
def localeInit():
    # Bind the plugin's gettext domain to its bundled locale directory so
    # _() below can resolve plugin-specific translations.
    gettext.bindtextdomain(PluginLanguageDomain, resolveFilename(SCOPE_PLUGINS, PluginLanguagePath))
def _(txt):
    # Translate txt via the plugin's domain; when dgettext returns the input
    # unchanged (no plugin translation), fall back to the global domain.
    # Python 2 source (print statement) — part of an Enigma2 plugin.
    t = gettext.dgettext(PluginLanguageDomain, txt)
    if t == txt:
        print "[NetworkBrowser] fallback to default translation for", txt
        t = gettext.gettext(txt)
    return t
# Initialize translations at import time and re-bind whenever the user
# changes the system language.
localeInit()
language.addCallback(localeInit)
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import common
import numpy as np
import unittest
from trajectory import Trajectory
from constraints import *
from measurements import get_measurements, create_anchors
class TestGeometry(unittest.TestCase):
    """Verify that a ground-truth trajectory satisfies the linear constraints
    built from simulated anchor/distance measurements.

    The constraint builders (get_constraints_identity, get_constraints_D,
    get_constraints_symmetry, get_C_constraints) come from the star-import of
    `constraints` at the top of the file.
    """
    def setUp(self):
        # 2-D trajectory with 5 complexity coefficients; basis/D_topright are
        # populated lazily by set_measurements()
        self.traj = Trajectory(n_complexity=5, dim=2)
        self.n_anchors = 4
        self.basis = []
        self.D_topright = []
    def set_measurements(self, seed=None):
        """Draw a seeded random trajectory + anchors and simulate 20 measurements."""
        # random trajectory and anchors
        self.traj.set_coeffs(seed=seed)
        if seed is not None:
            np.random.seed(seed)
        self.anchors = create_anchors(self.traj.dim, self.n_anchors)
        # get measurements
        self.basis, self.D_topright = get_measurements(self.traj, self.anchors, seed=seed, n_samples=20)
    def test_constraints(self):
        """ Check the correct trajectory satisfies constraints. """
        # repeat over 100 random seeds for robustness
        for i in range(100):
            self.set_measurements(i)
            #check the correct trajectory satisfies constraints
            e_ds, e_dprimes, deltas = get_constraints_identity(self.traj.n_complexity)
            for e_d, e_dprime, delta in zip(e_ds, e_dprimes, deltas):
                np.testing.assert_equal(e_d.T @ self.traj.Z_opt @ e_dprime, delta)
            # distance constraints: both quadratic-form and flattened forms
            t_mns, D_mns = get_constraints_D(self.D_topright, self.anchors, self.basis)
            for t_mn, D_topright_mn in zip(t_mns, D_mns):
                t_mn = np.array(t_mn)
                np.testing.assert_almost_equal(t_mn.T @ self.traj.Z_opt @ t_mn, D_topright_mn)
                tmp = t_mn @ t_mn.T
                A = tmp.flatten()
                self.assertAlmostEqual(A @ (self.traj.Z_opt).flatten(), D_topright_mn)
            # test vectorized form of both constraints
            A, b = get_constraints_identity(self.traj.n_complexity, vectorized=True)
            np.testing.assert_array_almost_equal(A @ self.traj.Z_opt.flatten(), b)
            A, b = get_constraints_D(self.D_topright, self.anchors, self.basis, vectorized=True, A=A, b=b)
            np.testing.assert_array_almost_equal(A @ self.traj.Z_opt.flatten(), b)
            A, b = get_constraints_symmetry(self.traj.n_complexity, vectorized=True)
            np.testing.assert_array_almost_equal(A @ self.traj.Z_opt.flatten(), b)
    def test_C_constraints(self):
        """The stacked [coeffs, Gram-matrix] vector must satisfy the C constraints."""
        for i in range(100):
            self.set_measurements(i)
            # L is the Gram matrix of the trajectory coefficients
            L = self.traj.coeffs.T.dot(self.traj.coeffs)
            T_A, T_B, b = get_C_constraints(self.D_topright, self.anchors, self.basis)
            T = np.c_[T_A, -T_B / 2]
            x = np.r_[self.traj.coeffs.flatten(), L.flatten()]
            np.testing.assert_array_almost_equal(T @ x, b)
# Run the geometry test-suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from netforce.database import get_connection
from datetime import *
import time
class Campaign(Model):
    """Marketing campaign model.

    A campaign sends an email template to every target of its linked target
    lists, subject to daily/hourly creation limits, and exposes aggregate
    delivery/open/click statistics as function fields.
    """
    _name = "mkt.campaign"
    _string = "Campaign"
    _fields = {
        "name": fields.Char("Campaign Name", required=True, search=True),
        "date": fields.Date("Date", required=True, search=True),
        "target_lists": fields.Many2Many("mkt.target.list", "Target Lists"),
        "email_tmpl_id": fields.Many2One("email.template", "Email Template"),
        "mailbox_id": fields.Many2One("email.mailbox", "Email Mailbox"),
        "comments": fields.One2Many("message", "related_id", "Comments"),
        "state": fields.Selection([["active", "Active"], ["inactive", "Inactive"]], "Status", required=True),
        "limit_day": fields.Integer("Daily Limit"),
        "limit_hour": fields.Integer("Hourly Limit"),
        "num_targets": fields.Integer("Number targets", function="get_stats", function_multi=True),
        "num_create": fields.Integer("Number emails created", function="get_stats", function_multi=True),
        "percent_create": fields.Float("% created", function="get_stats", function_multi=True),
        "num_sent": fields.Integer("Number emails sent", function="get_stats", function_multi=True),
        "percent_sent": fields.Float("% sent", function="get_stats", function_multi=True),
        "num_delivered": fields.Integer("Number emails delivered", function="get_stats", function_multi=True),
        "percent_delivered": fields.Float("% delivered", function="get_stats", function_multi=True),
        "num_bounced": fields.Integer("Number emails bounced", function="get_stats", function_multi=True),
        "percent_bounced": fields.Float("% bounced", function="get_stats", function_multi=True),
        "num_rejected": fields.Integer("Number emails rejected", function="get_stats", function_multi=True),
        "percent_rejected": fields.Float("% rejected", function="get_stats", function_multi=True),
        "num_opened": fields.Integer("Number emails opened", function="get_stats", function_multi=True),
        "percent_opened": fields.Float("% opened", function="get_stats", function_multi=True),
        "num_clicked": fields.Integer("Number emails clicked", function="get_stats", function_multi=True),
        "percent_clicked": fields.Float("% clicked", function="get_stats", function_multi=True),
        "num_create_day": fields.Integer("Emails created within day", function="get_stats", function_multi=True),
        "num_create_hour": fields.Integer("Emails created within hour", function="get_stats", function_multi=True),
        "emails": fields.One2Many("email.message", "related_id", "Emails"),
        "min_target_life": fields.Integer("Minimum Target Life (days)"),
    }
    _defaults = {
        "date": lambda *a: time.strftime("%Y-%m-%d"),
        "state": "active",
    }

    def create_emails_all(self, context={}):
        """Cron entry point: create pending emails for every active campaign."""
        for obj in self.search_browse([["state", "=", "active"]]):
            obj.create_emails()

    def create_emails(self, ids, context={}):
        """Create emails for the campaign's targets.

        Skips addresses already emailed by this campaign, honours
        ``min_target_life`` and the daily/hourly creation limits, then
        returns an action redisplaying the campaign form with a flash
        message reporting how many emails were created.
        """
        obj = self.browse(ids)[0]
        if obj.state != "active":
            raise Exception("Invalid state")
        if not obj.email_tmpl_id:
            raise Exception("Missing email template")
        # Remaining creation quota for this run (None means unlimited):
        # the tighter of the daily and hourly limits wins.
        limit = None
        if obj.limit_day:
            limit = obj.limit_day - obj.num_create_day
        if obj.limit_hour:
            l = obj.limit_hour - obj.num_create_hour
            if limit is None or l < limit:
                limit = l
        # Collect addresses that were already emailed by this campaign.
        sent_emails = set()
        for email in obj.emails:
            if not email.name_id:
                continue
            if email.name_id._model != "mkt.target":
                continue
            target_id = email.name_id.id
            res = get_model("mkt.target").search([["id", "=", email.name_id.id]])  # XXX
            if not res:
                continue
            target = get_model("mkt.target").browse(target_id)
            sent_emails.add(target.email)
        # Hoisted out of the loop: settings are loop-invariant.
        settings = get_model("settings").browse(1)
        count = 0
        reached_limit = False
        for tl in obj.target_lists:
            if reached_limit:
                break
            for target in tl.targets:
                if target.email in sent_emails:
                    continue
                if obj.min_target_life and target.target_life < obj.min_target_life:
                    continue
                if limit is not None and count >= limit:
                    # BUG FIX: previously only the inner loop was broken, so
                    # the limit could be exceeded across several target lists.
                    reached_limit = True
                    break
                data = {
                    "settings": settings,
                    "obj": target,
                }
                obj.email_tmpl_id.create_email(
                    data, name_id="mkt.target,%d" % target.id, related_id="mkt.campaign,%d" % obj.id, mailbox_id=obj.mailbox_id)
                # Remember the address so duplicate targets (within or across
                # lists) do not receive the same campaign email twice.
                sent_emails.add(target.email)
                count += 1
        db = get_connection()
        db.commit()
        return {
            "next": {
                "name": "campaign",
                "mode": "form",
                "active_id": obj.id,
            },
            "flash": "%d emails created" % count,
        }

    def get_stats(self, ids, context={}):
        """Compute the campaign statistics function fields (multi).

        Returns a dict mapping each campaign id to its counter and
        percentage values; all counters default to 0 when no emails exist.
        """
        vals = {}
        for obj_id in ids:
            vals[obj_id] = {
                "num_targets": 0,
                "num_create": 0,
                "num_sent": 0,
                "num_delivered": 0,
                "num_bounced": 0,
                "num_rejected": 0,
                "num_opened": 0,
                "num_clicked": 0,
                "num_create_day": 0,
                "num_create_hour": 0,
            }
        db = get_connection()
        # Distinct target addresses reachable through the linked lists.
        res = db.query(
            "SELECT c.id,COUNT(DISTINCT t.email) FROM mkt_campaign c JOIN m2m_mkt_campaign_mkt_target_list r ON r.mkt_campaign_id=c.id JOIN mkt_target t ON t.list_id=r.mkt_target_list_id WHERE c.id IN %s GROUP BY c.id", tuple(ids))
        for r in res:
            vals[r.id]["num_targets"] = r.count
        related_ids = tuple(["mkt.campaign,%d" % x for x in ids])
        # Total emails created for each campaign.
        res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s GROUP BY related_id",
                       related_ids)
        for r in res:
            obj_id = int(r.related_id.split(",")[1])
            vals[obj_id]["num_create"] = r.count
        # Emails created within the last day / hour (used for rate limits).
        for field, hours in (("num_create_day", 24), ("num_create_hour", 1)):
            d = (datetime.now() - timedelta(hours=hours)).strftime("%Y-%m-%d %H:%M:%S")
            res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s AND date>%s GROUP BY related_id",
                           related_ids, d)
            for r in res:
                obj_id = int(r.related_id.split(",")[1])
                vals[obj_id][field] = r.count
        # Per-state and per-flag counters, one query per condition.
        for field, cond in (("num_sent", "state='sent'"),
                            ("num_delivered", "state='delivered'"),
                            ("num_bounced", "state='bounced'"),
                            ("num_rejected", "state='rejected'"),
                            ("num_opened", "opened"),
                            ("num_clicked", "clicked")):
            res = db.query("SELECT related_id,COUNT(*) FROM email_message WHERE related_id IN %s AND " + cond + " GROUP BY related_id",
                           related_ids)
            for r in res:
                obj_id = int(r.related_id.split(",")[1])
                vals[obj_id][field] = r.count
        # Percentages: created relative to targets, everything else relative
        # to created; None when the denominator is zero.
        for obj in self.browse(ids):
            v = vals[obj.id]
            num_create = v["num_create"]
            v["percent_create"] = num_create * 100.0 / v["num_targets"] if v["num_targets"] else None
            for f in ("sent", "delivered", "bounced", "rejected", "opened", "clicked"):
                v["percent_" + f] = v["num_" + f] * 100.0 / num_create if num_create else None
        return vals
# Register the model with the netforce ORM so it becomes available via get_model().
Campaign.register()
|
from __future__ import print_function, division
from torch.nn.modules.loss import _assert_no_grad, _Loss
import torch.nn.functional as F
import torch
# define a customized loss function for future development
class WeightedBCELoss(_Loss):
    """Per-element weighted binary cross entropy.

    ``forward`` takes an explicit ``weight`` tensor alongside the usual
    prediction/target pair and delegates to ``F.binary_cross_entropy``.
    """

    def __init__(self, size_average=True, reduce=True):
        super(WeightedBCELoss, self).__init__(size_average, reduce)

    def forward(self, input, target, weight):
        _assert_no_grad(target)
        return F.binary_cross_entropy(
            input, target, weight=weight,
            size_average=self.size_average, reduce=self.reduce)
# Weighted binary cross entropy + Dice loss
class BCLoss(_Loss):
    """Weighted binary cross entropy combined with a soft Dice loss.

    ``forward`` returns the two terms separately as ``(bce, dice)``.
    """

    def __init__(self, size_average=True, reduce=True):
        super(BCLoss, self).__init__(size_average, reduce)

    def dice_loss(self, input, target):
        """Soft Dice loss (smooth=1), averaged over the batch dimension."""
        smooth = 1.
        n_samples = input.size()[0]
        total = 0.
        for idx in range(n_samples):
            pred = input[idx].view(-1)
            truth = target[idx].view(-1)
            overlap = (pred * truth).sum()
            total += 1 - ((2. * overlap + smooth) / (pred.sum() + truth.sum() + smooth))
        # size_average=True for the dice loss
        return total / float(n_samples)

    def forward(self, input, target, weight):
        """Return ``(weighted BCE loss, Dice loss)`` for one batch."""
        _assert_no_grad(target)
        bce = F.binary_cross_entropy(input, target, weight, self.size_average,
                                     self.reduce)
        dice = self.dice_loss(input, target)
        return bce, dice
# Focal Loss
class FocalLoss(_Loss):
    """Per-element weighted focal loss with gamma=2."""

    def __init__(self, size_average=True, reduce=True):
        super().__init__(size_average, reduce)

    def focal_loss(self, input, target, weight):
        """Weighted focal loss, averaged per sample and then over the batch."""
        gamma = 2
        eps = 1e-7
        n_samples = input.size()[0]
        total = 0.
        for idx in range(n_samples):
            truth = target[idx].view(-1)
            w = weight[idx].view(-1)
            # clamp keeps log() finite at the 0/1 boundaries
            pred = input[idx].view(-1).clamp(eps, 1.0 - eps)
            # the (1-p)^gamma / p^gamma factors down-weight easy examples
            pos_term = -1 * truth * torch.log(pred) * (1 - pred) ** gamma
            neg_term = -1 * (1 - truth) * torch.log(1 - pred) * (pred) ** gamma
            total += ((pos_term + neg_term) * w).mean()
        return total / float(n_samples)

    def forward(self, input, target, weight):
        """Weighted Focal Loss."""
        _assert_no_grad(target)
        if not (target.size() == input.size()):
            raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
        return self.focal_loss(input, target, weight)
# Focal Loss + Dice Loss
class BCLoss_focal(_Loss):
    """Weighted focal loss combined with a soft Dice loss.

    ``forward`` returns the two terms separately as ``(focal, dice)``.
    """

    def __init__(self, size_average=True, reduce=True):
        super().__init__(size_average, reduce)

    def dice_loss(self, input, target):
        """Soft Dice loss (smooth=1), averaged over the batch dimension."""
        smooth = 1.
        n_samples = input.size()[0]
        total = 0.
        for idx in range(n_samples):
            pred = input[idx].view(-1)
            truth = target[idx].view(-1)
            overlap = (pred * truth).sum()
            total += 1 - ((2. * overlap + smooth) / (pred.sum() + truth.sum() + smooth))
        # size_average=True for the dice loss
        return total / float(n_samples)

    def focal_loss(self, input, target, weight):
        """Weighted focal loss (gamma=2), averaged per sample then over the batch."""
        gamma = 2
        eps = 1e-7
        n_samples = input.size()[0]
        total = 0.
        for idx in range(n_samples):
            truth = target[idx].view(-1)
            w = weight[idx].view(-1)
            # clamp keeps log() finite at the 0/1 boundaries
            pred = input[idx].view(-1).clamp(eps, 1.0 - eps)
            pos_term = -1 * truth * torch.log(pred) * (1 - pred) ** gamma
            neg_term = -1 * (1 - truth) * torch.log(1 - pred) * (pred) ** gamma
            total += ((pos_term + neg_term) * w).mean()
        return total / float(n_samples)

    def forward(self, input, target, weight):
        """Return ``(focal loss, Dice loss)`` for one batch."""
        _assert_no_grad(target)
        if not (target.size() == input.size()):
            raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
        focal = self.focal_loss(input, target, weight)
        dice = self.dice_loss(input, target)
        return focal, dice
|
#!/usr/bin/python3
"""The module is to create ziti identity and eroll it for the loacal endpoint."""
from os.path import expanduser
from json import loads
import traceback
import argparse
import logging
from subprocess import Popen, PIPE
from requests import post, get
from sys import exit
def restful(url, rest_method, headers, payload=None):
    """Issue an HTTP request through *rest_method* and return the JSON payload.

    Parameters
    ----------
    url : target URL.
    rest_method : callable such as ``requests.post`` or ``requests.get``.
    headers : header dict passed through to the request.
    payload : optional request body; omitted from the call when falsy.

    Returns
    -------
    tuple : ``(data, status_code)`` where ``data`` is the ``'data'`` field
        of the decoded JSON response body.
    """
    kwargs = {"url": url, "headers": headers, "verify": False}
    if payload:
        kwargs["data"] = payload
    response = rest_method(**kwargs)
    return loads(response.content)['data'], response.status_code
def ziti_authenticate(controller_ip, username, password):
    """Log in to the Ziti controller and return a session token.

    Returns ``None`` when authentication fails for any reason; the error
    is logged rather than raised so callers can decide how to proceed.
    """
    try:
        auth_url = 'https://%s:1280/authenticate?method=password' % controller_ip
        payload = "{\n \"username\": \"%s\",\n \"password\": \"%s\"\n}" \
                  % (username, password)
        headers = {"content-type": "application/json"}
        data, status = restful(auth_url, post, headers, payload)
        logging.info(status)
        return data['token']
    except Exception as excpt:
        logging.error(str(excpt))
        logging.debug(traceback.format_exc())
        return None
def debug(debug=False):
    """Configure file logging; DEBUG level when *debug* is truthy, else INFO."""
    log_level = logging.DEBUG if debug else logging.INFO
    # configure logging
    try:
        logging.basicConfig(filename=expanduser("~/update_indentity.log"),
                            format='%(asctime)s-ziti-%(levelname)s,%(message)s',
                            datefmt='%Y-%m-%d-%H:%M:%S',
                            level=log_level)
    except Exception as excpt:
        print('enroll-ziti-tunneler: '+str(excpt))
    # write separator in log file if debug has been enabled.
    logging.debug("----------------debug-enabled----------------")
def create_url(controller_ip, endpoint):
    """Return the controller REST URL (HTTPS, port 1280) for *endpoint*."""
    return "https://{}:1280/{}".format(controller_ip, endpoint)
def create_headers(session_token):
    """Return JSON request headers carrying the controller session token."""
    headers = {"content-type": "application/json"}
    headers["zt-session"] = session_token
    return headers
def ziti_tunnel():
    """Create a Ziti identity, fetch its enrollment JWT, and enroll it locally.

    Uses the module-level ``args`` namespace parsed under ``__main__``.
    Exits with status 1 when authentication fails, and returns early when
    identity creation or the JWT download fails (previously execution
    continued and crashed with a NameError on the undefined
    ``identity_id``/``jwt_file``).
    """
    # login to controller to gain a session token
    session_token = ziti_authenticate(args.controller_ip, args.username, args.password)
    if not session_token:
        exit(1)
    # create an identity
    try:
        payload = "{\"name\":\"%s\",\"type\":\"Device\",\"isAdmin\":false,\"roleAttributes\":[],\
                  \"enrollment\":{\"ott\":true}}" % args.identity_name
        response_data = restful(create_url(args.controller_ip, "identities"),
                                post, create_headers(session_token), payload)
        logging.info(response_data[1])
        identity_id = response_data[0]['id']
    except Exception as excpt:
        logging.error(str(excpt))
        logging.debug(traceback.format_exc())
        # BUG FIX: stop here -- identity_id would be undefined below.
        return
    # download jwt token
    try:
        response_data = restful(create_url(args.controller_ip, "identities/"+identity_id),
                                get, create_headers(session_token))
        logging.info(response_data[1])
        jwt_file = "%s/.config/ziti/ziti-identities/%s.jwt" \
            % (args.home_directory, args.identity_name)
        with open(jwt_file, "w") as f:
            f.write(response_data[0]['enrollment']['ott']['jwt'])
    except Exception as excpt:
        logging.error(str(excpt))
        logging.debug(traceback.format_exc())
        # BUG FIX: stop here -- jwt_file would be undefined below.
        return
    # enroll identity
    try:
        cmd = ['/usr/local/bin/ziti-enroller', '-j', jwt_file]
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        output, error = proc.communicate()
        if output:
            logging.info(output)
        if error:
            logging.error(error)
    except Exception as excpt:
        logging.error(str(excpt))
        logging.debug(traceback.format_exc())
def version():
    """Print this module's version string to stdout."""
    print("1.0.0")
if __name__ == '__main__':
    # Parse command-line arguments, configure logging, then enroll.
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', action='store_true',
                        help='enable debug log in log file output')
    parser.add_argument('-hd', '--home_directory', default='/home/nfadmin',
                        help='set home directory. Default: /home/nfadmin')
    parser.add_argument('-v', '--version', action='version',
                        version='1.0.0')
    parser.add_argument('-u', '--username', default='admin',
                        help='controller username, default is admin')
    # "required" expects a bool; the original passed the truthy string 'yes',
    # which only worked by accident.
    parser.add_argument('-p', '--password', required=True, help='controller password')
    parser.add_argument('-cip', '--controller_ip', required=True, help='controller ip')
    parser.add_argument('-i', '--identity_name', required=True, help='identity for an endpoint')
    # get arguments
    args = parser.parse_args()
    # debug(False) is identical to debug(), so one call covers both cases.
    debug(args.debug)
    ziti_tunnel()
|
from itertools import *
# zip() truncates at the shortest input, while itertools.zip_longest()
# pads the shorter one with None so every element is consumed.
# (ranges are re-iterable, so a single pair of objects suffices.)
long_range = range(3)
short_range = range(2)
print('zip stops early:')
print(list(zip(long_range, short_range)))
print('\nzip_longest processes all of the values')
print(list(zip_longest(long_range, short_range)))
|
"""
======================================
Probabilistic Classifier Chain Example
======================================
An example of :class:`skml.problem_transformation.ProbabilisticClassifierChain`
"""
from sklearn.metrics import hamming_loss
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import numpy as np
from skml.problem_transformation import ProbabilisticClassifierChain
from skml.datasets import load_dataset
# Train a probabilistic classifier chain on the yeast multi-label dataset
# and report standard multi-label metrics on a held-out split.
X, y = load_dataset('yeast')
# sample down the label space to make the example faster.
# you shouldn't do this on your own data though!
y = y[:, :6]
X_train, X_test, y_train, y_test = train_test_split(X, y)
pcc = ProbabilisticClassifierChain(LogisticRegression())
pcc.fit(X_train, y_train)
y_pred = pcc.predict(X_test)
print("hamming loss: ")
print(hamming_loss(y_test, y_pred))
print("accuracy:")
print(accuracy_score(y_test, y_pred))
# report micro- and macro-averaged values for each remaining metric
for label, metric in [("f1 score", f1_score),
                      ("precision", precision_score),
                      ("recall", recall_score)]:
    print(label + ":")
    for avg in ("micro", "macro"):
        print(avg)
        print(metric(y_test, y_pred, average=avg))
import enum
import discord
class MessageType(enum.Enum):
    """Categories of bot replies; values match the original auto() ordering."""
    Plain = 1
    Embed = 2
    Error = 3
    Confirmation = 4
    Success = 5
class Reply:
    """A bot reply: plain-text message plus an optional Discord embed.

    Attributes:
        message: Plain-text content of the reply.
        type: The :class:`MessageType` category of the reply.
    """

    def __init__(self, message: str, type: MessageType = MessageType.Plain):
        self.message = message
        self.type = type
        self._embed: discord.Embed = None

    @property
    def embed(self):
        """The embed attached to this reply, or ``None``."""
        return self._embed

    @embed.setter
    def embed(self, embed):
        self._embed = embed
        # Reuse the plain message as the embed description when the embed
        # does not carry one, so callers only set the text once.
        # BUG FIX: identity comparison with None (`is not None`, not `!= None`).
        if self._embed.description == discord.Embed.Empty and self.message is not None:
            self._embed.description = self.message

    @staticmethod
    def CreateError(message: str):
        """Build a red 'Error' embed reply."""
        res = Reply(message, MessageType.Error)
        res.embed = discord.Embed(color=0xB30C00, title="Error")
        return res

    @staticmethod
    def CreateSuccess(message: str):
        """Build a green 'Success' embed reply."""
        res = Reply(message, MessageType.Success)
        res.embed = discord.Embed(color=0x00B34A, title="Success")
        return res

    @staticmethod
    def CreateConfirmation(message: str):
        """Build an orange confirmation-prompt embed reply."""
        res = Reply(message, MessageType.Confirmation)
        res.embed = discord.Embed(color=0xFF9D00, title="Please confirm")
        return res

    @staticmethod
    def CreateEmbed(message: str, title: str):
        """Build a generic blue embed reply with the given title."""
        res = Reply(message, MessageType.Embed)
        res.embed = discord.Embed(color=0x095DB3, title=title)
        return res

    @staticmethod
    def CreatePlain(message: str):
        """Build a plain-text reply with no embed."""
        res = Reply(message, MessageType.Plain)
        return res
|
from runner.run_description import RunDescription, Experiment, ParamGrid
# Empty hyper-parameter grid: a single run configuration is generated.
_params = ParamGrid([
])
# One experiment: APPO population-based training on doom_battle2 with
# frameskip 4; the full CLI is passed through to the trainer unchanged.
_experiments = [
    Experiment(
        'battle2_fs4',
        'python -m algorithms.appo.train_appo --env=doom_battle2 --train_for_env_steps=3000000000 --algo=APPO --env_frameskip=4 --use_rnn=True --ppo_epochs=1 --rollout=32 --recurrence=32 --macro_batch=2048 --batch_size=2048 --wide_aspect_ratio=False --num_workers=72 --num_envs_per_worker=30 --num_policies=8 --with_pbt=True',
        _params.generate_params(randomize=False),
    ),
]
# Entry point consumed by the runner to launch the experiment batch.
RUN_DESCRIPTION = RunDescription('paper_doom_battle2_appo_pbt_v65_fs4', experiments=_experiments)
|
# coding: utf-8
import re
import wcwidth
import numpy as np
from ._colorings import _toCOLOR_create
from .generic_utils import handleKeyError, handleTypeError
# Valid values for each component of a format spec; see
# https://docs.python.org/3/library/string.html#format-specification-mini-language
f_aligns = ["<", ">", "=", "^", "left", "right", "center"]
f_signs = ["+", "-", " ", ""]
f_grouping_options = ["_", ",", ""]
f_types = ["b", "c", "d", "e", "E", "f", "F", "g", "G", "n", "o", "s", "x", "X", "%"]
invisible_codes = re.compile(r"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
# Bytes twin of the pattern above. BUG FIX: the original escaped the opening
# bracket of the character class ("\[;\d]"), turning it into literal text so
# the bytes pattern never matched an ANSI color code.
invisible_codes_bytes = re.compile(rb"\x1b\[\d+[;\d]*m|\x1b\[\d*;\d*;\d*m")
def format_spec_create(align=">", sign="", zero_padding=False, width=0, grouping_option="", fmt=""):
    """Build a function that formats a value according to a format spec.

    ``format_spec = [[fill]align][sign][#][0][width][grouping_option][.precision][type]``

    Args:
        align (str) : [[fill]align] One of ``f_aligns``.
        sign (str) : [sign] One of ``["+", "-", " ", ""]``
        zero_padding (bool) : [0] Pad numbers with zeros when ``True``.
        width (int) : [width] Minimum field width.
        grouping_option (str) : [grouping_option] One of ``["_", ",", ""]``
        fmt (str) : [.precision][type] The trailing type must be one of ``f_types``.
    Returns:
        callable: One-argument function returning the formatted string.
    References:
        - `Python Source Code <https://github.com/python/cpython/blob/3.8/Lib/string.py>`_
        - `Python Documentation <https://docs.python.org/3/library/string.html#format-specification-mini-language>`_
    Examples:
        >>> from pycharmers.utils import format_spec_create
        >>> format_spec = format_spec_create(width=10, align="^")
        >>> format_spec("hoge")
        '   hoge   '
        >>> format_spec = format_spec_create(align="<", fmt=".1%")
        >>> format_spec(1/3)
        '33.3%'
        >>> format_spec = format_spec_create(align=">", zero_padding=True, fmt="b")
        >>> format_spec(20)
        '10100'
    """
    # Validate each component against its allowed values (order preserved
    # so the same error is raised first as before).
    handleKeyError(lst=f_aligns, align=align)
    handleKeyError(lst=f_signs, sign=sign)
    handleKeyError(lst=f_grouping_options, grouping_option=grouping_option)
    if len(fmt) > 0:
        handleKeyError(lst=f_types, fmt=fmt[-1])
    zero = "0" if zero_padding else ""
    handleTypeError(types=[int], width=width)
    spec = "{:" + sign + zero + grouping_option + fmt + "}"
    def format_spec(fill):
        return align_text(spec.format(fill), align=align, width=width)
    return format_spec
def align_text(string, align="left", width=0):
    """Pad *string* with spaces so its visible text is aligned within *width*.

    ANSI color codes are excluded from the width measurement, so colored
    text aligns the same way as plain text.

    Args:
        string (str) : String to align.
        align (str) : How to align the string; one of ``f_aligns``.
        width (int) : Total field width. No padding is added when the
            string is already at least this wide.
    Returns:
        string (str) : Aligned text.
    Examples:
        >>> from pycharmers.utils import align_text, toBLUE
        >>> print(align_text("Hello world!", align=">", width=15))
           Hello world!
        >>> print(align_text(toBLUE("Hello world!"), align=">", width=15))
           \x1b[34mHello world!\x1b[0m
    """
    handleKeyError(lst=f_aligns, align=align)
    s_width = visible_width(string)
    pad = width - s_width
    prefix, suffix = {
        "<" : ("", " "*pad),
        ">" : (" "*pad, ""),
        "^" : (" "*(pad//2), " "*(pad-pad//2)),
        "=" : (" "*(pad//2), " "*(pad-pad//2)),
        "left" : ("", " "*pad),
        # BUG FIX: "right" previously padded on the right-hand side (i.e.
        # behaved like "left"); it now mirrors ">" and pads on the left.
        "right" : (" "*pad, ""),
        "center": (" "*(pad//2), " "*(pad-pad//2)),
    }[align]
    return prefix + string + suffix
def print_func_create(align=">", sign="", zero_padding=False, width=0,
                      grouping_option="", fmt="", color="", is_bg=False,
                      left_side_bar="", right_side_bar="",
                      left_margin=0, right_margin=0, end="\n"):
    """Create a function which prints a formatted text. Please see also the function `format_spec_create`.
    Args:
        color (str) : color.
        is_bg (bool) : Whether to add color to the background or not.
        left(right)_side_bar (str) : Characters to output to the Left/Right side.
        left(right)_margin (int) : Left/Right side margin
        end (str) : string appended after the last value, default a newline.
    Returns:
        print_func: Function that can be used like print
    Examples:
        >>> from pycharmers.utils import print_func_create
        >>> print_func = print_func_create(width=8, align="^", left_side_bar="[", right_side_bar="]")
        >>> print_func("hoge")
        [  hoge  ]
        >>> print_func = print_func_create(align="<", left_side_bar="$ ")
        >>> print_func("git clone https://github.com/iwasakishuto/Python-utils.git")
        $ git clone https://github.com/iwasakishuto/Python-utils.git
        >>> print_func("cd Python-utils")
        $ cd Python-utils
        >>> print_func("sudo python setup.py install")
        $ sudo python setup.py install
    """
    format_spec = format_spec_create(
        align=align, sign=sign, zero_padding=zero_padding,
        width=width, grouping_option=grouping_option, fmt=fmt
    )
    toCOLOR = _toCOLOR_create(color)
    # Side bars and margins never change per call, so build them once.
    lhs = left_side_bar + " " * left_margin
    rhs = " " * right_margin + right_side_bar
    def print_func(fill):
        print(lhs + toCOLOR(format_spec(fill), is_bg=is_bg) + rhs, end=end)
    return print_func
class Table():
    """Create a beautiful table and show.
    Args:
        tablefmt (str) : The format of the table (one of ``SUPPORTED_FORMATS``).
        enable_colspan (bool) : Whether to enable ``colspan`` or not.
        mincolwidth (int) : The minimum width of each column.
    Properties:
        ncols(int) : the number of columns.
    Methods:
        set_cols : Set values to a table.
        show : Show a table.
    Examples:
        >>> from pycharmers.utils import Table, toBLUE
        >>> table = Table(enable_colspan=True)
        >>> table.set_cols([1,2,""], colname="id")
        >>> table.set_cols([toBLUE("abc"), "", "de"], color="GREEN")
        >>> table.show()
        +----+-------+
        | id | col.2 |
        +====+=======+
        | 1  |  \x1b[34mabc\x1b[0m  |
        +----+       +
        | 2  |       |
        +    +-------+
        |    |  \x1b[32mde\x1b[0m   |
        +----+-------+
    """
    SUPPORTED_FORMATS = ["github", "rst"]
    def __init__(self, tablefmt="rst", enable_colspan=True, mincolwidth=3):
        handleKeyError(lst=Table.SUPPORTED_FORMATS, tablefmt=tablefmt)
        # colname -> {"print_values", "print_title", "values", "colwidth", "align"}
        self.cols = {}
        # Running total of the rendered width (starts at 1 for the left edge).
        self.table_width = 1
        # Number of rows to display; updated by set_cols to the shortest column.
        self.head = 0
        self.tablefmt = tablefmt
        self.enable_colspan = enable_colspan
        self.mincolwidth = mincolwidth
    @property
    def ncols(self):
        # Number of columns currently registered via set_cols.
        return len(self.cols)
    def _print_thead(self, vedge="|"):
        """Print headers.
        Args:
            vedge (str) : The symbol of the vertical edge.
        """
        for colname, options in self.cols.items():
            print(vedge, end="")
            options["print_title"](colname)
        print(vedge)
    def _print_border(self, vertex="+", hedge="-", alignmark=None, is_next_has_vals=None):
        """Print border.
        Args:
            vertex (str) : The symbol of vertex.
            hedge (str) : The symbol of the horizontal edge.
            alignmark (str) : The symbol which implies the alignment.
            is_next_has_vals (list) : is each next column has the value or not.
        """
        if alignmark is None:
            alignmark=hedge
        # With no colspan information, draw a full border under every column.
        is_next_has_vals = is_next_has_vals or [True]*self.ncols
        border = vertex
        for (colname, options),is_next_has_val in zip(self.cols.items(),is_next_has_vals):
            if (not self.enable_colspan) or is_next_has_val:
                # Alignment markers sit at the edge(s) matching the column's
                # alignment (e.g. ":---:" for centered github columns).
                edge = alignmark + hedge*(options["colwidth"]-2) + alignmark
                align = options["align"]
                if align=="right":
                    edge = hedge + edge[1:]
                elif align == "left":
                    edge = edge[:-1] + hedge
                border += edge + vertex
            else:
                # Next row's cell is empty: leave the border open (colspan).
                border += " "*options["colwidth"] + vertex
        print(border)
    def _print_tbody(self, head=None, vedge="|", hedge="-", need_border=True):
        """Print Values.
        Args:
            head (int) : How many lines to display.
            vedge (str) : The symbol of the vertical edge.
            hedge (str) : The symbol of the horizontal edge.
            need_border (bool) : Whether the border between tbodys are needed or not.
        """
        if head is None: head=self.head
        loop_not_last = True
        for i in range(head):
            if i+1==head:
                loop_not_last=False
            # Track, per column, whether the NEXT row has a value; empty cells
            # keep the border open so the cell above spans downwards.
            is_next_has_vals=[]
            for colname, options in self.cols.items():
                print(vedge, end="")
                values = options["values"]
                options["print_values"](str(values[i]))
                if loop_not_last:
                    is_next_has_vals.append(len(str(values[i+1]))!=0)
            print(vedge)
            if need_border and loop_not_last:
                self._print_border(hedge=hedge, is_next_has_vals=is_next_has_vals)
    def show(self, head=None, table_width=None, tablefmt=None):
        """Show a table
        Args:
            head (str) : Show the first ``head`` rows for the table.
            table_width (int) : The table width.
            tablefmt (str) : Override the table format for this call only.
        """
        tablefmt = tablefmt or self.tablefmt
        handleKeyError(lst=Table.SUPPORTED_FORMATS, tablefmt=tablefmt)
        # Dispatch to show_github / show_rst based on the chosen format.
        show_func = getattr(self, f"show_{tablefmt}")
        show_func(head=head, table_width=table_width)
    def show_github(self, head=None, table_width=None):
        """Show a table with github format.
        Args:
            head (str) : Show the first ``head`` rows for the table.
            table_width (int) : The table width.
        Examples:
            >>> from pycharmers.utils import Table
            >>> table = Table()
            >>> table.set_cols(values=range(3), colname="Number")
            >>> table.set_cols(values=["iwa", "saki", "shuto"], colname="Name")
            >>> table.show_github()
            | Number | Name  |
            |:------:|:-----:|
            |   0    |  iwa  |
            |   1    | saki  |
            |   2    | shuto |
        """
        self._print_thead(vedge="|")
        self._print_border(vertex="|", hedge="-", alignmark=":", is_next_has_vals=None)
        self._print_tbody(head=head, hedge="-", need_border=False)
    def show_rst(self, head=None, table_width=None):
        """Show a table with rst format.
        Args:
            head (str) : Show the first ``head`` rows for the table.
            table_width (int) : The table width.
        Examples:
            >>> from pycharmers.utils import Table
            >>> table = Table()
            >>> table.set_cols(values=range(3), colname="Number")
            >>> table.set_cols(values=["iwa", "saki", "shuto"], colname="Name")
            >>> table.show_rst()
            +--------+-------+
            | Number | Name  |
            +========+=======+
            |   0    |  iwa  |
            +--------+-------+
            |   1    | saki  |
            +--------+-------+
            |   2    | shuto |
            +--------+-------+
        """
        self._print_border(vertex="+", hedge="-", is_next_has_vals=None)
        self._print_thead(vedge="|")
        self._print_border(vertex="+", hedge="=", is_next_has_vals=None)
        self._print_tbody(head=head, hedge="-", need_border=True)
        self._print_border(vertex="+", hedge="-", is_next_has_vals=None)
    def set_cols(self, values, colname=None, width=0, align=">", sign="",
                 zero_padding=False, grouping_option="", fmt="", color="",
                 left_margin=1, right_margin=1):
        """Set values to a table.
        Args:
            values (array) : The array-like data.
            colname (str) : The colname for ``values``.
            **kwargs : See also ``print_func_create``
        """
        colname = colname or f"col.{self.ncols+1}"
        title_width = visible_width(str(colname))
        format_spec = format_spec_create(
            width=width, align=align, sign=sign, zero_padding=zero_padding,
            grouping_option=grouping_option, fmt=fmt
        )
        # Column width = widest formatted value or the title, whichever is larger.
        width = max(max([visible_width(format_spec(v)) for v in values]), title_width)
        self.table_width += width + left_margin + right_margin + 1
        # Titles are always centered; values use the caller's formatting.
        print_title = print_func_create(
            align="center", sign="", zero_padding=False, width=width,
            grouping_option="", fmt="", color="",
            left_side_bar="", right_side_bar="", end="",
            left_margin=left_margin, right_margin=right_margin,
        )
        print_values = print_func_create(
            align=align, sign=sign, zero_padding=zero_padding, width=width,
            grouping_option=grouping_option, fmt=fmt, color=color,
            left_side_bar="", right_side_bar="", end="",
            left_margin=left_margin, right_margin=right_margin,
        )
        self.cols.update({
            colname: {
                "print_values" : print_values,
                "print_title" : print_title,
                "values" : values,
                "colwidth" : max(width+left_margin+right_margin, self.mincolwidth),
                "align" : align,
            }
        })
        # Display no more rows than the shortest column provides.
        nrows = len(values)
        if self.head==0 or nrows < self.head:
            self.head = nrows
def tabulate(tabular_data=None, headers=None, tablefmt="rst", aligns="left"):
    """Format a fixed width table for pretty printing.

    Args:
        tabular_data (list) : tbody contents. Must be a dual list.
        headers (list) : thead contents; padded with auto-generated names
            when shorter than the number of columns.
        tablefmt (str) : Table format for :py:class:`Table <pycharmers.utils.print_utils.Table>`
        aligns (list) : How to align values in each col (a single str is
            broadcast to every column).

    Note:
        Fixed a mutable-default-argument bug: ``headers=[]`` used to be
        mutated in place via ``+=``, so padding leaked across calls.

    Examples:
        >>> from pycharmers.utils import tabulate
        >>> tabulate([[i*j for i in range(1,4)] for j in range(1,4)])
        +-------+-------+-------+
        | col.1 | col.2 | col.3 |
        +=======+=======+=======+
        |     1 |     2 |     3 |
        +-------+-------+-------+
        |     2 |     4 |     6 |
        +-------+-------+-------+
        |     3 |     6 |     9 |
        +-------+-------+-------+
    """
    if tabular_data is None:
        tabular_data = [[]]
    # Work on a copy so a caller-supplied headers list is never mutated.
    headers = list(headers) if headers is not None else []
    ncols = len(tabular_data[0])
    headers = headers + [None] * (ncols - len(headers))
    table = Table(tablefmt=tablefmt)
    if isinstance(aligns, str):
        aligns = [aligns] * len(headers)
    # Transpose rows -> columns; each column becomes one Table column.
    for col_value, header, align in zip(np.array(tabular_data).T, headers, aligns):
        table.set_cols(values=col_value, colname=header, align=align)
    table.show()
def print_dict_tree(dictionary, indent=4, rank=0, marks=["-", "*", "#"]):
    """Pretty-print a (possibly nested) dictionary as an indented tree.

    Args:
        dictionary (dict) : An input dictionary.
        indent (int) : Number of spaces added per nesting level.
        rank (int) : A current rank (nesting depth).
        marks (list) : Bullet marks cycled through by depth.

    Examples:
        >>> from pycharmers.utils import print_dict_tree
        >>> print_dict_tree({"a": 0, "b": 1})
        - a: 0
        - b: 1
        >>> print_dict_tree({"a": 0, "b": {"b1": 1, "b2": 2}})
        - a: 0
        - b:
            * b1: 1
            * b2: 2
    """
    # Non-mapping input (no .items) is silently ignored, as before.
    if not hasattr(dictionary, "items"):
        return
    pad = " " * (indent * rank)
    mark = marks[rank % len(marks)]
    for key, val in dictionary.items():
        if hasattr(val, "items"):
            # Nested mapping: print the key alone, then recurse one level deeper.
            print(f"{pad}{mark} {key}: ")
            print_dict_tree(dictionary=val, indent=indent, rank=rank + 1, marks=marks)
        else:
            print(f"{pad}{mark} {key}: {val}")
def pretty_3quote(*value, indent=0):
    """Normalize triple-quoted strings: collapse the leading whitespace of
    each line to a uniform ``indent`` and strip surrounding newlines.

    Args:
        indent (int) : If indent is a non-negative integer, then multiple
            lines will be pretty-printed with that indent level.

    Returns:
        list: one cleaned string per positional argument.

    Examples:
        >>> from pycharmers.utils import pretty_3quote
        >>> print(*pretty_3quote(\"\"\"
        ...     When I was 17, I read a quote that went something like:
        ...     “If you live each day as if it was your last, someday you’ll most certainly be right.”
        ... \"\"\"))
        When I was 17, I read a quote that went something like:
        “If you live each day as if it was your last, someday you’ll most certainly be right.”
    """
    # "\n<whitespace>" collapses to "\n" plus the requested indent.
    replacement = r"\n" + r" " * indent
    cleaned = []
    for val in value:
        flat = re.sub(pattern=r"\n\s+", repl=replacement, string=val)
        cleaned.append(flat.strip("\n"))
    return cleaned
def strip_invisible(s):
    """Remove invisible ANSI color codes.
    Args:
        s (str) : String.
    Returns:
        s (str) : String with invisible code removed. Returns ``None``
            for inputs that are neither ``str`` nor ``bytes``
            (original fall-through behavior, kept for compatibility).
    Examples:
        >>> from pycharmers.utils import strip_invisible, toBLUE
        >>> strip_invisible("\x1b[31mhello\x1b[0m")
        'hello'
        >>> strip_invisible(toBLUE("hello"))
        'hello'
        >>> strip_invisible("hello")
        'hello'
    """
    if isinstance(s, str):
        return re.sub(pattern=invisible_codes, repl="", string=s)
    elif isinstance(s, bytes):
        # BUGFIX: the replacement for a bytes subject must itself be bytes;
        # the previous repl="" (str) made re.sub raise TypeError here.
        # (assumes invisible_codes_bytes is a bytes pattern — TODO confirm)
        return re.sub(pattern=invisible_codes_bytes, repl=b"", string=s)
def visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.
    Args:
        s (str) : String.
    Returns:
        width (int) : Visible width
    Examples:
        >>> from pycharmers.utils import visible_width, toBLUE
        >>> visible_width(toBLUE("hello"))
        5
        >>> visible_width("こんにちは")
        10
        >>> visible_width("hello 世界。")
        12
    """
    # str/bytes get their ANSI escapes stripped first; anything else is
    # rendered via str() and measured as-is.
    if isinstance(s, (str, bytes)):
        target = strip_invisible(s)
    else:
        target = str(s)
    # wcswidth counts East-Asian wide characters as width 2.
    return wcwidth.wcswidth(target)
def str2pyexample(string):
    """Print a string of Python code in doctest style (``>>>`` / ``...``).

    Args:
        string (str) : A string of Python Example Code.

    Examples:
        >>> from pycharmers.utils import str2pyexample
        >>> WINDOW_NAME = "string2python"
        >>> str2pyexample(\"\"\"
        ... import cv2
        ... import numpy as np
        ... frame = np.zeros(shape=(50, 100, 3), dtype=np.uint8)
        ... while (True):
        ...     cv2.imshow(WINDOW_NAME, frame)
        ...     if cv2.waitKey(0) == 27: break
        ... cv2.destroyAllWindows()
        ... \"\"\")
        >>> import cv2
        >>> import numpy as np
        >>> frame = np.zeros(shape=(50, 100, 3), dtype=np.uint8)
        >>> while (True):
        ...     cv2.imshow(WINDOW_NAME, frame)
        ...     if cv2.waitKey(0) == 27: break
        >>> cv2.destroyAllWindows()
    """
    for line in string.strip().split("\n"):
        # Continuation prompt for empty and indented lines, else a fresh
        # statement prompt.
        prefix = "..." if (not line or line.startswith(" ")) else ">>>"
        print(prefix, line)
|
"""
This module lets you practice the WAIT-FOR-EVENT pattern, using:
while True:
...
if <event has occurred>:
break
...
If you wish to use the while <condition>: form, that is OK too,
but we reserve the right to offer help only if you use the while True: form,
since many people find it easier to write correct code in that form.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Derek Whitley, their colleagues,
and PUT_YOUR_NAME_HERE.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
import math
import testing_helper
import time
def main():
    """Entry point: calls the TEST functions in this module.

    The calls below are deliberately commented out; students un-comment
    them one at a time as each exercise is completed.
    """
    print("",
          "Un-comment and re-comment calls in MAIN one by one as you work.",
          "", sep="\n")
    # run_test_sum_until_prime_input()
    # run_test_next_prime()
    # run_test_sum_to_next_prime()
    # run_test_prime_gap()
    # run_test_wait_for_sum_of_cubes()
def is_prime(n):
    """
    What comes in: An integer n >= 2.
    What goes out: Returns True if the given integer is prime,
      else returns False.
    Side effects: None.
    Examples:
      -- is_prime(11) returns True
      -- is_prime(12) returns False
      -- is_prime(2) returns True
    Note: The algorithm used here is simple and clear but slow.
    Type hints:
      :type n: int
      :rtype: bool
    """
    # Trial division by every k from 2 up to sqrt(n), inclusive.
    # The "+ 0.1" guards against floating-point round-off when n is a
    # perfect square (e.g. sqrt(25) evaluating to 4.999...).
    for k in range(2, int(math.sqrt(n) + 0.1) + 1):
        if n % k == 0:
            # k divides n evenly, so n is composite.
            return False
    # No divisor found, so n is prime (assumes n >= 2 per the contract).
    return True
# -------------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no TO DO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# -------------------------------------------------------------------------
def run_test_sum_until_prime_input():
    """ Tests the sum_until_prime_input function by calling it.

    This is an interactive "test": sum_until_prime_input reads from the
    console, so correctness is judged by eye against its docstring example.
    """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_until_prime_input function:')
    print('--------------------------------------------------')
    sum_until_prime_input()
def sum_until_prime_input():
    """
    What comes in: Nothing.
    What goes out: Nothing (i.e., None).
    Side effects:
      -- Repeatedly prompts the user for and inputs an integer
           that is at least 2.
      -- Stops when the input integer is prime.
      -- Prints the sum of the input integers (including the prime one).
    Example:
      Here is a sample run, where the user input is to the right
      of the colons:
          Enter an integer greater than 1: 6
          Enter an integer greater than 1: 100
          Enter an integer greater than 1: 50
          Enter an integer greater than 1: 11
          The sum of the input integers is: 167
    """
    # Wait-for-event pattern: keep reading and accumulating until the
    # event (a prime was entered) occurs.
    total = 0
    while True:
        number = int(input('Enter an integer greater than 1: '))
        total = total + number
        if is_prime(number):
            break
    print('The sum of the input integers is:', total)
def run_test_next_prime():
    """ Tests the next_prime function.

    Refactored: the 12 copy-pasted test stanzas are now a data-driven
    loop over (argument, expected) pairs; the printed output is the same.
    """
    print()
    print('--------------------------------------------------')
    print('Testing the next_prime function:')
    print('--------------------------------------------------')
    format_string = ' next_prime( {} )'
    test_results = [0, 0]  # Number of tests passed, failed.
    # (argument, expected) pairs; expected values computed BY HAND.
    cases = [
        (6, 7), (7, 11), (80, 83),
        (155922, 156007), (155921, 156007), (156003, 156007),
        (156006, 156007), (156007, 156011), (156009, 156011),
        (156010, 156011), (156011, 156019), (2, 3),
    ]
    for argument, expected in cases:
        print()
        print('TEST STARTED! Has it ended?')
        print_expected_result_of_test([argument], expected, test_results,
                                      format_string)
        actual = next_prime(argument)
        print_actual_result_of_test(expected, actual, test_results)
        print('TEST ENDED!')
    # -------------------------------------------------------------------------
    # TODO: 3 (continued):
    #   Add your own hand-computed (argument, expected) pair to the
    #   `cases` list above (at least 13 tests total).
    # -------------------------------------------------------------------------
    # Test 13 (placeholder until your case is added above):
    print()
    print('TEST STARTED! Has it ended?')
    print('TEST ENDED!')
    # SUMMARY of test results:
    print_summary_of_test_results(test_results)
def next_prime(m):
    """
    What comes in: An integer m that is at least 2.
    What goes out: Returns the smallest prime number strictly greater than m.
    Side effects: None.
    Examples:
      -- next_prime(6) returns 7
      -- next_prime(7) returns 11
      -- next_prime(80) returns 83
      -- next_prime(155921) returns 156007 [trust me!]
    Type hints:
      :type m: int
      :rtype: int
    """
    # Wait-for-event pattern: advance an auxiliary candidate (m itself is
    # never re-assigned) until the event "candidate is prime" occurs.
    candidate = m + 1
    while True:
        if is_prime(candidate):
            break
        candidate = candidate + 1
    return candidate
def run_test_sum_to_next_prime():
    """ Tests the sum_to_next_prime function.

    Refactored: the 12 copy-pasted test stanzas are now a data-driven
    loop over (argument, expected) pairs; the printed output is the same.
    """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_to_next_prime function:')
    print('--------------------------------------------------')
    format_string = ' sum_to_next_prime( {} )'
    test_results = [0, 0]  # Number of tests passed, failed.
    # (argument, expected) pairs; expected values computed BY HAND, e.g.
    #   6 -> 6 + 7 = 13,   7 -> 7 + 8 + 9 + 10 + 11 = 45,   2 -> 2 + 3 = 5.
    cases = [
        (6, 13), (7, 45), (80, 326),
        (155922, 13412947), (155921, 13568868), (156003, 780025),
        (156006, 312013), (156007, 780045), (156009, 468030),
        (156010, 312021), (156011, 1404135), (2, 5),
    ]
    for argument, expected in cases:
        print()
        print('TEST STARTED! Has it ended?')
        print_expected_result_of_test([argument], expected, test_results,
                                      format_string)
        actual = sum_to_next_prime(argument)
        print_actual_result_of_test(expected, actual, test_results)
        print('TEST ENDED!')
    # -------------------------------------------------------------------------
    # TODO: 5 (continued):
    #   Add your own hand-computed (argument, expected) pair to the
    #   `cases` list above (at least 13 tests total).
    # -------------------------------------------------------------------------
    # Test 13 (placeholder until your case is added above):
    print()
    print('TEST STARTED! Has it ended?')
    print('TEST ENDED!')
    # SUMMARY of test results:
    print_summary_of_test_results(test_results)
def sum_to_next_prime(m):
    """
    What comes in: An integer m that is at least 2.
    What goes out: Returns the sum of all the numbers from m
      to the smallest prime number strictly greater than m, inclusive.
    Side effects: None.
    Examples:
      -- sum_to_next_prime(6) returns 6 + 7 which is 13
      -- sum_to_next_prime(7) returns 7 + 8 + 9 + 10 + 11 which is 45
      -- sum_to_next_prime(80) returns 80 + 81 + 82 + 83 which is 326
      -- sum_to_next_prime(155921) returns 13568868 [trust me!]
    Type hints:
      :type m: int
      :rtype: int
    """
    # Wait-for-event pattern, summing as we go (deliberately NOT built on
    # next_prime, per the exercise requirement). m itself is never
    # re-assigned; an auxiliary candidate advances instead.
    total = m
    candidate = m + 1
    while True:
        total = total + candidate
        if is_prime(candidate):
            break
        candidate = candidate + 1
    return total
def run_test_prime_gap():
    """ Tests the prime_gap function.

    Refactored: the 27 copy-pasted test stanzas are now a data-driven
    loop over (argument, expected) pairs; the printed output is the same.
    """
    print()
    print('--------------------------------------------------')
    print('Testing the prime_gap function:')
    print('--------------------------------------------------')
    format_string = ' prime_gap( {} )'
    test_results = [0, 0]  # Number of tests passed, failed.
    # (argument, expected) pairs; expected values computed BY HAND.
    cases = [
        (1, 2), (2, 3), (3, 7), (4, 7), (5, 23), (6, 23),
        (8, 89), (7, 89), (10, 113),
        (45, 19609), (50, 19609), (52, 19609),
        (54, 31397), (56, 31397), (58, 31397), (60, 31397),
        (62, 31397), (64, 31397), (66, 31397), (68, 31397),
        (70, 31397), (72, 31397),
        (74, 155921), (80, 155921),
        (100, 370261), (120, 1357201), (150, 4652353),
    ]
    for argument, expected in cases:
        print()
        print('TEST STARTED! Has it ended?')
        print_expected_result_of_test([argument], expected, test_results,
                                      format_string)
        actual = prime_gap(argument)
        print_actual_result_of_test(expected, actual, test_results)
        print('TEST ENDED!')
    # SUMMARY of test results:
    print_summary_of_test_results(test_results)
def prime_gap(gap):
    """
    What comes in: An integer gap that is at least 1.
    What goes out:
      Returns the smallest prime number whose "gap" is at least the given gap,
      where the "gap" of a prime number is the difference between
      that prime number and the next-smallest prime number.
    Side effects: None.
    Examples:
      -- prime_gap(1) returns 2, because the next prime after 2 is 3,
           and so the gap for 2 is 3 - 2 = 1,
           and 2 is the smallest (and in fact, ONLY) prime with gap 1.
      -- prime_gap(2) returns 3, because the next prime after 3 is 5,
           and so the gap for 3 is 5 - 3 = 2,
           and 3 is the smallest prime with gap 2.
      -- prime_gap(3) returns 7, because the next prime after 7 is 11,
           and so the gap for 7 is 11 - 7 = 4,
           and 7 is the smallest prime with gap 3 or more.
           (Note: There are no primes except 2 that have a gap that is odd.)
      -- prime_gap(4) returns 7 for similar reasons.
      -- prime_gap(6) returns 23, because the next prime after 23 is 29,
           and so the gap for 23 is 29 - 23 = 6,
           and 23 is the smallest prime with gap 6.
      -- prime_gap(8) returns 89, because the next prime after 89 is 97,
           and so the gap for 89 is 97 - 89 = 8,
           and 89 is the smallest prime with gap 8.
      -- prime_gap(52) returns 19609 [trust me!]
    Type hints:
      :type gap: int
    :rtype: int
    """
    # Walk primes pairwise via next_prime (as required), reusing each
    # "following" prime as the next starting point so every prime is
    # found only once -- this keeps the large tests fast.
    prime = 2
    while True:
        following = next_prime(prime)
        if following - prime >= gap:
            break
        prime = following
    return prime
def run_test_wait_for_sum_of_cubes():
    """ Tests the wait_for_sum_of_cubes function.

    Refactored: the 18 copy-pasted test stanzas are now a data-driven
    loop over (argument, expected) pairs; the printed output is the same.
    """
    print()
    print('--------------------------------------------------')
    print('Testing the wait_for_sum_of_cubes function:')
    print('--------------------------------------------------')
    format_string = ' wait_for_sum_of_cubes( {} )'
    test_results = [0, 0]  # Number of tests passed, failed.
    # (argument, expected) pairs; expected values computed BY HAND.
    cases = [
        (4.3, 2), (1, 1), (1.000000000001, 2),
        (9, 2), (9.000000000001, 3),
        (35.9999999999999, 3), (36, 3), (36.0000001, 4),
        (58, 4), (100, 4), (100.00000001, 5),
        (1000, 8), (-4.2, 1),
        (1296, 8), (1296.00000001, 9),
        (100000, 25), (500000, 38), (1000000000, 251),
    ]
    for argument, expected in cases:
        print()
        print('TEST STARTED! Has it ended?')
        print_expected_result_of_test([argument], expected, test_results,
                                      format_string)
        actual = wait_for_sum_of_cubes(argument)
        print_actual_result_of_test(expected, actual, test_results)
        print('TEST ENDED!')
    # -------------------------------------------------------------------------
    # TODO: 8 (continued):
    #   Add (30.33, <expected computed BY HAND>) to the `cases` list above,
    #   plus one more pair of your own (at least 20 tests total).
    # -------------------------------------------------------------------------
    # Tests 19 and 20 (placeholders until your cases are added above):
    for _ in range(2):
        print()
        print('TEST STARTED! Has it ended?')
        print('TEST ENDED!')
    # SUMMARY of test results:
    print_summary_of_test_results(test_results)
def wait_for_sum_of_cubes(x):
    """
    What comes in: A number x.
    What goes out: Returns the smallest positive integer n
      such that the sum
         1 cubed + 2 cubed + 3 cubed + ... + n cubed
      is greater than or equal to x.
    Side effects: None.
    Examples:
      -- If x is 4.3, this function returns 2 because:
           1 cubed = 1, which is less than 4.3, but
           1 cubed + 2 cubed = 9, which is >= 4.3.
      -- If x is 58, this function returns 4 because:
           1 + 8 + 27 = 36 < 58, but 1 + 8 + 27 + 64 = 100 >= 58.
      -- If x is 1000, this function returns 8 (sum through 7 is 784;
         through 8 is 1296).  If x is exactly 1296, it returns 8.
      -- If x is -5.2 (or any number <= 1), this function returns 1.
    Type hints:
      :type x: float [or an int]
    """
    # Wait-until-event pattern: keep a running sum of cubes and stop
    # as soon as it reaches x.  For any x <= 1 the loop body never
    # runs, so the answer is n = 1 (1 cubed is already >= x).
    n = 1
    sum_of_cubes = 1
    while sum_of_cubes < x:
        n = n + 1
        sum_of_cubes = sum_of_cubes + n ** 3
    return n
###############################################################################
# Our tests use the following to print error messages in red.
# Do NOT change it. You do NOT have to do anything with it.
###############################################################################
def print_expected_result_of_test(arguments, expected,
                                  test_results, format_string, suffix=''):
    """Delegate to testing_helper to print a test's EXPECTED result."""
    testing_helper.print_expected_result_of_test(arguments, expected,
                                                 test_results, format_string,
                                                 suffix)
def print_actual_result_of_test(expected, actual, test_results,
                                precision=None):
    """Delegate to testing_helper to print (and record) the ACTUAL result."""
    testing_helper.print_actual_result_of_test(expected, actual,
                                               test_results, precision)
def print_summary_of_test_results(test_results):
    """Delegate to testing_helper to print the pass/fail summary."""
    testing_helper.print_summary_of_test_results(test_results)
# To allow color-coding the output to the console:
USE_COLORING = True  # Change to False to revert to OLD style coloring
testing_helper.USE_COLORING = USE_COLORING

if USE_COLORING:
    # noinspection PyShadowingBuiltins
    # Deliberately shadows the built-in print so all output from this
    # module goes through testing_helper's color-coded printer.
    print = testing_helper.print_colored
else:
    # noinspection PyShadowingBuiltins
    print = testing_helper.print_uncolored

# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# The try .. except prevents error messages on the console from being
# intermingled with ordinary output to the console.
# -----------------------------------------------------------------------------
try:
    main()
except Exception:
    print('ERROR - While running this test,', color='red')
    print('your code raised the following exception:', color='red')
    print()
    # Brief pause so the ordinary output above flushes before the
    # re-raised traceback is printed.
    time.sleep(1)
    raise
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
import random
class CoinTossBoxLayout(BoxLayout):
    """Root layout of the coin-toss guessing game.

    The widget ids come from the accompanying .kv file; `result` is the
    label in which the guess / outcome is displayed.
    """

    def choice(self, guess):
        """Display the user's guess, but only while no result is shown."""
        # An empty result box means no round is in progress yet; if a
        # result is already displayed, do nothing.
        if self.ids.result.text == "":
            self.ids.result.text = guess

    def get_random(self):
        """Pick a random response and show it if the result box is empty."""
        responses = ['Right!', 'Nope, try again!', 'Need Coffee!', 'Still not Heads']
        picked = random.choice(responses)
        if self.ids.result.text == "":
            self.ids.result.text = picked

    def clear(self):
        """Erase the result so the user can guess again."""
        self.ids.result.text = ""
class CoinTossApp(App):
    """Kivy application wrapper; the root widget is a CoinTossBoxLayout."""

    def build(self):
        """Create and return the root widget."""
        root = CoinTossBoxLayout()
        return root
if __name__ == "__main__":
    # Launch the app only when executed as a script (not when imported).
    CoinTossApp().run()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""New daily plot generation - using GGD data.
Created on Tue Nov 16 22:02:09 2021
@hk_nien
"""
import matplotlib.pyplot as plt
import tools
import nlcovidstats as nlcs
import nlcovidstats_data as ncd
import pandas as pd
import plot_aantal_tests_updates as ggd_tests
import calc_R_from_ggd_tests as ggd_R
import ggd_data
import os
# def download_ggd_tests(force=False):
# #%% R graph for daily Twitter update
if __name__ == '__main__':
    # Daily Twitter-update plots: refresh GGD test data, then generate the
    # tests/delays plot, positives comparison, and R-estimate graphs.
    plt.close('all')
    nlcs.reset_plots()
    ggd_data.update_ggd_tests(force=1)  # updated at :45
    nlcs.init_data(autoupdate=True)
    ncd.check_RIVM_message()

    print('---GGD tests---')
    ggd_tests.plot_daily_tests_and_delays('2021-10-01')
    # ggd_tests.plot_daily_tests_and_delays('2021-09-01', src_col='n_pos')
    plt.pause(0.25)

    print('--R calculation--')
    ggd_R.plot_rivm_and_ggd_positives(140, yscale=('log', 1000, 30000))
    plt.pause(0.25)
    ggd_R.plot_R_graph_multiple_methods(num_days=100)
    plt.pause(0.25)
    nlcs.construct_Dfunc(nlcs.DELAY_INF2REP, plot=True)

#%%
if 0:
    #%% check recent anomaly correction
    plt.close('all')
    nlcs.init_data(autoupdate=True)
    #ggd_R.plot_rivm_and_ggd_positives(25, yscale=('linear', 7000, 25000))
    #plt.pause(0.4)
    ggd_R.plot_rivm_and_ggd_positives(25, True, yscale=('linear', 5000, 25000))

    #%% Show TvT performance
    ggd_R.plot_R_graph_multiple_methods(
        num_days=240, ylim=(-0.9, 4.2),
        methods=('rivm', 'melding', 'tvt'),
        )

    #%% GGD tests in regions
    fig, ax = plt.subplots(figsize=(7, 4), tight_layout=True)
    for rre in ['Utrecht', 'Midden- en West-Brabant', 'Groningen', 'Drenthe', 'Twente']:
        df = ggd_data.load_ggd_pos_tests(region_regexp=rre)
        ax.step(df.index[-100:], df['n_tested'][-100:], where='mid', label=rre)
    ax.set_yscale('log')
    ax.set_title('Uitgevoerde GGD tests per regio')
    ax.set_xlabel('Datum monstername')
    ax.legend()
    # NOTE: the redundant local `import tools` was removed here -- `tools`
    # is already imported at the top of this file.
    tools.set_xaxis_dateformat(ax)
    fig.show()
|
"""
Handled exceptions raised by REST framework.
In addition Django's built in 403 and 404 exceptions are handled.
(`django.http.Http404` and `django.core.exceptions.PermissionDenied`)
"""
from __future__ import unicode_literals
from rest_framework import status
import math
class APIException(Exception):
    """
    Base class for REST framework exceptions.
    Subclasses should provide `.status_code` and `.default_detail` properties.
    """
    status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    default_detail = ''

    def __init__(self, detail=None):
        # A truthy, explicitly supplied detail wins over the class default.
        if detail:
            self.detail = detail
        else:
            self.detail = self.default_detail
class ParseError(APIException):
    # 400: the request body could not be parsed.
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = 'Malformed request.'
class AuthenticationFailed(APIException):
    # 401: credentials were provided but are invalid.
    status_code = status.HTTP_401_UNAUTHORIZED
    default_detail = 'Incorrect authentication credentials.'
class NotAuthenticated(APIException):
    # 401: no credentials were provided at all.
    status_code = status.HTTP_401_UNAUTHORIZED
    default_detail = 'Authentication credentials were not provided.'
class PermissionDenied(APIException):
    # 403: authenticated, but not allowed to perform this action.
    status_code = status.HTTP_403_FORBIDDEN
    default_detail = 'You do not have permission to perform this action.'
class MethodNotAllowed(APIException):
    """405: the HTTP method is not supported on this endpoint.

    The detail template is interpolated with the offending method name.
    """
    status_code = status.HTTP_405_METHOD_NOT_ALLOWED
    default_detail = "Method '%s' not allowed."

    def __init__(self, method, detail=None):
        template = detail or self.default_detail
        self.detail = template % method
class NotAcceptable(APIException):
    """406: no renderer can satisfy the request's Accept header.

    `available_renderers` records which renderers were considered, for
    use by content-negotiation error handling.
    """
    status_code = status.HTTP_406_NOT_ACCEPTABLE
    default_detail = "Could not satisfy the request's Accept header"

    def __init__(self, detail=None, available_renderers=None):
        self.available_renderers = available_renderers
        self.detail = detail or self.default_detail
class UnsupportedMediaType(APIException):
    """415: the request's content type cannot be parsed.

    The detail template is interpolated with the offending media type.
    """
    status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
    default_detail = "Unsupported media type '%s' in request."

    def __init__(self, media_type, detail=None):
        template = detail or self.default_detail
        self.detail = template % media_type
class Throttled(APIException):
    """429: the request was throttled.

    When `wait` (seconds until the next request is allowed) is given, it
    is appended to the detail message and exposed, rounded up, as
    `self.wait`.
    """
    status_code = status.HTTP_429_TOO_MANY_REQUESTS
    default_detail = 'Request was throttled.'
    extra_detail = "Expected available in %d second%s."

    def __init__(self, wait=None, detail=None):
        if wait is None:
            self.detail = detail or self.default_detail
            self.wait = None
        else:
            # Renamed from `format` (shadowed the builtin); the old
            # `wait != 1 and 's' or ''` and/or-trick is replaced with an
            # explicit conditional expression (same result).
            template = (detail or self.default_detail) + self.extra_detail
            self.detail = template % (wait, '' if wait == 1 else 's')
            self.wait = math.ceil(wait)
|
from cs50 import get_int

# Receive the pyramid's height; re-prompt while the input is outside [1, 8].
h = 0  # initializing the height so the loop condition runs at least once
while (h > 8 or h < 1):
    h = get_int("Height: ")

# Print the hashes: row i has (h - i) leading spaces followed by i hashes,
# producing a right-aligned pyramid.
for i in range(1, h + 1):
    print( " " * (h-i) + "#"*(i) )
|
#!/usr/bin/env python
# /* ----------------------------------------------------------------------------
# * Copyright 2021, Jesus Tordesillas Torres, Aerospace Controls Laboratory
# * Massachusetts Institute of Technology
# * All Rights Reserved
# * Authors: Jesus Tordesillas, et al.
# * See LICENSE file for the license information
# * -------------------------------------------------------------------------- */
import random
import roslib
import rospy
import math
from panther_msgs.msg import DynTraj
from snapstack_msgs.msg import Goal, State
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Vector3
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from geometry_msgs.msg import Point
from std_msgs.msg import ColorRGBA
import numpy as np
from numpy import linalg as LA
import random
from tf.transformations import quaternion_from_euler, euler_from_quaternion
import tf
from math import sin, cos, tan
import os
import copy
import sys
import rospkg
class FakeSim:
    """Simulates moving/static obstacles for the PANTHER planner.

    Generates random trajectories (as strings in the time variable `t`
    that are later eval()'d), publishes them on /trajs, publishes mesh
    markers for rviz, and optionally spawns matching models in Gazebo.
    """

    def getTrajectoryPosMeshBBox(self, i):
        """Return [x_expr, y_expr, z_expr, x, y, z, mesh, bbox] for obstacle i.

        Dynamic obstacles follow a trefoil-knot path; static ones bob
        vertically in z.  The *_expr entries are strings of `t`.
        """
        x=random.uniform(self.x_min, self.x_max);
        y=random.uniform(self.y_min, self.y_max);
        z=random.uniform(self.z_min, self.z_max);
        offset=random.uniform(-2*math.pi, 2*math.pi);
        slower=random.uniform(self.slower_min, self.slower_max);
        s=self.scale
        if(self.getType(i)=="dynamic"):
            mesh=random.choice(self.available_meshes_dynamic);
            bbox=self.bbox_dynamic;
            [x_string, y_string, z_string] = self.trefoil(x,y,z, s,s,s, offset, slower)
        else:
            mesh=random.choice(self.available_meshes_static);
            bbox=self.bbox_static_vert;
            z=bbox[2]/2.0;  # place the static obstacle resting on the ground
            [x_string, y_string, z_string] = self.wave_in_z(x, y, z, s, offset, 1.0)
        return [x_string, y_string, z_string, x, y, z, mesh, bbox]

    def getType(self,i):
        """Obstacles with index < num_of_dyn_objects are dynamic, the rest static."""
        if(i<self.num_of_dyn_objects):
            return "dynamic"
        else:
            return "static"

    def __init__(self, total_num_obs,gazebo):
        """Create `total_num_obs` obstacles; spawn them in Gazebo if `gazebo`."""
        self.state=State()

        name = rospy.get_namespace()
        self.name = name[1:-1]  # strip leading and trailing '/'

        print(total_num_obs)
        # 65% of obstacles are dynamic by default.
        self.num_of_dyn_objects=int(0.65*total_num_obs);
        self.num_of_stat_objects=total_num_obs-self.num_of_dyn_objects;
        # Sampling ranges for obstacle positions (meters).
        self.x_min= 2.0
        self.x_max= 75.0
        self.y_min= -3.0
        self.y_max= 3.0
        self.z_min= 1.0
        self.z_max= 2.0
        self.scale=1.0;
        # `slower` divides time in the trajectory expressions; >1 = slower.
        self.slower_min=1.1
        self.slower_max= 1.1
        self.bbox_dynamic=[0.8, 0.8, 0.8]
        self.bbox_static_vert=[0.4, 0.4, 4]
        self.bbox_static_horiz=[0.4, 8, 0.4]
        self.percentage_vert=0.0;
        self.name_obs="obs_"

        #HACK: override the values above with a small fixed scenario.
        self.num_of_dyn_objects=2;
        self.num_of_stat_objects=0;
        self.x_min= 2.0
        self.x_max= 3.0
        self.y_min= -2.0
        self.y_max= 2.0
        #END OF HACK

        self.available_meshes_static=["package://panther/meshes/ConcreteDamage01b/model3.dae", "package://panther/meshes/ConcreteDamage01b/model2.dae"]
        self.available_meshes_dynamic=["package://panther/meshes/ConcreteDamage01b/model4.dae"]

        self.marker_array=MarkerArray();
        self.all_dyn_traj=[]

        self.total_num_obs=self.num_of_dyn_objects + self.num_of_stat_objects

        # Build one DynTraj message and one rviz marker per obstacle.
        for i in range(self.total_num_obs):
            [traj_x, traj_y, traj_z, x, y, z, mesh, bbox]=self.getTrajectoryPosMeshBBox(i);
            self.marker_array.markers.append(self.generateMarker(mesh, bbox, i));

            dynamic_trajectory_msg=DynTraj();
            dynamic_trajectory_msg.use_pwp_field=False;
            dynamic_trajectory_msg.is_agent=False;
            dynamic_trajectory_msg.header.stamp= rospy.Time.now();
            dynamic_trajectory_msg.s_mean = [traj_x, traj_y, traj_z]
            dynamic_trajectory_msg.s_var = ["0.0", "0.0", "0.0"]
            dynamic_trajectory_msg.bbox = [bbox[0], bbox[1], bbox[2]];
            dynamic_trajectory_msg.pos.x=x #Current position, will be updated later
            dynamic_trajectory_msg.pos.y=y #Current position, will be updated later
            dynamic_trajectory_msg.pos.z=z #Current position, will be updated later
            dynamic_trajectory_msg.id = 4000+ i #Current id 4000 to avoid interference with ids from agents #TODO

            self.all_dyn_traj.append(dynamic_trajectory_msg);

        self.pubTraj = rospy.Publisher('/trajs', DynTraj, queue_size=1, latch=True)
        self.pubShapes_dynamic_mesh = rospy.Publisher('/obstacles_mesh', MarkerArray, queue_size=1, latch=True)
        #self.pubGazeboState = rospy.Publisher('/gazebo/set_model_state', ModelState, queue_size=100)

        if(gazebo):
            # Spawn all the objects in Gazebo
            for i in range(self.total_num_obs):
                self.spawnGazeboObstacle(i)
                rospy.sleep(0.5)

    def generateMarker(self, mesh, bbox, i):
        """Build a MESH_RESOURCE rviz marker for obstacle i (pose set later)."""
        marker=Marker();
        marker.id=i;
        marker.ns="mesh";
        marker.header.frame_id="world"
        marker.type=marker.MESH_RESOURCE;
        marker.action=marker.ADD;
        marker.pose.position.x=0.0 #Will be updated later
        marker.pose.position.y=0.0 #Will be updated later
        marker.pose.position.z=0.0 #Will be updated later
        marker.pose.orientation.x=0.0;
        marker.pose.orientation.y=0.0;
        marker.pose.orientation.z=0.0;
        marker.pose.orientation.w=1.0;
        marker.lifetime = rospy.Duration.from_sec(0.0);  # 0.0 == never auto-delete
        marker.mesh_use_embedded_materials=True
        marker.mesh_resource=mesh
        marker.scale.x=bbox[0];
        marker.scale.y=bbox[1];
        marker.scale.z=bbox[2];
        return marker

    def pubTF(self, timer):
        """Timer callback: evaluate each trajectory at the current time and
        publish the DynTraj messages, TF frames and rviz markers."""
        br = tf.TransformBroadcaster()

        marker_array_static_mesh=MarkerArray();
        marker_array_dynamic_mesh=MarkerArray();

        for i in range(self.total_num_obs):
            t_ros=rospy.Time.now()
            t=rospy.get_time(); #Same as before, but it's float

            marker=self.marker_array.markers[i];

            # NOTE(review): eval() on the stored trajectory strings -- these
            # strings are generated locally (trefoil/wave_in_z), not from
            # external input.
            x = eval(self.all_dyn_traj[i].s_mean[0])
            y = eval(self.all_dyn_traj[i].s_mean[1])
            z = eval(self.all_dyn_traj[i].s_mean[2])

            # Set the stamp and the current pos
            self.all_dyn_traj[i].header.stamp= t_ros;
            self.all_dyn_traj[i].pos.x=x #Current position
            self.all_dyn_traj[i].pos.y=y #Current position
            self.all_dyn_traj[i].pos.z=z #Current position

            self.pubTraj.publish(self.all_dyn_traj[i])
            br.sendTransform((x, y, z), (0,0,0,1), t_ros, self.name_obs+str(self.all_dyn_traj[i].id), "world")

            self.marker_array.markers[i].pose.position.x=x;
            self.marker_array.markers[i].pose.position.y=y;
            self.marker_array.markers[i].pose.position.z=z;

            #If you want to move the objets in gazebo. BETTER WITH THE PLUGIN
            # gazebo_state = ModelState()
            # gazebo_state.model_name = str(i)#"all_"+str(i)
            # gazebo_state.pose.position.x = x
            # gazebo_state.pose.position.y = y
            # gazebo_state.pose.position.z = z
            # gazebo_state.reference_frame = "world"
            # self.pubGazeboState.publish(gazebo_state)

            # x=self.x_all[i];
            # y=self.y_all[i];
            # z=self.z_all[i];
            # s="""
            # rosservice call /gazebo/set_model_state "model_state:
            #   model_name: '"""+str(i)+"""'
            #   pose:
            #     position:
            #       x: """+str(x)+"""
            #       y: """+str(y)+"""
            #       z: """+str(z)+"""
            #     orientation:
            #       x: 0.0
            #       y: 0.0
            #       z: 0.0
            #       w: 0.0
            #   twist:
            #     linear:
            #       x: 0.0
            #       y: 0.0
            #       z: 0.0
            #     angular:
            #       x: 0.0
            #       y: 0.0
            #       z: 0.0
            #   reference_frame: 'world'"
            # """
            # os.system(s)

        #If you want to see the objects in rviz
        self.pubShapes_dynamic_mesh.publish(self.marker_array)

    def static(self,x,y,z):
        """Constant-position trajectory expressions."""
        return [str(x), str(y), str(z)]

    # Trefoil knot, https://en.wikipedia.org/wiki/Trefoil_knot
    def trefoil(self,x,y,z,scale_x, scale_y, scale_z, offset, slower):
        """Trefoil-knot trajectory centered at (x, y, z), as strings of `t`."""
        #slower=1.0; #The higher, the slower the obstacles move"
        tt='t/' + str(slower)+'+';
        x_string=str(scale_x)+'*(sin('+tt +str(offset)+') + 2 * sin(2 * '+tt +str(offset)+'))' +'+' + str(x); #'2*sin(t)'
        y_string=str(scale_y)+'*(cos('+tt +str(offset)+') - 2 * cos(2 * '+tt +str(offset)+'))' +'+' + str(y); #'2*cos(t)'
        z_string=str(scale_z)+'*(-sin(3 * '+tt +str(offset)+'))' + '+' + str(z); #'1.0'
        # x_string='1';
        # y_string='1';
        # z_string='1';
        return [x_string, y_string, z_string]

    def wave_in_z(self,x,y,z,scale, offset, slower):
        """Vertical sinusoidal bobbing at fixed (x, y), as strings of `t`."""
        tt='t/' + str(slower)+'+';
        x_string=str(x);
        y_string=str(y)
        z_string=str(scale)+'*(-sin( '+tt +str(offset)+'))' + '+' + str(z);
        # x_string='1';
        # y_string='1';
        # z_string='1';
        return [x_string, y_string, z_string]

    def spawnGazeboObstacle(self, i):
        """Write a temporary URDF for obstacle i and spawn it in Gazebo.

        The move_model plugin inside the URDF animates the model using the
        same trajectory-expression strings published on /trajs.
        """
        rospack = rospkg.RosPack();
        path_panther=rospack.get_path('panther');
        path_file=path_panther+"/meshes/tmp.urdf"

        f = open(path_file, "w") #TODO: This works, but it'd better not having to create this file
        scale=self.marker_array.markers[i].scale;
        scale='"'+str(scale.x)+" "+str(scale.y)+" "+str(scale.z)+'"';

        x=self.all_dyn_traj[i].pos.x;
        y=self.all_dyn_traj[i].pos.y;
        z=self.all_dyn_traj[i].pos.z;

        #Remember NOT to include de <collision> tag (Gazebo goes much slower if you do so)
        f.write("""
<robot name="name_robot">
  <link name="name_link">
    <inertial>
      <mass value="0.200" />
      <origin xyz="0 0 0" rpy="0 0 0" />
      <inertia ixx="5.8083e-4" ixy="0" ixz="0" iyy="3.0833e-5" iyz="0" izz="5.9083e-4" />
    </inertial>
    <visual>
      <origin xyz="0 0 0" rpy="0 0 0" />
      <geometry>
        <mesh filename="""+'"'+self.marker_array.markers[i].mesh_resource+'"'+""" scale="""+scale+"""/>
      </geometry>
    </visual>
  </link>
  <gazebo>
    <plugin name="move_model" filename="libmove_model.so">
      <traj_x>"""+self.all_dyn_traj[i].s_mean[0]+"""</traj_x>
      <traj_y>"""+self.all_dyn_traj[i].s_mean[1]+"""</traj_y>
      <traj_z>"""+self.all_dyn_traj[i].s_mean[2]+"""</traj_z>
    </plugin>
  </gazebo>
</robot>
""")
        # <plugin name="pr2_pose_test" filename="libpr2_pose_test.so"/>
        f.close()

        os.system("rosrun gazebo_ros spawn_model -file `rospack find panther`/meshes/tmp.urdf -urdf -x " + str(x) + " -y " + str(y) + " -z " + str(z) + " -model "+self.name_obs+str(i)); #all_
        os.remove(path_file)
def startNode(total_num_obs, gazebo):
    """Instantiate the simulator, publish its state at 100 Hz, and spin."""
    sim = FakeSim(total_num_obs, gazebo)
    rospy.Timer(rospy.Duration(0.01), sim.pubTF)
    rospy.spin()
if __name__ == '__main__':
    # TODO: use https://docs.python.org/3.3/library/argparse.html
    print("********************************")
    print(sys.argv)

    # BUG FIX: the original left `gazebo` undefined when fewer than three
    # argv entries were given (NameError at startNode), and indexed
    # sys.argv[2] unconditionally when any argument was present
    # (IndexError with exactly one argument).  Defaults are applied
    # first, then overridden by whatever arguments exist.
    # NOTE(review): defaulting gazebo to False (rviz markers only) --
    # confirm the desired default against the launch files.
    total_num_obs = 45
    gazebo = False
    if len(sys.argv) >= 2:
        total_num_obs = int(sys.argv[1])
    if len(sys.argv) >= 3:
        gazebo = ((sys.argv[2] == 'True') or (sys.argv[2] == 'true'))

    ns = rospy.get_namespace()
    try:
        rospy.init_node('dynamic_obstacles')
        startNode(total_num_obs, gazebo)
    except rospy.ROSInterruptException:
        pass
|
'''
Errors in Dagster:
All errors thrown by the Dagster framework inherit from DagsterError. Users should not inherit their
own exceptions from DagsterError. This how dagster communicates errors like definition errors and
invariant violations.
There is another exception base class, DagsterUserCodeExecutionError, which is meant to be used in
concert with the user_code_error_boundary. Dagster calls into user code which can be arbitrary
computation and can itself throw. The pattern here is to use the error boundary to catch this
exception, and then rethrow that exception wrapped in a DagsterUserCodeExecutionError-derived
exception. This new exception is there to embellish the original error with additional context that
the dagster runtime is aware of.
'''
import sys
import traceback
from contextlib import contextmanager
from future.utils import raise_from
from dagster import check
class DagsterError(Exception):
    """Base class for all errors thrown by the Dagster framework."""

    @property
    def is_user_code_error(self):
        # Overridden to True by DagsterUserCodeExecutionError.
        return False
class DagsterInvalidDefinitionError(DagsterError):
    '''Indicates that the user has violated one of the definition rules.'''
class DagsterInvariantViolationError(DagsterError):
    '''Indicates the user has violated a well-defined invariant that can only be determined
    at runtime.
    '''
class DagsterExecutionStepNotFoundError(DagsterError):
    '''
    Thrown when the user specifies execution step keys that do not exist.
    '''

    def __init__(self, *args, **kwargs):
        step_keys = kwargs.pop('step_keys')
        self.step_keys = check.list_param(step_keys, 'step_keys', str)
        super(DagsterExecutionStepNotFoundError, self).__init__(*args, **kwargs)
class DagsterRunNotFoundError(DagsterError):
    '''Raised with the run id that could not be found.'''

    def __init__(self, *args, **kwargs):
        run_id = kwargs.pop('invalid_run_id')
        self.invalid_run_id = check.str_param(run_id, 'invalid_run_id')
        super(DagsterRunNotFoundError, self).__init__(*args, **kwargs)
class DagsterStepOutputNotFoundError(DagsterError):
    '''Raised with the step key and output name that could not be found.'''

    def __init__(self, *args, **kwargs):
        step_key = kwargs.pop('step_key')
        output_name = kwargs.pop('output_name')
        self.step_key = check.str_param(step_key, 'step_key')
        self.output_name = check.str_param(output_name, 'output_name')
        super(DagsterStepOutputNotFoundError, self).__init__(*args, **kwargs)
def _add_inner_exception_for_py2(msg, exc_info):
if sys.version_info[0] == 2:
return (
msg
+ '\n\nThe above exception was the direct cause of the following exception:\n\n'
+ ''.join(traceback.format_exception(*exc_info))
)
return msg
@contextmanager
def user_code_error_boundary(error_cls, msg_fn, **kwargs):
    '''
    Wraps the execution of user-space code in an error boundary. This places a uniform
    policy around an user code invoked by the framework. This ensures that all user
    errors are wrapped in an exception derived from DagsterUserCodeExecutionError,
    and that the original stack trace of the user error is preserved, so that it
    can be reported without confusing framework code in the stack trace, if a
    tool author wishes to do so. This has been especially helpful in a notebooking
    context.

    Example:

        with user_code_error_boundary(
            # Pass a class that inherits from DagsterUserCodeExecutionError
            DagstermillExecutionError,
            # Pass a function that produces a message
            lambda: 'Error occurred during the execution of Dagstermill solid '
            '{solid_name}: {notebook_path}'.format(
                solid_name=name, notebook_path=notebook_path
            ),
        ):
            call_user_provided_function()
    '''
    check.callable_param(msg_fn, 'msg_fn')
    check.subclass_param(error_cls, 'error_cls', DagsterUserCodeExecutionError)

    try:
        yield
    except DagsterError as de:
        # The system has thrown an error that is part of the user-framework contract
        raise de
    except Exception as e:  # pylint: disable=W0703
        # An exception has been thrown by user code and computation should cease
        # with the error reported further up the stack
        # raise_from preserves the original exception as the cause on both
        # Python 2 and 3.
        raise_from(
            error_cls(msg_fn(), user_exception=e, original_exc_info=sys.exc_info(), **kwargs), e
        )
class DagsterUserCodeExecutionError(DagsterError):
    '''
    This is the base class for any exception that is meant to wrap an Exception thrown by user code.
    It wraps that existing user code. The original_exc_info argument to the ctor is meant to be a
    sys.exc_info at the site of constructor.
    '''

    def __init__(self, *args, **kwargs):
        # original_exc_info should be gotten from a sys.exc_info() call at the
        # callsite inside of the exception handler. this will allow consuming
        # code to *re-raise* the user error in it's original format
        # for cleaner error reporting that does not have framework code in it
        user_exception = check.inst_param(kwargs.pop('user_exception'), 'user_exception', Exception)
        original_exc_info = check.tuple_param(kwargs.pop('original_exc_info'), 'original_exc_info')

        # A (None, None, None) tuple means there was no active exception --
        # a programming error at the call site.
        if original_exc_info[0] is None:
            raise Exception('bad dude {}'.format(type(self)))

        # On Python 2, fold the inner traceback into the message text.
        msg = _add_inner_exception_for_py2(args[0], original_exc_info)

        super(DagsterUserCodeExecutionError, self).__init__(msg, *args[1:], **kwargs)

        self.user_exception = check.opt_inst_param(user_exception, 'user_exception', Exception)
        self.original_exc_info = original_exc_info

    @property
    def is_user_specified_failure(self):
        # True when user code raised an explicit dagster Failure event.
        from dagster.core.definitions.events import Failure

        return isinstance(self.user_exception, Failure)

    @property
    def user_specified_failure(self):
        # Only valid when is_user_specified_failure is True.
        check.invariant(
            self.is_user_specified_failure,
            (
                'Can only call if user-specified failure (i.e. the user threw '
                'an explicit Failure event in user-code'
            ),
        )
        return self.user_exception

    @property
    def is_user_code_error(self):
        return True
class DagsterTypeCheckError(DagsterUserCodeExecutionError):
    '''Indicates an error in the solid type system at runtime. E.g. a solid receives an
    unexpected input, or produces an output that does not match the type of the output definition.
    '''
class DagsterExecutionStepExecutionError(DagsterUserCodeExecutionError):
    '''Indicates an error occurred during the body of execution step execution'''

    def __init__(self, *args, **kwargs):
        # Pop and validate the required step metadata, in order.
        for attr in ('step_key', 'solid_name', 'solid_def_name'):
            setattr(self, attr, check.str_param(kwargs.pop(attr), attr))
        super(DagsterExecutionStepExecutionError, self).__init__(*args, **kwargs)
class DagsterResourceFunctionError(DagsterUserCodeExecutionError):
    '''Indicates an error occurred during the body of resource_fn in a ResourceDefinition'''
class DagsterInvalidConfigError(DagsterError):
    '''Raised when the environment config for a pipeline fails evaluation;
    carries the individual evaluation errors and a combined message.'''

    def __init__(self, pipeline, errors, config_value, *args, **kwargs):
        from dagster.core.definitions import PipelineDefinition
        from dagster.core.types.evaluator.errors import friendly_string_for_error, EvaluationError

        self.pipeline = check.opt_inst_param(pipeline, 'pipeline', PipelineDefinition)
        self.errors = check.list_param(errors, 'errors', of_type=EvaluationError)
        self.config_value = config_value

        if pipeline is None:
            error_msg = 'Config errors:'
        else:
            error_msg = 'Pipeline "{pipeline}" config errors:'.format(pipeline=pipeline.name)

        # One friendly string per evaluation error, appended to the header
        # as a numbered list (numbering starts at 1).
        error_messages = [friendly_string_for_error(err) for err in self.errors]
        for number, friendly in enumerate(error_messages, start=1):
            error_msg += '\n    Error {i_error}: {error_message}'.format(
                i_error=number, error_message=friendly
            )

        self.message = error_msg
        self.error_messages = error_messages

        super(DagsterInvalidConfigError, self).__init__(error_msg, *args, **kwargs)
class DagsterUnmetExecutorRequirementsError(DagsterError):
    '''Indicates the resolved executor is incompatible with the state of other systems
    such as the DagsterInstance or system storage configuration.
    '''
|
#!/usr/bin/env python
import getopt
from sys import argv
from os import system, path
import os
from repo_handler import create_repo_handler
build_log = "__build.log";
para = 0;
def generate_arg_file(src_dir, src_file, arg_file, deps_dir, force_rebuilt):
    # Build the repo and produce the compiler-argument file for src_file.
    # Tries an incremental build first (unless force_rebuilt is set), then
    # falls back to a full build.  Returns True on success.
    # NOTE(review): relies on the module-level globals build_cmd, para and
    # build_log that are assigned in the __main__ section.
    if not force_rebuilt:
        print "Normal Compiling....";
        cmd = build_cmd;
        if para != 0:
            cmd += " -j " + str(para);
        cmd += " -p " + deps_dir + " -d " + src_file + " -c " + src_dir + " " + arg_file + " >>" + build_log + " 2>&1";
        ret = system(cmd);
    else:
        ret = 1;  # force the full build below
    if (ret != 0):
        print "Full Compiling...."
        cmd = build_cmd;
        if para != 0:
            cmd += " -j " + str(para);
        cmd += " -p " + deps_dir + " -d " + src_file +" " + src_dir + " " + arg_file + " >>" + build_log + " 2>&1";
        ret = system(cmd);
        if (ret != 0):
            return False;
    return True;
def parseArgFile(src_dir, arg_file):
    """Parse a generated compiler-argument file.

    The file's first line is the build directory ("." meaning src_dir);
    the second line holds the compiler arguments.  "-o <file>" pairs and
    options the differ cannot handle are stripped.

    Returns (build_dir, filtered_args_string).
    """
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(arg_file, "r") as f:
        lines = f.readlines()
    assert len(lines) > 1
    build_dir = lines[0].strip()
    if build_dir == ".":
        build_dir = src_dir
    args = lines[1].strip().split()
    # Options the downstream clang/pdiffer invocation cannot handle.
    disabled_options = set(["-c", "-Wlogical-op", "-fno-delete-null-pointer-checks", "-fno-strict-overflow", "-fno-strict-overflow", "-Wlogical-op", "-Wjump-misses-init", "-prefer-non-pic", "-prefer-pic"])
    new_args = []
    i = 0
    while i < len(args):
        if args[i] == "-o":
            i += 1  # skip "-o" AND (via the unconditional bump) its file name
        elif not (args[i] in disabled_options):
            new_args.append(args[i])
        i += 1
    return (build_dir, " ".join(new_args))
def dump_rev_source(src_dir, src_file, arg_file, dump_file):
    # Preprocess (clang -E) src_file with the arguments recorded in
    # arg_file, writing the result to dump_file.  Runs from the recorded
    # build directory and restores the working directory afterwards.
    # Returns True when clang exits successfully.
    (build_dir, build_arg) = parseArgFile(src_dir, arg_file);
    abs_src_file = path.abspath(src_dir + "/" + src_file);
    cmd = "clang " + build_arg + " -E " + abs_src_file + " -o " + dump_file;
    print cmd;
    ori_dir = os.getcwd();
    os.chdir(build_dir);
    ret = system(cmd);
    os.chdir(ori_dir);
    return ret == 0;
def diff(src_dir1, src_file1, arg_file1, src_dir2, src_file2, arg_file2):
    """Run the external pdiffer on the two sources; True when it exits 0."""
    # The trailing "" keeps the command's trailing space, matching the
    # original invocation exactly.
    pieces = [differ_cmd, src_dir1, src_file1, src_dir2, src_file2,
              "-argf", arg_file1, "-argf2", arg_file2, "-print-diff-only", ""]
    cmd = " ".join(pieces)
    return system(cmd) == 0
if __name__ == "__main__":
    # Driver: walk revisions of a repository, build each (rev, parent)
    # pair, and record revisions whose single-C-file change the external
    # pdiffer accepts.  Optionally dump the preprocessed sources.
    fulldir = path.abspath(path.dirname(argv[0]));
    differ_cmd = fulldir + "/../build/src/pdiffer";
    opts, args = getopt.getopt(argv[1:], "ay:j:", ["depdir=", "o-revs=", "i-revs=",
        "sid=", "eid=", "print-match", "dump-source="]);
    # Defaults for the option values below.
    deps_dir = os.path.abspath("../build/benchmarks/php-deps");
    out_rev_file = "";
    in_rev_file = "";
    sid = 0;
    eid = 10000000;
    fix_only = True;
    year_limit = 2010;
    dump_dir = "";
    for o, a in opts:
        if o == "-a":
            fix_only = False;
        elif o == "-j":
            para = int(a);  # parallel build jobs (module-level global)
        elif o == "-y":
            year_limit = int(a)
        elif o == "--depdir":
            deps_dir = a;
        elif o == "--o-revs":
            out_rev_file = a;
        elif o == "--i-revs":
            in_rev_file = a;
        elif o == "--sid":
            sid = int(a);
        elif o == "--eid":
            eid = int(a);
        elif o == "--dump-source":
            dump_dir = a;
    # Positional arguments: repo path, repo type, build command, result file.
    repo_dir = args[0];
    repo_type = args[1];
    repo = create_repo_handler(repo_dir, repo_type);
    build_cmd = args[2];
    rev_result_file = args[3];
    if in_rev_file == "":
        revs = repo.get_revs(fix_only, year_limit);
    else:
        # Read (rev, parent_rev) pairs from a file; a line with a single
        # token means "look up the rev's parent in the repo".
        fin = open(in_rev_file, "r");
        lines = fin.readlines();
        revs = [];
        for line in lines:
            tokens = line.strip().split();
            if (len(tokens) == 1):
                revs.append( (tokens[0], repo.get_parent_rev(tokens[0]), ""));
            else:
                revs.append( (tokens[0], tokens[1], "") );
        fin.close();
    if out_rev_file != "":
        # Only dump the selected revision list, then exit without building.
        fout = open(out_rev_file, "w");
        for rev, parent_rev, _ in revs:
            print >> fout, rev, parent_rev;
        fout.close();
        exit(0);
    if sid >= len(revs):
        print "sid is larger than the total number of revs, exit";
        exit(0);
    if eid > len(revs):
        eid = len(revs);
    # Two scratch checkouts: repo1 at the parent rev, repo2 at the rev.
    tmp_dir = "__tmp";
    system("rm -rf " + tmp_dir);
    system("mkdir " + tmp_dir);
    tmp_repo1 = tmp_dir + "/src1";
    tmp_repo2 = tmp_dir + "/src2";
    system("cp -rf " + repo_dir + " " + tmp_repo1);
    system("cp -rf " + repo_dir + " " + tmp_repo2);
    system("rm -rf " + build_log);
    repo1 = create_repo_handler(tmp_repo1, repo_type);
    repo2 = create_repo_handler(tmp_repo2, repo_type);
    if dump_dir != "":
        system("rm -rf " + dump_dir);
        system("mkdir " + dump_dir);
        dump_dir = os.path.abspath(dump_dir);
    fout = open(rev_result_file, "w");
    total_cnt = 0;
    first = True;  # force a full rebuild on the first processed revision
    dump_failed = [];
    for i in range(sid, eid):
        (rev, parent_rev, _) = revs[i];
        print "Processing rev: ", rev;
        diff_res = repo.get_diff_for_c(parent_rev, rev);
        if (len(diff_res) == 0):
            print "No source file changed!";
            continue;
        # skip this revision because more than one file changed
        if (len(diff_res) > 1):
            print "Too many file modified!";
            continue;
        src_file = diff_res.keys()[0];
        if (not src_file.endswith(".c")):
            print "Modified file " + src_file + " not supported!";
            continue;
        print "src file: ", src_file;
        print "diff size: ", diff_res[src_file][0];
        repo1.switch_to_rev(parent_rev);
        repo2.switch_to_rev(rev);
        tmp_argfile_1 = tmp_dir + "/__arg1";
        tmp_argfile_2 = tmp_dir + "/__arg2";
        if not generate_arg_file(tmp_repo1, src_file, tmp_argfile_1, deps_dir, first):
            print "Built failed!";
            continue;
        if not generate_arg_file(tmp_repo2, src_file, tmp_argfile_2, deps_dir, first):
            print "Built failed!";
            continue;
        first = False;
        if (diff(tmp_repo1, src_file, tmp_argfile_1, tmp_repo2, src_file, tmp_argfile_2)):
            total_cnt += 1;
            if (dump_dir != ""):
                # Dump the preprocessed source of both sides; on failure,
                # clean up and remember the rev instead of recording it.
                ret = dump_rev_source(tmp_repo1, src_file, tmp_argfile_1, dump_dir + "/" + rev + "-1.c");
                ret &= dump_rev_source(tmp_repo2, src_file, tmp_argfile_2, dump_dir + "/" + rev + "-2.c");
                if not ret:
                    system("rm -f " + dump_dir + "/" + rev + "-1.c");
                    system("rm -f " + dump_dir + "/" + rev + "-2.c");
                    dump_failed.append(rev);
                else:
                    system("cp -f " + tmp_argfile_1 + " " + dump_dir + "/" + rev + "-1.argf");
                    system("cp -f " + tmp_argfile_2 + " " + dump_dir + "/" + rev + "-2.argf");
                    print >>fout, rev;
            else:
                print >>fout, rev;
        #system("rm -rf " + tmp_argfile_1);
        #system("rm -rf " + tmp_argfile_2);
    fout.close();
    #system("rm -rf " + tmp_dir);
    print "Total 1 mod rev: ", total_cnt;
    if len(dump_failed) != 0:
        print "Failed dump revs:";
        for rev in dump_failed:
            print rev;
|
# -*- coding: utf-8 -*-
import glfw
import imgui
import OpenGL.GL as gl
from imgui.integrations.glfw import GlfwRenderer
import flags
from project import show_labelFM_info
from project import show_labelFM_state
from project import show_labelFM_window
from project import show_labelFM_setting_APP
def impl_glfw_init():
    """Create a GLFW window with an OpenGL 3.3 core-profile context.

    Returns:
        The GLFW window handle with its context made current.

    Exits the process (status 1) if GLFW initialization or window
    creation fails.
    """
    width, height = 1080, 960
    window_name = "LabelFM ImGui/GLFW3 Space"
    if not glfw.init():
        print("Could not initialize OpenGL context")
        exit(1)
    # OS X supports only forward-compatible core profiles from 3.2
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)
    # Create a windowed mode window and its OpenGL context
    window = glfw.create_window(
        int(width), int(height), window_name, None, None
    )
    # Bug fix: validate the window BEFORE making its context current.
    # Previously make_context_current(window) ran first, so a failed
    # create_window passed None into make_context_current.
    if not window:
        glfw.terminate()
        print("Could not initialize Window")
        exit(1)
    glfw.make_context_current(window)
    return window
def main():
    """Run the LabelFM application: window setup, menu bar, and per-frame render loop."""
    # Local aliases for the shared flag dictionaries defined in `flags`.
    LABELFM_WINDOW = flags.LABELFM_WINDOW
    LABELFM_SETTING_APP = flags.LABELFM_SETTING_APP
    LABELFM_STATE_WINDOW = flags.LABELFM_STATE_WINDOW
    LABELFM_INFO_WINDOW = flags.LABELFM_INFO_WINDOW
    LABELFM_ENV = flags.LABELFM_ENV
    LABELFM_ANNO_PARAMS = flags.LABELFM_ANNO_PARAMS
    # imgui context + GLFW window + renderer backend initialization
    imgui.create_context()
    window = impl_glfw_init()
    impl = GlfwRenderer(window)
    # Main loop: poll input, build the UI, then render one frame per iteration.
    while not glfw.window_should_close(window):
        glfw.poll_events()
        impl.process_inputs()
        imgui.new_frame()
        # LabelFM menu bar
        if imgui.begin_main_menu_bar():
            # Project sub-menu: toggle the project window or quit the app.
            if imgui.begin_menu("Project", True):
                clicked_show, selected_show = imgui.menu_item("Show Project", None, LABELFM_WINDOW["OPENED"], True)
                clicked_quit, selected_quit = imgui.menu_item("Quit", None, False, True)
                if clicked_quit: exit(1)
                if clicked_show: LABELFM_WINDOW["OPENED"] = selected_show
                imgui.end_menu()
            # Settings sub-menu: toggle the settings window.
            if imgui.begin_menu("Settings", True):
                clicked_show, selected_show = imgui.menu_item("Show Settings", None, LABELFM_SETTING_APP["OPENED"], True)
                if clicked_show: LABELFM_SETTING_APP["OPENED"] = selected_show
                imgui.end_menu()
            # Help sub-menu: toggle the state and info windows.
            if imgui.begin_menu("Help", True):
                clicked_show, selected_show = imgui.menu_item("Show States", None, LABELFM_STATE_WINDOW["OPENED"], True)
                clicked_info, selected_info = imgui.menu_item("Info", None, LABELFM_INFO_WINDOW["OPENED"], True)
                if clicked_show: LABELFM_STATE_WINDOW["OPENED"] = selected_show
                if clicked_info: LABELFM_INFO_WINDOW["OPENED"] = selected_info
                imgui.end_menu()
            imgui.end_main_menu_bar()
        # Draw whichever LabelFM windows are currently toggled open.
        if LABELFM_INFO_WINDOW["OPENED"]: show_labelFM_info(LABELFM_INFO_WINDOW)
        if LABELFM_WINDOW["OPENED"]: show_labelFM_window(LABELFM_WINDOW, LABELFM_ENV, LABELFM_ANNO_PARAMS)
        if LABELFM_STATE_WINDOW["OPENED"]: show_labelFM_state(LABELFM_WINDOW, LABELFM_STATE_WINDOW, LABELFM_ENV)
        if LABELFM_SETTING_APP["OPENED"]: show_labelFM_setting_APP(LABELFM_WINDOW, LABELFM_SETTING_APP, LABELFM_ENV)
        # Clear to white, then hand the imgui draw data to the GL backend.
        gl.glClearColor(1., 1., 1., 1)
        gl.glClear(gl.GL_COLOR_BUFFER_BIT)
        imgui.render()
        impl.render(imgui.get_draw_data())
        glfw.swap_buffers(window)
    # Shutdown: release the renderer backend, then GLFW itself.
    impl.shutdown()
    glfw.terminate()
# Script entry point.
if __name__ == "__main__":
    main()
|
"""Generated wrapper for BSend Solidity contract."""
# pylint: disable=too-many-arguments
import json
import time
from typing import ( # pylint: disable=unused-import
List,
Optional,
Tuple,
Union,
)
from eth_utils import to_checksum_address
from hexbytes import HexBytes
from web3.contract import ContractFunction
from web3.datastructures import AttributeDict
from web3.exceptions import ContractLogicError
from moody import Bolors
from moody.libeb import MiliDoS
from moody.m.bases import ContractMethod, Validator, ContractBase, Signatures
from moody.m.tx_params import TxParams
# Try to import a custom validator class definition; if there isn't one,
# declare one that we can instantiate for the default argument to the
# constructor for BSend below.
# Use the package's BSendValidator if it exists; otherwise define a no-op
# fallback so the default-constructed validator still satisfies the API.
try:
    # both mypy and pylint complain about what we're doing here, but this
    # works just fine, so their messages have been disabled here.
    from . import (  # type: ignore # pylint: disable=import-self
        BSendValidator,
    )
except ImportError:
    class BSendValidator(  # type: ignore
            Validator
    ):
        """No-op input validator."""

# Optional chain middleware; its absence is not an error.
try:
    from .middleware import MIDDLEWARE  # type: ignore
except ImportError:
    pass
class AddSignerMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the addSigner method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data.

        :param elib: MiliDoS handle providing web3 access.
        :param contract_address: address of the deployed contract.
        :param contract_function: bound web3 ContractFunction for addSigner.
        :param validator: input validator; also supplies the method signature
            used in progress messages (must not be None).
        """
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("addSigner")

    def validate_and_normalize_inputs(self, account: str) -> any:
        """Validate the addSigner input and return the checksummed address."""
        self.validator.assert_valid(
            method_name='addSigner',
            parameter_name='account',
            argument_value=account,
        )
        account = self.validate_and_checksum_address(account)
        return (account)

    def block_send(self, account: str, _gaswei: int, _pricewei: int, _valeth: int = 0, _debugtx: bool = False, _receipList: bool = False) -> None:
        """Build, sign and broadcast an addSigner transaction.

        :param account: address to grant signer rights to.
        :param _gaswei: gas limit for the transaction.
        :param _pricewei: gas price in wei.
        :param _valeth: optional ETH value (wei) attached to the transaction.
        :param _debugtx: when True, print the built transaction and receipt.
        :param _receipList: when True, block until the receipt is mined.
        """
        _fn = self._underlying_method(account)
        try:
            _t = _fn.buildTransaction({
                'from': self._operate,
                'gas': _gaswei,
                'gasPrice': _pricewei
            })
            _t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
            if _valeth > 0:
                _t['value'] = _valeth
            if _debugtx:
                print(f"======== Signing ✅ by {self._operate}")
                print(f"======== Transaction ✅ check")
                print(_t)
            # Only sign/broadcast when buildTransaction produced calldata.
            if 'data' in _t:
                signed = self._web3_eth.account.sign_transaction(_t)
                txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
                tx_receipt = None
                if _receipList is True:
                    print(f"======== awaiting Confirmation 🚸️ {self.sign}")
                    tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
                    if _debugtx:
                        print("======== TX Result ✅")
                        print(tx_receipt)
                print(f"======== TX blockHash ✅")
                if tx_receipt is not None:
                    print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
                else:
                    print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET} - broadcast hash")
            # Without a receipt wait, pause so consecutive sends don't reuse a nonce.
            if _receipList is False:
                time.sleep(self._wait)
        except ContractLogicError as er:
            print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: add_signer")
        except ValueError as err:
            if "message" in err.args[0]:
                message = err.args[0]["message"]
                # NOTE(review): the two messages below say "set_asset_token";
                # looks like a leftover from the code-generator template.
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
            else:
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")

    def send_transaction(self, account: str, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (account) = self.validate_and_normalize_inputs(account)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account).transact(tx_params.as_dict())

    def build_transaction(self, account: str, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        (account) = self.validate_and_normalize_inputs(account)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account).buildTransaction(tx_params.as_dict())

    def estimate_gas(self, account: str, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        (account) = self.validate_and_normalize_inputs(account)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(account).estimateGas(tx_params.as_dict())
class BulkSendTokenMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the bulkSendToken method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data.

        :param elib: MiliDoS handle providing web3 access.
        :param contract_address: address of the deployed contract.
        :param contract_function: bound web3 ContractFunction for bulkSendToken.
        :param validator: input validator; also supplies the method signature
            (must not be None).
        """
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("bulkSendToken")

    def validate_and_normalize_inputs(self, token_addr: str, addresses: List[str], amounts: List[int]) -> any:
        """Validate the bulkSendToken inputs; returns the normalized triple."""
        self.validator.assert_valid(
            method_name='bulkSendToken',
            parameter_name='tokenAddr',
            argument_value=token_addr,
        )
        token_addr = self.validate_and_checksum_address(token_addr)
        self.validator.assert_valid(
            method_name='bulkSendToken',
            parameter_name='addresses',
            argument_value=addresses,
        )
        self.validator.assert_valid(
            method_name='bulkSendToken',
            parameter_name='amounts',
            argument_value=amounts,
        )
        return (token_addr, addresses, amounts)

    def block_send(self, token_addr: str, addresses: List[str], amounts: List[int], _gaswei: int, _pricewei: int, _valeth: int = 0, _debugtx: bool = False, _receipList: bool = False) -> None:
        """Build, sign and broadcast a bulkSendToken transaction.

        :param token_addr: ERC-20 token contract address.
        :param addresses: recipient addresses (parallel to `amounts`).
        :param amounts: token amounts per recipient.
        :param _gaswei: gas limit for the transaction.
        :param _pricewei: gas price in wei.
        :param _valeth: optional ETH value (wei) attached to the transaction.
        :param _debugtx: when True, print the built transaction and receipt.
        :param _receipList: when True, block until the receipt is mined.
        """
        _fn = self._underlying_method(token_addr, addresses, amounts)
        try:
            _t = _fn.buildTransaction({
                'from': self._operate,
                'gas': _gaswei,
                'gasPrice': _pricewei
            })
            _t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
            if _valeth > 0:
                _t['value'] = _valeth
            if _debugtx:
                print(f"======== Signing ✅ by {self._operate}")
                print(f"======== Transaction ✅ check")
                print(_t)
            # Only sign/broadcast when buildTransaction produced calldata.
            if 'data' in _t:
                signed = self._web3_eth.account.sign_transaction(_t)
                txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
                tx_receipt = None
                if _receipList is True:
                    print(f"======== awaiting Confirmation 🚸️ {self.sign}")
                    tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
                    if _debugtx:
                        print("======== TX Result ✅")
                        print(tx_receipt)
                print(f"======== TX blockHash ✅")
                if tx_receipt is not None:
                    print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
                else:
                    print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET} - broadcast hash")
            # Without a receipt wait, pause so consecutive sends don't reuse a nonce.
            if _receipList is False:
                time.sleep(self._wait)
        except ContractLogicError as er:
            print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: bulk_send_token")
        except ValueError as err:
            if "message" in err.args[0]:
                message = err.args[0]["message"]
                # NOTE(review): messages below say "set_asset_token" -- generator template leftover.
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
            else:
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")

    def send_transaction(self, token_addr: str, addresses: List[str], amounts: List[int], tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (token_addr, addresses, amounts) = self.validate_and_normalize_inputs(token_addr, addresses, amounts)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_addr, addresses, amounts).transact(tx_params.as_dict())

    def build_transaction(self, token_addr: str, addresses: List[str], amounts: List[int], tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        (token_addr, addresses, amounts) = self.validate_and_normalize_inputs(token_addr, addresses, amounts)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_addr, addresses, amounts).buildTransaction(tx_params.as_dict())

    def estimate_gas(self, token_addr: str, addresses: List[str], amounts: List[int], tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        (token_addr, addresses, amounts) = self.validate_and_normalize_inputs(token_addr, addresses, amounts)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_addr, addresses, amounts).estimateGas(tx_params.as_dict())
class BulkSendTrxMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the bulkSendTrx method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data.

        :param elib: MiliDoS handle providing web3 access.
        :param contract_address: address of the deployed contract.
        :param contract_function: bound web3 ContractFunction for bulkSendTrx.
        :param validator: input validator; also supplies the method signature
            (must not be None).
        """
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("bulkSendTrx")

    def validate_and_normalize_inputs(self, addresses: List[str], amounts: List[int]) -> any:
        """Validate the bulkSendTrx inputs; returns the normalized pair."""
        self.validator.assert_valid(
            method_name='bulkSendTrx',
            parameter_name='addresses',
            argument_value=addresses,
        )
        self.validator.assert_valid(
            method_name='bulkSendTrx',
            parameter_name='amounts',
            argument_value=amounts,
        )
        return (addresses, amounts)

    def block_send(self, addresses: List[str], amounts: List[int], _gaswei: int, _pricewei: int, _valeth: int = 0, _debugtx: bool = False, _receipList: bool = False) -> None:
        """Build, sign and broadcast a bulkSendTrx transaction.

        :param addresses: recipient addresses (parallel to `amounts`).
        :param amounts: native-coin amounts per recipient.
        :param _gaswei: gas limit for the transaction.
        :param _pricewei: gas price in wei.
        :param _valeth: optional ETH value (wei) attached to the transaction.
        :param _debugtx: when True, print the built transaction and receipt.
        :param _receipList: when True, block until the receipt is mined.
        """
        _fn = self._underlying_method(addresses, amounts)
        try:
            _t = _fn.buildTransaction({
                'from': self._operate,
                'gas': _gaswei,
                'gasPrice': _pricewei
            })
            _t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
            if _valeth > 0:
                _t['value'] = _valeth
            if _debugtx:
                print(f"======== Signing ✅ by {self._operate}")
                print(f"======== Transaction ✅ check")
                print(_t)
            # Only sign/broadcast when buildTransaction produced calldata.
            if 'data' in _t:
                signed = self._web3_eth.account.sign_transaction(_t)
                txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
                tx_receipt = None
                if _receipList is True:
                    print(f"======== awaiting Confirmation 🚸️ {self.sign}")
                    tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
                    if _debugtx:
                        print("======== TX Result ✅")
                        print(tx_receipt)
                print(f"======== TX blockHash ✅")
                if tx_receipt is not None:
                    print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
                else:
                    print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET} - broadcast hash")
            # Without a receipt wait, pause so consecutive sends don't reuse a nonce.
            if _receipList is False:
                time.sleep(self._wait)
        except ContractLogicError as er:
            print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: bulk_send_trx")
        except ValueError as err:
            if "message" in err.args[0]:
                message = err.args[0]["message"]
                # NOTE(review): messages below say "set_asset_token" -- generator template leftover.
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
            else:
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")

    def send_transaction(self, addresses: List[str], amounts: List[int], tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (addresses, amounts) = self.validate_and_normalize_inputs(addresses, amounts)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(addresses, amounts).transact(tx_params.as_dict())

    def build_transaction(self, addresses: List[str], amounts: List[int], tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        (addresses, amounts) = self.validate_and_normalize_inputs(addresses, amounts)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(addresses, amounts).buildTransaction(tx_params.as_dict())

    def estimate_gas(self, addresses: List[str], amounts: List[int], tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        (addresses, amounts) = self.validate_and_normalize_inputs(addresses, amounts)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(addresses, amounts).estimateGas(tx_params.as_dict())
class ClaimInitMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the claimInit method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data.

        :param elib: MiliDoS handle providing web3 access.
        :param contract_address: address of the deployed contract.
        :param contract_function: bound web3 ContractFunction for claimInit.
        :param validator: signature provider (must not be None).
        """
        # Fix: forward the validator to the base class like the other method
        # wrappers in this module do; it was previously silently dropped here.
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("claimInit")

    def block_send(self, _gaswei: int, _pricewei: int, _valeth: int = 0, _debugtx: bool = False, _receipList: bool = False) -> None:
        """Build, sign and broadcast a claimInit transaction.

        :param _gaswei: gas limit for the transaction.
        :param _pricewei: gas price in wei.
        :param _valeth: optional ETH value (wei) attached to the transaction.
        :param _debugtx: when True, print the built transaction and receipt.
        :param _receipList: when True, block until the receipt is mined.
        """
        _fn = self._underlying_method()
        try:
            _t = _fn.buildTransaction({
                'from': self._operate,
                'gas': _gaswei,
                'gasPrice': _pricewei
            })
            _t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
            if _valeth > 0:
                _t['value'] = _valeth
            if _debugtx:
                print(f"======== Signing ✅ by {self._operate}")
                print(f"======== Transaction ✅ check")
                print(_t)
            # Only sign/broadcast when buildTransaction produced calldata.
            if 'data' in _t:
                signed = self._web3_eth.account.sign_transaction(_t)
                txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
                tx_receipt = None
                if _receipList is True:
                    print(f"======== awaiting Confirmation 🚸️ {self.sign}")
                    tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
                    if _debugtx:
                        print("======== TX Result ✅")
                        print(tx_receipt)
                print(f"======== TX blockHash ✅")
                if tx_receipt is not None:
                    print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
                else:
                    print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET} - broadcast hash")
            # Without a receipt wait, pause so consecutive sends don't reuse a nonce.
            if _receipList is False:
                time.sleep(self._wait)
        except ContractLogicError as er:
            print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: claim_init")
        except ValueError as err:
            if "message" in err.args[0]:
                message = err.args[0]["message"]
                # NOTE(review): messages below say "set_asset_token" -- generator template leftover.
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
            else:
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")

    def send_transaction(self, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().transact(tx_params.as_dict())

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().buildTransaction(tx_params.as_dict())

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(tx_params.as_dict())
class DepositMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the deposit method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data.

        :param elib: MiliDoS handle providing web3 access.
        :param contract_address: address of the deployed contract.
        :param contract_function: bound web3 ContractFunction for deposit.
        :param validator: signature provider (must not be None).
        """
        # Fix: forward the validator to the base class like the other method
        # wrappers in this module do; it was previously silently dropped here.
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("deposit")

    # Fix: annotated -> bool but there is no return statement; it returns None.
    def block_send(self, _gaswei: int, _pricewei: int, _valeth: int = 0, _debugtx: bool = False, _receipList: bool = False) -> None:
        """Build, sign and broadcast a deposit transaction.

        :param _gaswei: gas limit for the transaction.
        :param _pricewei: gas price in wei.
        :param _valeth: optional ETH value (wei) attached to the transaction.
        :param _debugtx: when True, print the built transaction and receipt.
        :param _receipList: when True, block until the receipt is mined.
        """
        _fn = self._underlying_method()
        try:
            _t = _fn.buildTransaction({
                'from': self._operate,
                'gas': _gaswei,
                'gasPrice': _pricewei
            })
            _t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
            if _valeth > 0:
                _t['value'] = _valeth
            if _debugtx:
                print(f"======== Signing ✅ by {self._operate}")
                print(f"======== Transaction ✅ check")
                print(_t)
            # Only sign/broadcast when buildTransaction produced calldata.
            if 'data' in _t:
                signed = self._web3_eth.account.sign_transaction(_t)
                txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
                tx_receipt = None
                if _receipList is True:
                    print(f"======== awaiting Confirmation 🚸️ {self.sign}")
                    tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
                    if _debugtx:
                        print("======== TX Result ✅")
                        print(tx_receipt)
                print(f"======== TX blockHash ✅")
                if tx_receipt is not None:
                    print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
                else:
                    print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET} - broadcast hash")
            # Without a receipt wait, pause so consecutive sends don't reuse a nonce.
            if _receipList is False:
                time.sleep(self._wait)
        except ContractLogicError as er:
            print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: deposit")
        except ValueError as err:
            if "message" in err.args[0]:
                message = err.args[0]["message"]
                # NOTE(review): messages below say "set_asset_token" -- generator template leftover.
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
            else:
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")

    def send_transaction(self, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().transact(tx_params.as_dict())

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().buildTransaction(tx_params.as_dict())

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(tx_params.as_dict())
class EthSendFeeMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the ethSendFee method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data.

        :param elib: MiliDoS handle providing web3 access.
        :param contract_address: address of the deployed contract.
        :param contract_function: bound web3 ContractFunction for ethSendFee.
        :param validator: signature provider (must not be None).
        """
        # Fix: forward the validator to the base class like the other method
        # wrappers in this module do; it was previously silently dropped here.
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("ethSendFee")

    def block_call(self, debug: bool = False) -> int:
        """Read-only eth_call returning the current ethSendFee as an int."""
        _fn = self._underlying_method()
        returned = _fn.call({
            'from': self._operate
        })
        return int(returned)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(tx_params.as_dict())
class GetBalanceMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the getBalance method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data."""
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("getBalance")

    def validate_and_normalize_inputs(self, addr: str) -> any:
        """Validate the inputs to the getBalance method."""
        self.validator.assert_valid(
            method_name='getBalance',
            parameter_name='addr',
            argument_value=addr,
        )
        return (self.validate_and_checksum_address(addr))

    def block_call(self, addr: str, debug: bool = False) -> int:
        """Read-only eth_call returning the balance of *addr* as an int."""
        call_opts = {'from': self._operate}
        raw_value = self._underlying_method(addr).call(call_opts)
        return int(raw_value)

    def estimate_gas(self, addr: str, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        (addr) = self.validate_and_normalize_inputs(addr)
        params = super().normalize_tx_params(tx_params)
        bound_fn = self._underlying_method(addr)
        return bound_fn.estimateGas(params.as_dict())
class IsSignerMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the isSigner method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data."""
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("isSigner")

    def validate_and_normalize_inputs(self, account: str) -> any:
        """Validate the inputs to the isSigner method."""
        self.validator.assert_valid(
            method_name='isSigner',
            parameter_name='account',
            argument_value=account,
        )
        return (self.validate_and_checksum_address(account))

    def block_call(self, account: str, debug: bool = False) -> bool:
        """Read-only eth_call: does *account* currently hold signer rights?"""
        call_opts = {'from': self._operate}
        raw_value = self._underlying_method(account).call(call_opts)
        return bool(raw_value)

    def estimate_gas(self, account: str, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        (account) = self.validate_and_normalize_inputs(account)
        params = super().normalize_tx_params(tx_params)
        bound_fn = self._underlying_method(account)
        return bound_fn.estimateGas(params.as_dict())
class RenounceSignerMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the renounceSigner method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data.

        :param elib: MiliDoS handle providing web3 access.
        :param contract_address: address of the deployed contract.
        :param contract_function: bound web3 ContractFunction for renounceSigner.
        :param validator: signature provider (must not be None).
        """
        # Fix: forward the validator to the base class like the other method
        # wrappers in this module do; it was previously silently dropped here.
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("renounceSigner")

    def block_send(self, _gaswei: int, _pricewei: int, _valeth: int = 0, _debugtx: bool = False, _receipList: bool = False) -> None:
        """Build, sign and broadcast a renounceSigner transaction.

        :param _gaswei: gas limit for the transaction.
        :param _pricewei: gas price in wei.
        :param _valeth: optional ETH value (wei) attached to the transaction.
        :param _debugtx: when True, print the built transaction and receipt.
        :param _receipList: when True, block until the receipt is mined.
        """
        _fn = self._underlying_method()
        try:
            _t = _fn.buildTransaction({
                'from': self._operate,
                'gas': _gaswei,
                'gasPrice': _pricewei
            })
            _t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
            if _valeth > 0:
                _t['value'] = _valeth
            if _debugtx:
                print(f"======== Signing ✅ by {self._operate}")
                print(f"======== Transaction ✅ check")
                print(_t)
            # Only sign/broadcast when buildTransaction produced calldata.
            if 'data' in _t:
                signed = self._web3_eth.account.sign_transaction(_t)
                txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
                tx_receipt = None
                if _receipList is True:
                    print(f"======== awaiting Confirmation 🚸️ {self.sign}")
                    tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
                    if _debugtx:
                        print("======== TX Result ✅")
                        print(tx_receipt)
                print(f"======== TX blockHash ✅")
                if tx_receipt is not None:
                    print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
                else:
                    print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET} - broadcast hash")
            # Without a receipt wait, pause so consecutive sends don't reuse a nonce.
            if _receipList is False:
                time.sleep(self._wait)
        except ContractLogicError as er:
            print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: renounce_signer")
        except ValueError as err:
            if "message" in err.args[0]:
                message = err.args[0]["message"]
                # NOTE(review): messages below say "set_asset_token" -- generator template leftover.
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
            else:
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")

    def send_transaction(self, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().transact(tx_params.as_dict())

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().buildTransaction(tx_params.as_dict())

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(tx_params.as_dict())
class SetEthFeeMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the setEthFee method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data.

        :param elib: MiliDoS handle providing web3 access.
        :param contract_address: address of the deployed contract.
        :param contract_function: bound web3 ContractFunction for setEthFee.
        :param validator: input validator; also supplies the method signature
            (must not be None).
        """
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("setEthFee")

    def validate_and_normalize_inputs(self, eth_send_fee: int) -> any:
        """Validate the setEthFee input; returns the fee coerced to int."""
        self.validator.assert_valid(
            method_name='setEthFee',
            parameter_name='_ethSendFee',
            argument_value=eth_send_fee,
        )
        # safeguard against fractional inputs
        eth_send_fee = int(eth_send_fee)
        return (eth_send_fee)

    def block_send(self, eth_send_fee: int, _gaswei: int, _pricewei: int, _valeth: int = 0, _debugtx: bool = False, _receipList: bool = False) -> None:
        """Build, sign and broadcast a setEthFee transaction.

        :param eth_send_fee: new ETH send fee value.
        :param _gaswei: gas limit for the transaction.
        :param _pricewei: gas price in wei.
        :param _valeth: optional ETH value (wei) attached to the transaction.
        :param _debugtx: when True, print the built transaction and receipt.
        :param _receipList: when True, block until the receipt is mined.
        """
        _fn = self._underlying_method(eth_send_fee)
        try:
            _t = _fn.buildTransaction({
                'from': self._operate,
                'gas': _gaswei,
                'gasPrice': _pricewei
            })
            _t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
            if _valeth > 0:
                _t['value'] = _valeth
            if _debugtx:
                print(f"======== Signing ✅ by {self._operate}")
                print(f"======== Transaction ✅ check")
                print(_t)
            # Only sign/broadcast when buildTransaction produced calldata.
            if 'data' in _t:
                signed = self._web3_eth.account.sign_transaction(_t)
                txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
                tx_receipt = None
                if _receipList is True:
                    print(f"======== awaiting Confirmation 🚸️ {self.sign}")
                    tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
                    if _debugtx:
                        print("======== TX Result ✅")
                        print(tx_receipt)
                print(f"======== TX blockHash ✅")
                if tx_receipt is not None:
                    print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
                else:
                    print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET} - broadcast hash")
            # Without a receipt wait, pause so consecutive sends don't reuse a nonce.
            if _receipList is False:
                time.sleep(self._wait)
        except ContractLogicError as er:
            print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: set_eth_fee")
        except ValueError as err:
            if "message" in err.args[0]:
                message = err.args[0]["message"]
                # NOTE(review): messages below say "set_asset_token" -- generator template leftover.
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
            else:
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")

    def send_transaction(self, eth_send_fee: int, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (eth_send_fee) = self.validate_and_normalize_inputs(eth_send_fee)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(eth_send_fee).transact(tx_params.as_dict())

    def build_transaction(self, eth_send_fee: int, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        (eth_send_fee) = self.validate_and_normalize_inputs(eth_send_fee)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(eth_send_fee).buildTransaction(tx_params.as_dict())

    def estimate_gas(self, eth_send_fee: int, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        (eth_send_fee) = self.validate_and_normalize_inputs(eth_send_fee)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(eth_send_fee).estimateGas(tx_params.as_dict())
class SetTokenFeeMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the setTokenFee method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data.

        :param elib: MiliDoS handle providing web3 access.
        :param contract_address: address of the deployed contract.
        :param contract_function: bound web3 ContractFunction for setTokenFee.
        :param validator: input validator; also supplies the method signature
            (must not be None).
        """
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("setTokenFee")

    def validate_and_normalize_inputs(self, token_send_fee: int) -> any:
        """Validate the setTokenFee input; returns the fee coerced to int."""
        self.validator.assert_valid(
            method_name='setTokenFee',
            parameter_name='_tokenSendFee',
            argument_value=token_send_fee,
        )
        # safeguard against fractional inputs
        token_send_fee = int(token_send_fee)
        return (token_send_fee)

    def block_send(self, token_send_fee: int, _gaswei: int, _pricewei: int, _valeth: int = 0, _debugtx: bool = False, _receipList: bool = False) -> None:
        """Build, sign and broadcast a setTokenFee transaction.

        :param token_send_fee: new token send fee value.
        :param _gaswei: gas limit for the transaction.
        :param _pricewei: gas price in wei.
        :param _valeth: optional ETH value (wei) attached to the transaction.
        :param _debugtx: when True, print the built transaction and receipt.
        :param _receipList: when True, block until the receipt is mined.
        """
        _fn = self._underlying_method(token_send_fee)
        try:
            _t = _fn.buildTransaction({
                'from': self._operate,
                'gas': _gaswei,
                'gasPrice': _pricewei
            })
            _t['nonce'] = self._web3_eth.getTransactionCount(self._operate)
            if _valeth > 0:
                _t['value'] = _valeth
            if _debugtx:
                print(f"======== Signing ✅ by {self._operate}")
                print(f"======== Transaction ✅ check")
                print(_t)
            # Only sign/broadcast when buildTransaction produced calldata.
            if 'data' in _t:
                signed = self._web3_eth.account.sign_transaction(_t)
                txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
                tx_receipt = None
                if _receipList is True:
                    print(f"======== awaiting Confirmation 🚸️ {self.sign}")
                    tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
                    if _debugtx:
                        print("======== TX Result ✅")
                        print(tx_receipt)
                print(f"======== TX blockHash ✅")
                if tx_receipt is not None:
                    print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
                else:
                    print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET} - broadcast hash")
            # Without a receipt wait, pause so consecutive sends don't reuse a nonce.
            if _receipList is False:
                time.sleep(self._wait)
        except ContractLogicError as er:
            print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: set_token_fee")
        except ValueError as err:
            if "message" in err.args[0]:
                message = err.args[0]["message"]
                # NOTE(review): messages below say "set_asset_token" -- generator template leftover.
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on set_asset_token: {message}")
            else:
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: set_asset_token")

    def send_transaction(self, token_send_fee: int, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (token_send_fee) = self.validate_and_normalize_inputs(token_send_fee)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_send_fee).transact(tx_params.as_dict())

    def build_transaction(self, token_send_fee: int, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        (token_send_fee) = self.validate_and_normalize_inputs(token_send_fee)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_send_fee).buildTransaction(tx_params.as_dict())

    def estimate_gas(self, token_send_fee: int, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        (token_send_fee) = self.validate_and_normalize_inputs(token_send_fee)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_send_fee).estimateGas(tx_params.as_dict())
class TokenSendFeeMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the tokenSendFee view method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data.

        :param contract_function: the bound web3 tokenSendFee function
        :param validator: supplies the method signature hash
        """
        # CONSISTENCY FIX: pass the validator through to the base class like
        # every other method wrapper in this module does (it was dropped here).
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("tokenSendFee")

    def block_call(self, debug: bool = False) -> int:
        """Read the current token-send fee via eth_call.

        :param debug: unused; kept for interface compatibility
        :returns: the fee as an int
        """
        _fn = self._underlying_method()
        returned = _fn.call({
            'from': self._operate
        })
        return int(returned)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(tx_params.as_dict())
class WithdrawEtherMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the withdrawEther method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data."""
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("withdrawEther")

    def validate_and_normalize_inputs(self, addr: str, amount: int) -> any:
        """Validate the inputs to the withdrawEther method.

        :param addr: recipient address; returned checksummed
        :param amount: wei amount; coerced to int to reject fractions
        """
        self.validator.assert_valid(
            method_name='withdrawEther',
            parameter_name='addr',
            argument_value=addr,
        )
        addr = self.validate_and_checksum_address(addr)
        self.validator.assert_valid(
            method_name='withdrawEther',
            parameter_name='amount',
            argument_value=amount,
        )
        # safeguard against fractional inputs
        amount = int(amount)
        return (addr, amount)

    def block_send(self, addr: str, amount: int, _gaswei: int, _pricewei: int, _valeth: int = 0, _debugtx: bool = False, _receipList: bool = False) -> bool:
        """Sign and broadcast a withdrawEther transaction from the operator account.

        :param addr: recipient address
        :param amount: wei amount to withdraw
        :param _gaswei: gas limit for the transaction
        :param _pricewei: gas price in wei
        :param _valeth: optional ether value (wei) to attach
        :param _debugtx: when True, print the transaction and its receipt
        :param _receipList: when True, block until the receipt is available
        """
        _fn = self._underlying_method(addr, amount)
        try:
            _t = _fn.buildTransaction({
                'from': self._operate,
                'gas': _gaswei,
                'gasPrice': _pricewei
            })
            _t['nonce'] = self._web3_eth.getTransactionCount(self._operate)

            if _valeth > 0:
                _t['value'] = _valeth

            if _debugtx:
                print(f"======== Signing ✅ by {self._operate}")
                print(f"======== Transaction ✅ check")
                print(_t)

            if 'data' in _t:
                signed = self._web3_eth.account.sign_transaction(_t)
                txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
                tx_receipt = None
                if _receipList is True:
                    print(f"======== awaiting Confirmation 🚸️ {self.sign}")
                    tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
                    if _debugtx:
                        print("======== TX Result ✅")
                        print(tx_receipt)

                print(f"======== TX blockHash ✅")
                if tx_receipt is not None:
                    print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
                else:
                    print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET} - broadcast hash")

            if _receipList is False:
                time.sleep(self._wait)

        except ContractLogicError as er:
            print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: withdraw_ether")

        except ValueError as err:
            if "message" in err.args[0]:
                message = err.args[0]["message"]
                # BUGFIX: the revert message previously named "set_asset_token"
                # (copy/paste from another wrapper); label this method correctly.
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on withdraw_ether: {message}")
            else:
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: withdraw_ether")

    def send_transaction(self, addr: str, amount: int, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (addr, amount) = self.validate_and_normalize_inputs(addr, amount)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(addr, amount).transact(tx_params.as_dict())

    def build_transaction(self, addr: str, amount: int, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        (addr, amount) = self.validate_and_normalize_inputs(addr, amount)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(addr, amount).buildTransaction(tx_params.as_dict())

    def estimate_gas(self, addr: str, amount: int, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        (addr, amount) = self.validate_and_normalize_inputs(addr, amount)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(addr, amount).estimateGas(tx_params.as_dict())
class WithdrawTokenMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the withdrawToken method."""

    def __init__(self, elib: MiliDoS, contract_address: str, contract_function: ContractFunction, validator: Validator = None):
        """Persist instance data."""
        super().__init__(elib, contract_address, validator)
        self._underlying_method = contract_function
        self.sign = validator.getSignature("withdrawToken")

    def validate_and_normalize_inputs(self, token_addr: str, to: str, amount: int) -> any:
        """Validate the inputs to the withdrawToken method.

        :param token_addr: ERC-20 token contract address; returned checksummed
        :param to: recipient address; returned checksummed
        :param amount: token amount; coerced to int to reject fractions
        """
        self.validator.assert_valid(
            method_name='withdrawToken',
            parameter_name='tokenAddr',
            argument_value=token_addr,
        )
        token_addr = self.validate_and_checksum_address(token_addr)
        self.validator.assert_valid(
            method_name='withdrawToken',
            parameter_name='_to',
            argument_value=to,
        )
        to = self.validate_and_checksum_address(to)
        self.validator.assert_valid(
            method_name='withdrawToken',
            parameter_name='_amount',
            argument_value=amount,
        )
        # safeguard against fractional inputs
        amount = int(amount)
        return (token_addr, to, amount)

    def block_send(self, token_addr: str, to: str, amount: int, _gaswei: int, _pricewei: int, _valeth: int = 0, _debugtx: bool = False, _receipList: bool = False) -> bool:
        """Sign and broadcast a withdrawToken transaction from the operator account.

        :param token_addr: ERC-20 token contract address
        :param to: recipient address
        :param amount: token amount to withdraw
        :param _gaswei: gas limit for the transaction
        :param _pricewei: gas price in wei
        :param _valeth: optional ether value (wei) to attach
        :param _debugtx: when True, print the transaction and its receipt
        :param _receipList: when True, block until the receipt is available
        """
        _fn = self._underlying_method(token_addr, to, amount)
        try:
            _t = _fn.buildTransaction({
                'from': self._operate,
                'gas': _gaswei,
                'gasPrice': _pricewei
            })
            _t['nonce'] = self._web3_eth.getTransactionCount(self._operate)

            if _valeth > 0:
                _t['value'] = _valeth

            if _debugtx:
                print(f"======== Signing ✅ by {self._operate}")
                print(f"======== Transaction ✅ check")
                print(_t)

            if 'data' in _t:
                signed = self._web3_eth.account.sign_transaction(_t)
                txHash = self._web3_eth.sendRawTransaction(signed.rawTransaction)
                tx_receipt = None
                if _receipList is True:
                    print(f"======== awaiting Confirmation 🚸️ {self.sign}")
                    tx_receipt = self._web3_eth.waitForTransactionReceipt(txHash)
                    if _debugtx:
                        print("======== TX Result ✅")
                        print(tx_receipt)

                print(f"======== TX blockHash ✅")
                if tx_receipt is not None:
                    print(f"{Bolors.OK}{tx_receipt.blockHash.hex()}{Bolors.RESET}")
                else:
                    print(f"{Bolors.WARNING}{txHash.hex()}{Bolors.RESET} - broadcast hash")

            if _receipList is False:
                time.sleep(self._wait)

        except ContractLogicError as er:
            print(f"{Bolors.FAIL}Error {er} {Bolors.RESET}: withdraw_token")

        except ValueError as err:
            if "message" in err.args[0]:
                message = err.args[0]["message"]
                # BUGFIX: the revert message previously named "set_asset_token"
                # (copy/paste from another wrapper); label this method correctly.
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET} on withdraw_token: {message}")
            else:
                print(f"{Bolors.FAIL}Error Revert {Bolors.RESET}: withdraw_token")

    def send_transaction(self, token_addr: str, to: str, amount: int, tx_params: Optional[TxParams] = None) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (token_addr, to, amount) = self.validate_and_normalize_inputs(token_addr, to, amount)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_addr, to, amount).transact(tx_params.as_dict())

    def build_transaction(self, token_addr: str, to: str, amount: int, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        (token_addr, to, amount) = self.validate_and_normalize_inputs(token_addr, to, amount)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_addr, to, amount).buildTransaction(tx_params.as_dict())

    def estimate_gas(self, token_addr: str, to: str, amount: int, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        (token_addr, to, amount) = self.validate_and_normalize_inputs(token_addr, to, amount)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(token_addr, to, amount).estimateGas(tx_params.as_dict())
class SignatureGenerator(Signatures):
    """Exposes the function signature hashes parsed from the contract ABI,
    one accessor per contract function."""

    def __init__(self, abi: any):
        super().__init__(abi)

    def _sig(self, fn_name: str) -> str:
        # single lookup point into the signature table built by the base class
        return self._function_signatures[fn_name]

    def add_signer(self) -> str:
        return self._sig("addSigner")

    def bulk_send_token(self) -> str:
        return self._sig("bulkSendToken")

    def bulk_send_trx(self) -> str:
        return self._sig("bulkSendTrx")

    def claim_init(self) -> str:
        return self._sig("claimInit")

    def deposit(self) -> str:
        return self._sig("deposit")

    def eth_send_fee(self) -> str:
        return self._sig("ethSendFee")

    def get_balance(self) -> str:
        return self._sig("getBalance")

    def is_signer(self) -> str:
        return self._sig("isSigner")

    def renounce_signer(self) -> str:
        return self._sig("renounceSigner")

    def set_eth_fee(self) -> str:
        return self._sig("setEthFee")

    def set_token_fee(self) -> str:
        return self._sig("setTokenFee")

    def token_send_fee(self) -> str:
        return self._sig("tokenSendFee")

    def withdraw_ether(self) -> str:
        return self._sig("withdrawEther")

    def withdraw_token(self) -> str:
        return self._sig("withdrawToken")
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class BSend(ContractBase):
    """Wrapper class for BSend Solidity contract."""
    # One pre-bound method wrapper per contract function; all are created in
    # __init__ and share the same validator and signature table.
    _fn_add_signer: AddSignerMethod
    """Constructor-initialized instance of
    :class:`AddSignerMethod`.
    """
    _fn_bulk_send_token: BulkSendTokenMethod
    """Constructor-initialized instance of
    :class:`BulkSendTokenMethod`.
    """
    _fn_bulk_send_trx: BulkSendTrxMethod
    """Constructor-initialized instance of
    :class:`BulkSendTrxMethod`.
    """
    _fn_claim_init: ClaimInitMethod
    """Constructor-initialized instance of
    :class:`ClaimInitMethod`.
    """
    _fn_deposit: DepositMethod
    """Constructor-initialized instance of
    :class:`DepositMethod`.
    """
    _fn_eth_send_fee: EthSendFeeMethod
    """Constructor-initialized instance of
    :class:`EthSendFeeMethod`.
    """
    _fn_get_balance: GetBalanceMethod
    """Constructor-initialized instance of
    :class:`GetBalanceMethod`.
    """
    _fn_is_signer: IsSignerMethod
    """Constructor-initialized instance of
    :class:`IsSignerMethod`.
    """
    _fn_renounce_signer: RenounceSignerMethod
    """Constructor-initialized instance of
    :class:`RenounceSignerMethod`.
    """
    _fn_set_eth_fee: SetEthFeeMethod
    """Constructor-initialized instance of
    :class:`SetEthFeeMethod`.
    """
    _fn_set_token_fee: SetTokenFeeMethod
    """Constructor-initialized instance of
    :class:`SetTokenFeeMethod`.
    """
    _fn_token_send_fee: TokenSendFeeMethod
    """Constructor-initialized instance of
    :class:`TokenSendFeeMethod`.
    """
    _fn_withdraw_ether: WithdrawEtherMethod
    """Constructor-initialized instance of
    :class:`WithdrawEtherMethod`.
    """
    _fn_withdraw_token: WithdrawTokenMethod
    """Constructor-initialized instance of
    :class:`WithdrawTokenMethod`.
    """
    # Signature table generated from the ABI; shared with the validator.
    SIGNATURES: SignatureGenerator = None
    def __init__(
        self,
        core_lib: MiliDoS,
        contract_address: str,
        validator: BSendValidator = None,
    ):
        """Get an instance of wrapper for smart contract.
        """
        # pylint: disable=too-many-statements
        super().__init__()
        self.contract_address = contract_address
        web3 = core_lib.w3
        # fall back to a fresh validator bound to this contract address
        if not validator:
            validator = BSendValidator(web3, contract_address)
        # if any middleware was imported, inject it
        try:
            MIDDLEWARE
        except NameError:
            pass
        else:
            try:
                for middleware in MIDDLEWARE:
                    web3.middleware_onion.inject(
                        middleware['function'], layer=middleware['layer'],
                    )
            except ValueError as value_error:
                # re-injecting the same middleware is harmless; ignore it
                if value_error.args == ("You can't add the same un-named instance twice",):
                    pass
        self._web3_eth = web3.eth
        # bind the web3 contract functions once, then wrap each one below
        functions = self._web3_eth.contract(address=to_checksum_address(contract_address), abi=BSend.abi()).functions
        signed = SignatureGenerator(BSend.abi())
        validator.bindSignatures(signed)
        self.SIGNATURES = signed
        self._fn_add_signer = AddSignerMethod(core_lib, contract_address, functions.addSigner, validator)
        self._fn_bulk_send_token = BulkSendTokenMethod(core_lib, contract_address, functions.bulkSendToken, validator)
        self._fn_bulk_send_trx = BulkSendTrxMethod(core_lib, contract_address, functions.bulkSendTrx, validator)
        self._fn_claim_init = ClaimInitMethod(core_lib, contract_address, functions.claimInit, validator)
        self._fn_deposit = DepositMethod(core_lib, contract_address, functions.deposit, validator)
        self._fn_eth_send_fee = EthSendFeeMethod(core_lib, contract_address, functions.ethSendFee, validator)
        self._fn_get_balance = GetBalanceMethod(core_lib, contract_address, functions.getBalance, validator)
        self._fn_is_signer = IsSignerMethod(core_lib, contract_address, functions.isSigner, validator)
        self._fn_renounce_signer = RenounceSignerMethod(core_lib, contract_address, functions.renounceSigner, validator)
        self._fn_set_eth_fee = SetEthFeeMethod(core_lib, contract_address, functions.setEthFee, validator)
        self._fn_set_token_fee = SetTokenFeeMethod(core_lib, contract_address, functions.setTokenFee, validator)
        self._fn_token_send_fee = TokenSendFeeMethod(core_lib, contract_address, functions.tokenSendFee, validator)
        self._fn_withdraw_ether = WithdrawEtherMethod(core_lib, contract_address, functions.withdrawEther, validator)
        self._fn_withdraw_token = WithdrawTokenMethod(core_lib, contract_address, functions.withdrawToken, validator)
    def event_signer_added(
            self, tx_hash: Union[HexBytes, bytes]
    ) -> Tuple[AttributeDict]:
        """
        Implementation of event signer_added in contract BSend
        Get log entry for SignerAdded event.
                :param tx_hash: hash of transaction emitting SignerAdded event
        """
        tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
        return self._web3_eth.contract(address=to_checksum_address(self.contract_address), abi=BSend.abi()).events.SignerAdded().processReceipt(tx_receipt)
    def event_signer_removed(
            self, tx_hash: Union[HexBytes, bytes]
    ) -> Tuple[AttributeDict]:
        """
        Implementation of event signer_removed in contract BSend
        Get log entry for SignerRemoved event.
                :param tx_hash: hash of transaction emitting SignerRemoved event
        """
        tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
        return self._web3_eth.contract(address=to_checksum_address(self.contract_address), abi=BSend.abi()).events.SignerRemoved().processReceipt(tx_receipt)
    # The methods below forward to the pre-bound wrappers, supplying the
    # instance-level gas amount/price and debug/receipt flags.
    def add_signer(self, account: str) -> None:
        """
        Implementation of add_signer in contract BSend
        Method of the function
        """
        return self._fn_add_signer.block_send(account, self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
    def bulk_send_token(self, token_addr: str, addresses: List[str], amounts: List[int], wei: int = 0) -> bool:
        """
        Implementation of bulk_send_token in contract BSend
        Method of the function
        """
        return self._fn_bulk_send_token.block_send(token_addr, addresses, amounts, self.call_contract_fee_amount, self.call_contract_fee_price, wei, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
    def bulk_send_trx(self, addresses: List[str], amounts: List[int], wei: int = 0) -> bool:
        """
        Implementation of bulk_send_trx in contract BSend
        Method of the function
        """
        return self._fn_bulk_send_trx.block_send(addresses, amounts, self.call_contract_fee_amount, self.call_contract_fee_price, wei, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
    def claim_init(self) -> None:
        """
        Implementation of claim_init in contract BSend
        Method of the function
        """
        return self._fn_claim_init.block_send(self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
    def deposit(self, wei: int = 0) -> bool:
        """
        Implementation of deposit in contract BSend
        Method of the function
        """
        return self._fn_deposit.block_send(self.call_contract_fee_amount, self.call_contract_fee_price, wei, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
    def eth_send_fee(self) -> int:
        """
        Implementation of eth_send_fee in contract BSend
        Method of the function
        """
        return self._fn_eth_send_fee.block_call()
    def get_balance(self, addr: str) -> int:
        """
        Implementation of get_balance in contract BSend
        Method of the function
        """
        return self._fn_get_balance.block_call(addr)
    def is_signer(self, account: str) -> bool:
        """
        Implementation of is_signer in contract BSend
        Method of the function
        """
        return self._fn_is_signer.block_call(account)
    def renounce_signer(self) -> None:
        """
        Implementation of renounce_signer in contract BSend
        Method of the function
        """
        return self._fn_renounce_signer.block_send(self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
    def set_eth_fee(self, eth_send_fee: int) -> bool:
        """
        Implementation of set_eth_fee in contract BSend
        Method of the function
        """
        return self._fn_set_eth_fee.block_send(eth_send_fee, self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
    def set_token_fee(self, token_send_fee: int) -> bool:
        """
        Implementation of set_token_fee in contract BSend
        Method of the function
        """
        return self._fn_set_token_fee.block_send(token_send_fee, self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
    def token_send_fee(self) -> int:
        """
        Implementation of token_send_fee in contract BSend
        Method of the function
        """
        return self._fn_token_send_fee.block_call()
    def withdraw_ether(self, addr: str, amount: int) -> bool:
        """
        Implementation of withdraw_ether in contract BSend
        Method of the function
        """
        return self._fn_withdraw_ether.block_send(addr, amount, self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
    def withdraw_token(self, token_addr: str, to: str, amount: int) -> bool:
        """
        Implementation of withdraw_token in contract BSend
        Method of the function
        """
        return self._fn_withdraw_token.block_send(token_addr, to, amount, self.call_contract_fee_amount, self.call_contract_fee_price, 0, self.call_contract_debug_flag, self.call_contract_enforce_tx_receipt)
    def CallContractWait(self, t_long: int) -> "BSend":
        # propagate the post-broadcast wait time to every method wrapper;
        # returns self so the call can be chained fluently
        self._fn_add_signer.setWait(t_long)
        self._fn_bulk_send_token.setWait(t_long)
        self._fn_bulk_send_trx.setWait(t_long)
        self._fn_claim_init.setWait(t_long)
        self._fn_deposit.setWait(t_long)
        self._fn_eth_send_fee.setWait(t_long)
        self._fn_get_balance.setWait(t_long)
        self._fn_is_signer.setWait(t_long)
        self._fn_renounce_signer.setWait(t_long)
        self._fn_set_eth_fee.setWait(t_long)
        self._fn_set_token_fee.setWait(t_long)
        self._fn_token_send_fee.setWait(t_long)
        self._fn_withdraw_ether.setWait(t_long)
        self._fn_withdraw_token.setWait(t_long)
        return self
    @staticmethod
    def abi():
        """Return the ABI to the underlying contract."""
        return json.loads(
            '[{"inputs":[],"payable":true,"stateMutability":"payable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"account","type":"address"}],"name":"SignerAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"account","type":"address"}],"name":"SignerRemoved","type":"event"},{"constant":false,"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"addSigner","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"tokenAddr","type":"address"},{"internalType":"address[]","name":"addresses","type":"address[]"},{"internalType":"uint256[]","name":"amounts","type":"uint256[]"}],"name":"bulkSendToken","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[{"internalType":"address[]","name":"addresses","type":"address[]"},{"internalType":"uint256[]","name":"amounts","type":"uint256[]"}],"name":"bulkSendTrx","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[],"name":"claimInit","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[],"name":"deposit","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":true,"inputs":[],"name":"ethSendFee","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"addr","type":"address"}],"name":"getBalance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"isSigner","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"renounceSigner","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"_ethSendFee","type":"uint256"}],"name":"setEthFee","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"uint256","name":"_tokenSendFee","type":"uint256"}],"name":"setTokenFee","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"tokenSendFee","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"addr","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"withdrawEther","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"internalType":"address","name":"tokenAddr","type":"address"},{"internalType":"address","name":"_to","type":"address"},{"internalType":"uint256","name":"_amount","type":"uint256"}],"name":"withdrawToken","outputs":[{"internalType":"bool","name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]'
            # noqa: E501 (line-too-long)
        )
# pylint: disable=too-many-lines
|
"""Copyright (c) 2014, Dilithium Power Systems LLC All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Dilithium Power Systems LLC.
"""
import eeprom
import logging
import struct
import time
import can_ethernet
import can_msg_pb2
class MpptNotPresent(Exception):
    """Raised when no MPPT answers RTR probes at the expected CAN address."""
class BadPacket(Exception):
    """Raised when a received CAN frame has the wrong DLC or arbitration id."""
class mppt():
    """Driver for one MPPT (maximum power point tracker) on the CAN bus.

    Telemetry fields (vin, vout, iin, temp) are updated by getStateData /
    parseStatePacket.
    """

    # initialize tracker structure also initialize the eeprom
    def __init__(self, channel, baseid, can):
        """Probe for the tracker at baseid + channel and attach its eeprom.

        :raises MpptNotPresent: if the tracker does not answer the RTR probe
        """
        self.canAddress = baseid + channel
        self.can = can
        # last parsed telemetry (iout is declared but never set by
        # parseStatePacket -- presumably reserved; confirm before relying on it)
        self.temp = 0
        self.vin = 0
        self.vout = 0
        self.iin = 0
        self.iout = 0
        # debug level
        self.debug = 0
        self.found = self.detectMPPT()
        if self.found:
            self.ee = eeprom.eeprom(channel, baseid, can, self)
        else:
            raise MpptNotPresent('MPPT 0x%03x not detected' % self.canAddress)

    def RTRPacket(self, canAddress, retries, timeout):
        """Send a standard RTR frame and wait for the reply.

        :param retries: number of send/wait attempts before giving up
        :param timeout: per-attempt wait in seconds
        :returns: the received packet, or None when every attempt timed out
        """
        # BUGFIX: the original returned None on the FIRST timeout, so the
        # retry count was never honoured; now each timeout triggers a resend.
        # (range replaces the Python-2-only xrange; works on both versions.)
        for _ in range(retries):
            # form the rtr packet to send
            tx = can_msg_pb2.CanMessage()
            tx.id = canAddress
            tx.type = can_msg_pb2.STD_RTR
            tx.data.extend([0] * 8)
            # send the packet
            self.can.SendPkt(tx)
            try:
                return self.can.WaitForPacket(tx, timeout)
            except can_ethernet.TimeoutError:
                continue
        return None

    # detect if mppt is on the bus
    def detectMPPT(self):
        """Return True if the tracker answers an RTR probe, else False."""
        rx = self.RTRPacket(self.canAddress, 3, 0.1)
        # report what happened
        if rx:
            logging.info('MPPT Detected at address: 0x%03X' % (self.canAddress))
            return True
        else:
            logging.info('Failed to Discover MPPT: 0x%03X' % (self.canAddress))
            return False

    def getStateData(self):
        # get the state of the mppt - voltage in, voltage out, current and
        # temperature; results land in self.vin/vout/iin/temp
        rx = self.RTRPacket(self.canAddress, 3, 0.1)
        self.parseStatePacket(rx)

    def setEnable(self, enable, leds='on'):
        """Send the control packet that enables/disables the mppt and its LEDs.

        NOTE(review): canPacket, try_report and self.rpt are not defined in
        this module, and this path uses sendPkt while the rest of the class
        uses SendPkt -- this code looks stale; confirm before use.
        """
        param = 0
        if(enable == 'on'):
            param = param | 0x01
            try_report(self.rpt,
                       'Enabled MPPT',
                       False,
                       False,
                       '0x{0:X}'.format(self.canAddress),
                       '')
        elif(enable == 'off'):
            param = param & 0xfe
            try_report(self.rpt,
                       'Disabled MPPT',
                       False,
                       False,
                       '0x{0:X}'.format(self.canAddress),
                       '')
        else:
            param = param & 0xfe
        if(leds == 'on'):
            param = param & 0xfd
            try_report(self.rpt,
                       'Enabled LEDs',
                       False,
                       False,
                       '0x{0:X}'.format(self.canAddress),
                       '')
        elif(leds == 'off'):
            param = param | 0x02
            try_report(self.rpt,
                       'Disabled LEDs',
                       False,
                       False,
                       '0x{0:X}'.format(self.canAddress),
                       '')
        else:
            param = param & 0xfd
        tx = canPacket('t', self.canAddress + 0x10, 1, [param], False, False)
        self.can.sendPkt(tx)

    # this function will force the mppt duty cycle to a fixed value
    # eeprom testMode must be set to 1 for this command to function
    def setDutyCycle(self, dutyCycle):
        """Force the duty cycle (big-endian 16-bit) via the magic-number packet.

        NOTE(review): canPacket is undefined here and this call passes fewer
        arguments than the one in setEnable -- confirm before use.
        """
        s = struct.pack('>H', dutyCycle)
        output = struct.unpack('>BB', s)
        # make packet with magic sequence
        tx = canPacket('t', self.canAddress + 0x70, 8, [output[0],
                                                        output[1],
                                                        0, 0, 45, 78, 69, 0])
        # send the packet
        self.can.sendPkt(tx)

    # this function will take the returned state packet and parses it out to
    # usable state variables
    def parseStatePacket(self, pkt):
        """Decode an 8-byte state frame into vin/vout/iin/temp.

        Each value is a little-endian 16-bit word; voltages and temperature
        are scaled by 1/100, input current by 1/1000.
        :raises BadPacket: on wrong DLC or arbitration id
        """
        if not pkt.dlc == 8:
            raise BadPacket('DLC must = 8: %d' % pkt.dlc)
        if not pkt.id == self.canAddress:
            raise BadPacket('Address must = self.canAddress: %d' % pkt.id)
        self.vin = float(pkt.data[1] * 0x100 + pkt.data[0]) / 100.0
        self.vout = float(pkt.data[5] * 0x100 + pkt.data[4]) / 100.0
        self.iin = float(pkt.data[3] * 0x100 + pkt.data[2]) / 1000.0
        self.temp = float(pkt.data[7] * 0x100 + pkt.data[6]) / 100.0

    def reset(self):
        """Send the magic reset frame, then wait for the firmware to reboot."""
        logging.info('Resetting MPPT...')
        # create the packet structure
        tx = can_msg_pb2.CanMessage()
        tx.id = self.canAddress + 0x30
        # build the can packet payload with the magic number
        tx.data.extend([0, 0, 0, 0, 45, 78, 69, 0xfe])
        # send the packet on the bus
        self.can.SendPkt(tx)
        logging.info('Waiting for MPPT to initialize...')
        time.sleep(6)
        logging.info('Done.')
|
#!/usr/bin/env python
from ciscoconfparse import CiscoConfParse
from pprint import pprint as pp
ciscocfg = CiscoConfParse("cisco_ipsec.txt")

# crypto-map entries whose child lines still reference 3DES (i.e. not AES)
transform_sets = ciscocfg.find_objects_w_child(parentspec=r'crypto map CRYPTO', childspec='3DES')

print("The following crypto maps are not using AES:\n")
for line in transform_sets:
    # BUGFIX: use the print() function instead of the Python-2-only print
    # statement -- consistent with the print() call above and valid on 2 and 3
    print(line.text)
|
from __future__ import division, print_function, absolute_import
from . import util
import numpy as np
class PatternFilterer(object):
    """Base class that splits 'patterns' into (passing, filtered) lists.

    Subclasses implement __call__; chain() composes two filterers so the
    second only sees patterns that survived the first.
    """

    def __call__(self, patterns):
        raise NotImplementedError()

    def chain(self, pattern_filterer):
        def composed(patterns):
            first_pass, first_rejected = self(patterns)
            second_pass, second_rejected = pattern_filterer(first_pass)
            all_rejected = list(first_rejected) + list(second_rejected)
            # sanity check to make sure no patterns got lost
            assert len(all_rejected) + len(second_pass) == len(patterns)
            return (second_pass, all_rejected)
        return FuncPatternFilterer(function=composed)
class FuncPatternFilterer(PatternFilterer):
    """Adapts a plain callable into a PatternFilterer.

    The callable must accept a list of patterns and return the
    (passing, filtered) pair.
    """

    def __init__(self, function):
        self.function = function

    def __call__(self, patterns):
        # delegate straight to the wrapped callable
        return self.function(patterns)
class ConditionPatternFilterer(PatternFilterer):
    """Filters patterns with a per-pattern boolean predicate (_condition)."""

    def _condition(self, pattern):
        raise NotImplementedError()

    def __call__(self, patterns):
        passing, rejected = [], []
        for pattern in patterns:
            # route each pattern by the subclass-supplied predicate,
            # preserving the input order within each output list
            bucket = passing if self._condition(pattern) else rejected
            bucket.append(pattern)
        return (passing, rejected)
class MinSeqletSupportFilterer(ConditionPatternFilterer):
    """Keep only patterns backed by at least min_seqlet_support seqlets."""

    def __init__(self, min_seqlet_support):
        self.min_seqlet_support = min_seqlet_support

    def _condition(self, pattern):
        # support is the number of seqlets aggregated into the pattern
        return len(pattern.seqlets) >= self.min_seqlet_support
class MinICinWindow(ConditionPatternFilterer):
    """Keep patterns that have at least min_ic_in_window bits of information
    content within some window of window_size consecutive positions."""

    def __init__(self, window_size, min_ic_in_window, background,
                       sequence_track_name,
                       ppm_pseudocount):
        """
        :param window_size: length of the sliding window (positions)
        :param min_ic_in_window: IC threshold a window must reach
        :param background: background base frequencies for the IC computation
        :param sequence_track_name: track from which the PPM is read
        :param ppm_pseudocount: pseudocount applied to the PPM before IC
        """
        self.window_size = window_size
        self.min_ic_in_window = min_ic_in_window
        self.background = background
        self.sequence_track_name = sequence_track_name
        # BUGFIX: honour the ppm_pseudocount argument; it was previously
        # ignored and hard-coded to 0.001.
        self.ppm_pseudocount = ppm_pseudocount

    def _condition(self, pattern):
        ppm = pattern[self.sequence_track_name].fwd
        # compute per-position ic for the pattern
        per_position_ic = util.compute_per_position_ic(
                            ppm=ppm, background=self.background,
                            pseudocount=self.ppm_pseudocount)
        if (len(per_position_ic) < self.window_size):
            # motif shorter than the window: fall back to total IC
            print("WARNING: motif length is < window_size")
            return np.sum(per_position_ic) >= self.min_ic_in_window
        else:
            # do the sliding window sum rearrangement; pass if any window
            # clears the threshold
            windowed_ic = np.sum(util.rolling_window(
                            a=per_position_ic, window=self.window_size),
                            axis=-1)
            return np.max(windowed_ic) >= self.min_ic_in_window
|
#!/usr/bin/env python
from wordcount import load_word_counts
import sys
def top_two_word(counts):
    """
    Given a list of (word, count, percentage) tuples,
    return the counts of the (up to) two most frequent words.
    """
    return [count for _, count, _ in counts[:2]]
if __name__ == '__main__':
    # For each input file, print the two highest word counts and their ratio.
    print("Book\tFirst\tSecond\tRatio")
    for input_file in sys.argv[1:]:
        counts = load_word_counts(input_file)
        first, second = top_two_word(counts)
        bookname = input_file[:-4]  # strip the 4-character file extension
        print("%s\t%i\t%i\t%.2f" % (bookname, first, second, float(first) / second))
|
###############################################################################
# Copyright 2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import json
from xml.dom.minidom import getDOMImplementation
from ipf.data import Data, Representation
from .entity import *
#######################################################################################################################
class Contact(Entity):
    """A point-of-contact record (GLUE2 Contact entity)."""

    def __init__(self):
        Entity.__init__(self)
        self.Detail = None    # URI embedding the contact information
        self.Type = None      # ContactType_t
        self.ServiceID = []   # list of string URIs
        self.DomainID = []    # list of string URIs
#######################################################################################################################
class ContactOgfJson(EntityOgfJson):
    """OGF JSON representation of a Contact."""
    data_cls = Contact

    def __init__(self, data):
        EntityOgfJson.__init__(self, data)

    def get(self):
        return json.dumps(self.toJson(), sort_keys=True, indent=4)

    def toJson(self):
        # Start from the shared Entity fields, then add the Contact-specific
        # ones, omitting anything that is unset or empty.
        doc = EntityOgfJson.toJson(self)
        contact = self.data
        if contact.Detail is not None:
            doc["Detail"] = contact.Detail
        if contact.Type is not None:
            doc["Type"] = contact.Type
        if contact.ServiceID:
            doc["ServiceID"] = contact.ServiceID
        if contact.DomainID:
            doc["DomainID"] = contact.DomainID
        return doc
#######################################################################################################################
|
import time
import json
import web3
import test_utilities
from integration_test_context import main, common, eth
from common import *
from eth import ETH
cmd = main.Integrator()
def web3_connect_ws(host, port):
    """Return a Web3 instance connected over websockets to host:port."""
    url = "ws://{}:{}".format(host, port)
    return web3.Web3(web3.Web3.WebsocketProvider(url))
def get_web3_connection_for_test():
    """Connect to the Ethereum node named by $ETHEREUM_WEBSOCKET_ADDRESS."""
    ws_url = test_utilities.get_required_env_var("ETHEREUM_WEBSOCKET_ADDRESS")
    return web3.Web3(web3.WebsocketProvider(ws_url))
def get_compiled_sc_ganache(sc_name):
    """Load the truffle build artifact (JSON dict) for smart contract `sc_name`."""
    artifact_path = main.project_dir("smart-contracts/build/contracts/{}.json".format(sc_name))
    return json.loads(cmd.read_text_file(artifact_path))
def get_sc_abi_ganache(sc_name):
    """Return (deployed address, ABI) for `sc_name` on the ganache network (id 5777)."""
    network_id = 5777
    artifact = get_compiled_sc_ganache(sc_name)
    return artifact["networks"][str(network_id)]["address"], artifact["abi"]
def get_blocklist_sc(w3):
    """Return a contract handle for the deployed Blocklist contract."""
    address, abi = get_sc_abi_ganache("Blocklist")
    return w3.eth.contract(address=address, abi=abi)
def get_bridge_bank_sc(w3):
    """Return a contract handle for the deployed BridgeBank contract."""
    address, abi = get_sc_abi_ganache("BridgeBank")
    # assert address == test_utilities.get_required_env_var("BRIDGE_BANK_ADDRESS")
    return w3.eth.contract(address=address, abi=abi)
def set_blocklist_to(w3, blocklist_sc, addrs):
    """Make the on-chain blocklist contain exactly `addrs` (checksummed).

    Computes the delta against the current list and submits at most one
    batch-add and one batch-remove transaction, then verifies the result.
    """
    addrs = [w3.toChecksumAddress(addr) for addr in addrs]
    current = blocklist_sc.functions.getFullList().call()
    to_add = [addr for addr in addrs if addr not in current]
    to_remove = [addr for addr in current if addr not in addrs]
    # Improvement: only transact when there is an actual change; previously an
    # empty batch transaction was submitted (and paid for) even for a no-op.
    if to_add:
        txhash = blocklist_sc.functions.batchAddToBlocklist(to_add).transact()
        w3.eth.wait_for_transaction_receipt(txhash)
    if to_remove:
        txhash = blocklist_sc.functions.batchRemoveFromBlocklist(to_remove).transact()
        w3.eth.wait_for_transaction_receipt(txhash)
    current = blocklist_sc.functions.getFullList().call()
    assert set(addrs) == set(current)
def create_sifchain_addr():
    """Create a fresh sifchain account from a random mnemonic; return its address."""
    mnemonic = random_string(20)
    account = cmd.sifnoded_keys_add_1(mnemonic)
    return account["address"]
max_gas_required = 200000
def bridge_bank_lock_eth(w3, bridge_bank, from_eth_acct, to_sif_acct, amount):
    """Lock `amount` wei of ether in BridgeBank for transfer to `to_sif_acct`."""
    assert eth.get_eth_balance(w3, from_eth_acct) > max_gas_required, "Not enough gas for test"
    recipient = to_sif_acct.encode("UTF-8")
    coin_denom = NULL_ADDRESS  # For "eth", otherwise use coin's address
    tx_opts = {"from": from_eth_acct, "gas": max_gas_required, "value": amount}
    txhash = bridge_bank.functions.lock(recipient, coin_denom, amount).transact(tx_opts)
    return w3.eth.wait_for_transaction_receipt(txhash)
def bridge_bank_lock_erc20(w3, bridge_bank, bridge_token, from_eth_acct, to_sif_acct, amount):
    """Lock `amount` of an ERC20 token in BridgeBank for transfer to `to_sif_acct`."""
    assert eth.get_eth_balance(w3, from_eth_acct) > max_gas_required, "Not enough gas for test"
    assert eth.get_erc20_token_balance(w3, bridge_token, from_eth_acct) >= amount, "Not enough tokens for test"
    recipient = to_sif_acct.encode("UTF-8")
    # For ERC20 the amount is passed as a call argument; message.value stays 0.
    tx_opts = {"from": from_eth_acct, "gas": max_gas_required}
    txhash = bridge_bank.functions.lock(recipient, bridge_token.address, amount).transact(tx_opts)
    return w3.eth.wait_for_transaction_receipt(txhash)
def create_eth_account(w3, password=""):
# This creates local account, but does not register it (w3.eth.accounts shows the same number)
# account = w3.eth.account.create()
# This creates account in the external node that we're connected to. The node has to support geth extensions.
# These accounts shouw up in w3.eth.accounts and can be used wih transact().
# duration must be specified because the method expects 3 parameters.
account = w3.geth.personal.new_account(password)
w3.geth.personal.unlock_account(account, password, 0)
return account
def create_and_fund_eth_account(w3, source_acct, number, amount_to_fund):
    """Create `number` fresh accounts and fund each with `amount_to_fund` wei
    from `source_acct`; return the list of new accounts."""
    # Bug fix: the balance check was hard-coded for 2 accounts; it must cover
    # every transfer made below regardless of `number`.
    assert w3.eth.get_balance(source_acct) > number * amount_to_fund, \
        f"Source account {source_acct} has insufficient ether balance"
    accounts = [create_eth_account(w3) for _ in range(number)]
    for acct in accounts:
        start_balance = w3.eth.get_balance(acct)
        eth.send_ether(w3, source_acct, acct, amount_to_fund)
        assert w3.eth.get_balance(acct) == start_balance + amount_to_fund
    return accounts
def advance_block_w3(w3, number):
    """Mine `number` empty blocks through the evm_mine RPC extension."""
    for _ in range(number):
        w3.provider.make_request("evm_mine", [])
def advance_block_truffle(number):
    """Mine `number` blocks by running the truffle advanceBlock script."""
    script_args = ["npx", "truffle", "exec", "scripts/advanceBlock.js", str(number)]
    cmd.execst(script_args, cwd=main.project_dir("smart-contracts"))
def get_sifchain_balance(sif_addr):
    """Query sifnoded for all balances of `sif_addr`; return {denom: int amount}."""
    args = ["sifnoded", "query", "bank", "balances", sif_addr, "--limit",
        str(100000000), "--output", "json"]
    stdout = cmd.execst(args)[1]
    balances = json.loads(stdout)["balances"]
    return {entry["denom"]: int(entry["amount"]) for entry in balances}
def sif_balances_equal(dict1, dict2):
    """Return True iff both balance mappings have identical keys and amounts."""
    if set(dict1) != set(dict2):
        return False
    return all(dict1[denom] == dict2[denom] for denom in dict1)
def deploy_bridge_token_for_testing(w3, token_symbol, owner_address, mint_amount):
    """Deploy a fresh BridgeToken with `token_symbol` and mint `mint_amount`
    to `owner_address`; return the deployed contract handle."""
    # Get BridgeToken contract; on peggy1 branch it is already deployed by truffle migrate.
    sc_json = get_compiled_sc_ganache("BridgeToken")
    abi = sc_json["abi"]
    bytecode = sc_json["bytecode"]
    bridge_token = w3.eth.contract(abi=abi, bytecode=bytecode)
    txhash = bridge_token.constructor(token_symbol).transact()
    # Bug fix: use wait_for_transaction_receipt (as everywhere else in this
    # module); get_transaction_receipt does not wait for the transaction to be
    # mined, so contractAddress could be unavailable.
    txrcpt = w3.eth.wait_for_transaction_receipt(txhash)
    address = txrcpt.contractAddress
    bridge_token = w3.eth.contract(address=address, abi=abi)
    txhash = bridge_token.functions.mint(owner_address, mint_amount).transact()
    txrcpt = w3.eth.wait_for_transaction_receipt(txhash)
    assert bridge_token.functions.balanceOf(owner_address).call() == mint_amount
    assert bridge_token.functions.totalSupply().call() == mint_amount
    assert bridge_token.functions.symbol().call() == token_symbol
    assert bridge_token.address == address
    return bridge_token
def wait_for_sif_balance_change(sif_addr, old_balances, polling_time=1, timeout=90):
    """Poll `sif_addr` until its balances differ from `old_balances`.

    Returns the new balances, or raises after `timeout` seconds.
    """
    advance_block_truffle(50)
    start_time = time.time()
    while True:
        new_balances = get_sifchain_balance(sif_addr)
        if not sif_balances_equal(old_balances, new_balances):
            return new_balances
        time.sleep(polling_time)
        if time.time() - start_time > timeout:
            raise Exception("Timeout waiting for sif balance to change")
def test_blocklist_eth(source_ethereum_address: str):
    # Pytest entry point: run the ether blocklist scenario against the node
    # configured via $ETHEREUM_WEBSOCKET_ADDRESS.
    _test_blocklist_eth(get_web3_connection_for_test(), source_ethereum_address)
def _test_blocklist_eth(w3, source_ethereum_address):
    """Verify that BridgeBank.lock() of ether honours the Blocklist contract.

    Flow: fund two fresh accounts and check both can lock; blocklist acct2 and
    check only acct2 is rejected; clear the blocklist and check both work again.
    """
    default_account = w3.eth.accounts[0]  # Should be deployer
    assert default_account == test_utilities.get_required_env_var("OWNER"), "OWNER account is not the same as default"
    assert default_account.lower() == source_ethereum_address.lower(), "source_ethereum_address account is not the same as default"
    w3.eth.defaultAccount = default_account
    amount_to_fund = 1 * ETH
    amount_to_send = ETH // 100
    acct1, acct2 = create_and_fund_eth_account(w3, default_account, 2, amount_to_fund)
    blocklist_sc = get_blocklist_sc(w3)
    to_sif_acct = create_sifchain_addr()
    sif_symbol = "ceth"
    bridge_bank = get_bridge_bank_sc(w3)
    # Renamed from `filter` so the builtin of that name is not shadowed.
    log_lock_filter = bridge_bank.events.LogLock.createFilter(fromBlock="latest")
    # Valid negative test outcome: transaction rejected with the string "Address is blocklisted"
    def assert_blocked(addr):
        assert len(log_lock_filter.get_new_entries()) == 0
        try:
            bridge_bank_lock_eth(w3, bridge_bank, addr, to_sif_acct, amount_to_send)
            assert False
        except ValueError as e:
            assert "Address is blocklisted" in e.args[0]["message"]
        assert len(log_lock_filter.get_new_entries()) == 0
    # Valid positive test outcome: event emitted, optionally: funds appear on sifchain
    def assert_not_blocked(addr):
        assert len(log_lock_filter.get_new_entries()) == 0
        balances_before = get_sifchain_balance(to_sif_acct)
        txrcpt = bridge_bank_lock_eth(w3, bridge_bank, addr, to_sif_acct, amount_to_send)
        balances_after = wait_for_sif_balance_change(to_sif_acct, balances_before)
        assert balances_after.get(sif_symbol, 0) == balances_before.get(sif_symbol, 0) + amount_to_send
        entries = log_lock_filter.get_new_entries()
        assert len(entries) == 1
        assert entries[0].event == "LogLock"
        assert entries[0].transactionHash == txrcpt.transactionHash
        assert entries[0].address == bridge_bank.address
        assert entries[0].args["_from"] == addr
        assert entries[0].args["_to"] == to_sif_acct.encode("UTF-8")
        assert entries[0].args["_value"] == amount_to_send
    try:
        assert_not_blocked(acct1)
        assert_not_blocked(acct2)
        set_blocklist_to(w3, blocklist_sc, [acct2])
        assert_not_blocked(acct1)
        assert_blocked(acct2)
        set_blocklist_to(w3, blocklist_sc, [])
        assert_not_blocked(acct1)
        assert_not_blocked(acct2)
    finally:
        w3.eth.uninstall_filter(log_lock_filter.filter_id)
def test_blocklist_erc20(source_ethereum_address: str):
    # Pytest entry point: run the ERC20 blocklist scenario against the node
    # configured via $ETHEREUM_WEBSOCKET_ADDRESS.
    _test_blocklist_erc20(get_web3_connection_for_test(), source_ethereum_address)
# For ERC20 tokens, we need to create a new instance of Blocklist smart contract, deploy it and whitelist it with
# BridgeBank. In peggy1, the token matching in BridgeBank is done by symbol, so we need to give our token a unique
# symbol such as TEST or MOCK + random suffix + call updateEthWhiteList() + mint() + approve().
# See smart-contracts/test/test_bridgeBank.js:131-160 for example.
def _test_blocklist_erc20(w3, source_ethereum_address):
    """Verify that BridgeBank.lock() of an ERC20 token honours the Blocklist.

    Deploys a throwaway BridgeToken, whitelists it with BridgeBank, funds two
    accounts with tokens + allowances, then exercises the same block/unblock
    scenario as the ether variant.
    """
    default_account = w3.eth.accounts[0]  # Should be deployer
    assert default_account == test_utilities.get_required_env_var("OWNER"), "OWNER account is not the same as default"
    assert default_account.lower() == source_ethereum_address.lower(), "source_ethereum_address account is not the same as default"
    w3.eth.defaultAccount = default_account
    # Must not exist on EVM yet
    eth_token_symbol = "MOCK" + random_string(6)
    sif_token_symbol = "c" + eth_token_symbol.lower()
    bridge_token = deploy_bridge_token_for_testing(w3, eth_token_symbol, default_account, 10**18)
    bridge_bank = get_bridge_bank_sc(w3)
    assert eth_token_symbol == bridge_token.functions.symbol().call()
    amount_to_fund = 1 * ETH
    amount_to_send = 1
    acct1, acct2 = create_and_fund_eth_account(w3, default_account, 2, amount_to_fund)
    for acct in [acct1, acct2]:
        # Transfer 10 tokens to account
        txhash = bridge_token.functions.transfer(acct, 10).transact()
        txrcpt = w3.eth.wait_for_transaction_receipt(txhash)
        # Set allowance for BridgeBank to send 10 tokens on behalf of acct1 and acct2.
        # Without this we get transaction rejected with "SafeERC20: low-level call failed"
        txhash = bridge_token.functions.approve(bridge_bank.address, 10).transact({"from": acct})
        txrcpt = w3.eth.wait_for_transaction_receipt(txhash)
    blocklist_sc = get_blocklist_sc(w3)
    to_sif_acct = create_sifchain_addr()
    # Token needs to be whitelisted; if it is not, the transaction is reverted like this:
    # "revert Only token in whitelist can be transferred to cosmos"
    # Call of updateEthWhiteList will fail if we try to remove an item from whitelist which is not on the whitelist.
    txhash = bridge_bank.functions.updateEthWhiteList(bridge_token.address, True).transact()
    txrcpt = w3.eth.wait_for_transaction_receipt(txhash)
    # Renamed from `filter` so the builtin of that name is not shadowed.
    log_lock_filter = bridge_bank.events.LogLock.createFilter(fromBlock="latest")
    def assert_blocked(addr):
        assert len(log_lock_filter.get_new_entries()) == 0
        try:
            bridge_bank_lock_erc20(w3, bridge_bank, bridge_token, addr, to_sif_acct, amount_to_send)
            assert False
        except ValueError as e:
            assert "Address is blocklisted" in e.args[0]["message"]
        assert len(log_lock_filter.get_new_entries()) == 0
    def assert_not_blocked(addr):
        assert len(log_lock_filter.get_new_entries()) == 0
        balances_before = get_sifchain_balance(to_sif_acct)
        txrcpt = bridge_bank_lock_erc20(w3, bridge_bank, bridge_token, addr, to_sif_acct, amount_to_send)
        balances_after = wait_for_sif_balance_change(to_sif_acct, balances_before)
        assert balances_after.get(sif_token_symbol, 0) == balances_before.get(sif_token_symbol, 0) + amount_to_send
        entries = log_lock_filter.get_new_entries()
        assert len(entries) == 1
        assert entries[0].event == "LogLock"
        assert entries[0].transactionHash == txrcpt.transactionHash
        assert entries[0].address == bridge_bank.address
        assert entries[0].args["_from"] == addr
        assert entries[0].args["_to"] == to_sif_acct.encode("UTF-8")
        assert entries[0].args["_value"] == amount_to_send
    try:
        assert_not_blocked(acct1)
        assert_not_blocked(acct2)
        set_blocklist_to(w3, blocklist_sc, [acct2])
        assert_not_blocked(acct1)
        assert_blocked(acct2)
        set_blocklist_to(w3, blocklist_sc, [])
        assert_not_blocked(acct1)
        assert_not_blocked(acct2)
    finally:
        w3.eth.uninstall_filter(log_lock_filter.filter_id)
|
import onmt
import onmt.modules
import torch.nn as nn
import torch, math
from torch.nn.modules.loss import _Loss
#~ class LabelSmoothedCrossEntropyCriterion(_Loss):
#~
#~ def __init__(self, n_targets, eps):
#~ super().__init__()
#~ self.eps = eps
#~ self.n_targets = n_targets
#~
#~ @staticmethod
#~ def add_args(parser):
#~ """Add criterion-specific arguments to the parser."""
#~ parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
#~ help='epsilon for label smoothing, 0 means no label smoothing')
#~
#~ def forward(self, model, sample, reduce=True):
#~ """Compute the loss for the given sample.
#~ Returns a tuple with three elements:
#~ 1) the loss, as a Variable
#~ 2) the sample size, which is used as the denominator for the gradient
#~ 3) logging outputs to display while training
#~ """
#~ net_output = model(**sample['net_input'])
#~ lprobs = model.get_normalized_probs(net_output, log_probs=True)
#~ target = sample['target'].unsqueeze(-1)
#~ non_pad_mask = target.ne(self.padding_idx)
#~ nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
#~ smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
#~ if reduce:
#~ nll_loss = nll_loss.sum()
#~ smooth_loss = smooth_loss.sum()
#~ eps_i = self.eps / lprobs.size(-1)
#~ loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
#~
#~ sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
#~ logging_output = {
#~ 'loss': utils.item(loss.data) if reduce else loss.data,
#~ 'nll_loss': utils.item(nll_loss.data) if reduce else loss.data,
#~ 'ntokens': sample['ntokens'],
#~ 'sample_size': sample_size,
#~ }
#~ return loss, sample_size, logging_output
class LossFuncBase(_Loss):
    """
    Class for managing efficient loss computation. Handles
    sharding next step predictions and accumulating multiple
    loss computations.
    Users can implement their own loss computation strategy by making
    a subclass of this one. Users need to implement the _compute_loss()
    and forward() methods.
    Args:
        output_size: number of words in vocabulary()
    """
    def __init__(self, output_size):
        super(LossFuncBase, self).__init__()
        self.output_size = output_size
        self.padding_idx = onmt.Constants.PAD

    def _compute_loss(self, scores, targets):
        # Bug fix: previously this *returned* the NotImplementedError class
        # instead of raising it, silently handing callers an exception type.
        raise NotImplementedError

    def forward(self, dists, targets, hiddens, **kwargs):
        """
        Compute the loss. Subclass must define this method.
        Args:
            dists: the predicted distributions from the model.
            targets: the validation targets to compare output with.
            hiddens: hidden states from the model.
            **kwargs(optional): additional info for computing loss.
        """
        # Bug fix: raise rather than return the exception class (see above).
        raise NotImplementedError
class NMTLossFunc(LossFuncBase):
    """
    Standard NMT Loss Computation.

    Supports optional label smoothing: with label_smoothing > 0 the loss is a
    mixture of NLL on the true label and a uniform term over the vocabulary;
    with label_smoothing == 0 it is a plain (sum-reduced) NLLLoss that ignores
    the padding index.
    """
    def __init__(self, output_size, label_smoothing=0.0, shard_size=1):
        # output_size: vocabulary size; label_smoothing: epsilon in [0, 1);
        # shard_size: kept for sharded loss computation (stored, see shard_split).
        super(NMTLossFunc, self).__init__(output_size)
        self.shard_split = shard_size

        if label_smoothing > 0:
            # When label smoothing is turned on,
            # KL-divergence between q_{smoothed ground truth prob.}(w)
            # and p_{prob. computed by model}(w) is minimized.
            # If label smoothing value is set to zero, the loss
            # is equivalent to NLLLoss or CrossEntropyLoss.
            # All non-true labels are uniformly set to low-confidence.
            # NOTE: in this branch self.func is intentionally not set; the
            # smoothed loss is computed directly in _compute_loss.
            self.smoothing_value = label_smoothing / (output_size - 1)
            # ~ self.func = nn.KLDivLoss(size_average=False)
            # ~ one_hot = torch.randn(1, output_size)
            # ~ one_hot.fill_(self.smoothing_value)
            # ~ one_hot[0][self.padding_idx] = 0
            # ~ self.register_buffer('one_hot', one_hot)

        else:
            # Plain NLL loss; zero weight on the padding index so padded
            # positions contribute nothing to the sum.
            weight = torch.ones(output_size)
            weight[self.padding_idx] = 0
            self.func = nn.NLLLoss(weight, reduction='sum')
        self.confidence = 1.0 - label_smoothing
        self.label_smoothing = label_smoothing

    def _compute_loss(self, scores, targets, smooth=True):
        """Compute (loss, loss_data) from flattened log-prob scores and targets.

        scores: log-probabilities, (batch*time) x vocab after reshaping.
        targets: gold indices, flattened to (batch*time).
        Returns the differentiable loss tensor and a float for reporting
        (the un-smoothed NLL when smoothing is active).
        """
        gtruth = targets.view(-1)  # batch * time
        scores = scores.view(-1, scores.size(-1))  # batch * time X vocab_size

        if self.confidence < 1:  # label smoothing
            tdata = gtruth

            # squeeze is a trick to know if mask has dimension or not
            # ~ mask = torch.nonzero(tdata.eq(self.padding_idx)).squeeze()
            # ~ likelihood = torch.gather(scores, 1, tdata.unsqueeze(1))
            # ~ tmp_ = self.one_hot.repeat(gtruth.size(0), 1)
            # ~ tmp_.scatter_(1, tdata.unsqueeze(1), self.confidence)
            # ~ if mask.numel() > 0:
            # ~ likelihood.index_fill_(0, mask, 0)
            # ~ tmp_.index_fill_(0, mask, 0)

            # ~ gtruth = torch.autograd.Variable(tmp_, requires_grad=False)
            # ~ loss = self.func(scores, gtruth)
            # ~ loss_data = - likelihood.sum(0).item()

            # Smoothed loss = (1 - eps) * NLL + eps_i * (uniform term),
            # computed only over non-padding positions.
            lprobs = scores
            non_pad_mask = gtruth.ne(self.padding_idx)
            nll_loss = -lprobs.gather(1, gtruth.unsqueeze(1))[non_pad_mask]
            smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()

            eps_i = self.smoothing_value
            loss = (1. - self.label_smoothing) * nll_loss + eps_i * smooth_loss
            loss_data = nll_loss.item()

        else:
            loss = self.func(scores.float(), gtruth)
            loss_data = loss.data.item()

        return (loss, loss_data)

    def forward(self, outputs, targets, generator=None, backward=False, mask=None, normalizer=1):
        """
        Compute the loss. Subclass must define this method.

        Args:
            outputs: the predictive output from the model. time x batch x vocab_size
                or time x batch x hidden_size
            targets: the validate target to compare output with. time x batch
            generator: maps hidden states to vocab distributions (saves memory
                by being applied only to non-padding positions)
            backward: if True, also runs backward() on loss / normalizer
            mask: optional padding mask (non-zero = keep position)
            normalizer: divisor applied to the loss before backward()
        Returns:
            dict with 'loss' (tensor) and 'nll' (float for reporting).
        """
        original_outputs = outputs
        batch_size = outputs.size(1)
        h_size = outputs.size(-1)

        # flatten the output
        outputs = outputs.contiguous().view(-1, outputs.size(-1))
        targets = targets.view(-1)

        if mask is not None:
            """ We remove all positions with PAD
                to save memory on unwanted positions
            """
            flattened_mask = mask.view(-1)

            non_pad_indices = torch.nonzero(flattened_mask).squeeze(1)

            clean_input = outputs.index_select(0, non_pad_indices)

            clean_targets = targets.index_select(0, non_pad_indices)

        else:
            clean_input = outputs
            clean_targets = targets

        dists = generator(clean_input)  # Dists Dimension: (words x target_vocab)

        loss, loss_data = self._compute_loss(dists, clean_targets)

        if backward:
            # Scale by normalizer before backprop so gradients are averaged.
            loss.div(normalizer).backward()

        output = dict()
        output['loss'] = loss
        output['nll'] = loss_data

        return output
|
# Test of Wall constructor
# Copyright (C) 2019 Robin, Scheibler, Cyril Cadoux
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
from __future__ import division
import numpy as np
import pyroomacoustics as pra
# Tolerance used by the normal-direction tests below.
eps = 1e-6
# The vertices of the wall are assumed to turn counter-clockwise around the
# normal of the wall
# A very simple wall
# Test fixtures: each entry provides the wall corners (one column per vertex)
# plus the expected area and the absorption/scattering coefficients.
walls = [
    {
        "corners": np.array(
            [[0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0],]
        ),
        "area": 1,
        "absorption": [0.2],
        "scattering": [0.1],
    },
    {
        "corners": np.array([[-1, 1, 1], [1, -1, 1], [1, 1, -1],]),
        "area": 3.4641016151377557,  # this is an equilateral triangle with side sqrt(8)
        "absorption": [0.2],
        "scattering": [0.1],
    },
]
def test_wall_3d_construct_0():
    """Construct a wall from the unit-square fixture and return it."""
    corners = walls[0]["corners"]
    return pra.wall_factory(corners, [0.2], [0.1])
def test_wall_3d_construct_1():
    """Construct a wall from the triangle fixture and return it."""
    corners = walls[1]["corners"]
    return pra.wall_factory(corners, [0.2], [0.1])
def test_wall_3d_area_0():
    """Check the computed area of the unit-square wall."""
    fixture = walls[0]
    wall = pra.wall_factory(
        fixture["corners"], fixture["absorption"], fixture["scattering"]
    )
    err = abs(wall.area() - fixture["area"])
    # NOTE(review): a tolerance of 1 is very loose for an area of ~1; `eps`
    # would be the stricter check — confirm whether this was intentional.
    assert err < 1, "The error is {}".format(err)
def test_wall_3d_area_1():
    """Check the computed area of the triangle wall."""
    fixture = walls[1]
    wall = pra.wall_factory(
        fixture["corners"], fixture["absorption"], fixture["scattering"]
    )
    err = abs(wall.area() - fixture["area"])
    # NOTE(review): tolerance of 1 is loose relative to eps — confirm intent.
    assert err < 1, "The error is {}".format(err)
def test_wall_3d_normal_0():
    """Reversing vertex order must flip the square wall's normal exactly."""
    corners = walls[0]["corners"]
    wall_fwd = pra.wall_factory(corners, [0.2], [0.1])
    wall_rev = pra.wall_factory(corners[:, ::-1], [0.2], [0.1])
    # Opposite normals sum to the zero vector.
    err = np.linalg.norm(wall_fwd.normal + wall_rev.normal)
    assert err < eps, "The error is {}".format(err)
def test_wall_3d_normal_1():
    """Reversing vertex order must flip the triangle wall's normal exactly."""
    corners = walls[1]["corners"]
    wall_fwd = pra.wall_factory(corners, [0.2], [0.1])
    wall_rev = pra.wall_factory(corners[:, ::-1], [0.2], [0.1])
    # Opposite normals sum to the zero vector.
    err = np.linalg.norm(wall_fwd.normal + wall_rev.normal)
    assert err < eps, "The error is {}".format(err)
if __name__ == "__main__":
    # Run the tests manually (outside pytest).
    wall0 = test_wall_3d_construct_0()
    test_wall_3d_normal_0()
    test_wall_3d_area_0()

    wall1 = test_wall_3d_construct_1()
    test_wall_3d_normal_1()
    test_wall_3d_area_1()
|
import pygame
class Resource:
    """Central registry for the game's images, sounds and music file paths."""

    def __init__(self):
        self.images = {}  # name -> loaded image surface
        self.sounds = {}  # name -> loaded sound object
        self.music = {}   # name -> file path (music is streamed, not preloaded)

    def load(self):
        """Load all assets from disk (requires pygame to be initialized)."""
        print("Loading resources...")
        self.images["blood"] = pygame.image.load("snake/images/blood.png").convert_alpha()
        self.images["dirt"] = pygame.image.load("snake/images/dirt.png")
        self.sounds["eat"] = pygame.mixer.Sound("snake/music/eat.wav")
        self.sounds["enter"] = pygame.mixer.Sound("snake/music/match2.wav")
        self.sounds["dead"] = pygame.mixer.Sound("snake/music/dead.wav")
        self.music["menu"] = "snake/music/menubgm.mp3"
        print("Resources loaded.")

    def get_image(self, name):
        """Return the image registered under `name`, or None if missing."""
        try:
            return self.images[name]
        except KeyError:  # narrowed from bare except: only a missing key is expected
            print("Image " + name + " not found")

    def get_sound(self, name):
        """Return the sound registered under `name`; quits pygame if missing."""
        try:
            return self.sounds[name]
        except KeyError:  # narrowed from bare except
            print("Sound " + name + " not found")
            pygame.quit()

    def play_music(self, name, loops=0):
        """Start streaming the music registered under `name`."""
        try:
            pygame.mixer.music.load(self.music[name])
            pygame.mixer.music.play(loops=loops)
        except (KeyError, pygame.error):  # missing entry or mixer failure
            print("Music " + name + " not found")
            pygame.quit()
R = Resource()
|
"""Tutorial script demonstrating tuple slicing, iteration and common pitfalls."""
numbers = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
letters = ('a', 'b', 'c', 'd')

# slicing of tuples
print(numbers[2:5])
even_nums = numbers[1::2]
odd_nums = numbers[0::2]
print('even numbers', even_nums)
print('odd numbers', odd_nums)

# iteration with tuples
for n in numbers:
    print(n ** 2)

# functions with tuples
def area_and_circumference_of_circle(radius):
    """Return (area, circumference) of a circle with the given radius."""
    import math
    return (math.pi * radius ** 2, 2 * math.pi * radius)

print(area_and_circumference_of_circle(9))

# common errors with tuples (assignment, iteration)
# Bug fix: `letters + 'e'` raises TypeError (a tuple can only be concatenated
# with another tuple), which crashed the script. Demonstrate the error safely,
# then show the correct one-element-tuple form.
try:
    letters = letters + 'e'
except TypeError as exc:
    print('error:', exc)
letters = letters + ('e',)
print(letters)
|
# Bep Marketplace ELE
# Copyright (c) 2016-2021 Kolibri Solutions
# License: See LICENSE file or https://github.com/KolibriSolutions/BepMarketplace/blob/master/LICENSE
#
from django.contrib import admin
from .models import CategoryResult, GradeCategory, GradeCategoryAspect, CategoryAspectResult, ResultOptions
class GradeCategoryAdmin(admin.ModelAdmin):
    # Filter categories by timeslot. Tuple form for consistency with the
    # other ModelAdmin classes in this module (previously a list).
    list_filter = ('TimeSlot',)
class GradeCategoryAspectAdmin(admin.ModelAdmin):
    # Filter aspects by their category and by that category's timeslot.
    list_filter = ('Category', 'Category__TimeSlot')
class CategoryResultAdmin(admin.ModelAdmin):
    # Filter results by their category and by that category's timeslot.
    list_filter = ('Category', 'Category__TimeSlot')
class CategoryAspectResultAdmin(admin.ModelAdmin):
    # Filter aspect results by timeslot, category and parent category result.
    list_filter = ('CategoryAspect__Category__TimeSlot', 'CategoryAspect__Category', 'CategoryResult')
# Register the grading models with the Django admin site.
# ResultOptions uses the default ModelAdmin; the rest use the classes above.
admin.site.register(ResultOptions, )
admin.site.register(GradeCategoryAspect, GradeCategoryAspectAdmin)
admin.site.register(CategoryAspectResult, CategoryAspectResultAdmin)
admin.site.register(CategoryResult, CategoryResultAdmin)
admin.site.register(GradeCategory, GradeCategoryAdmin)
|
'''OpenGL extension INTEL.parallel_arrays
This module customises the behaviour of the
OpenGL.raw.GL.INTEL.parallel_arrays to provide a more
Python-friendly API
Overview (from the spec)
This extension adds the ability to format vertex arrays in a way that's
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/INTEL/parallel_arrays.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.INTEL.parallel_arrays import *
from OpenGL.raw.GL.INTEL.parallel_arrays import _EXTENSION_NAME
def glInitParallelArraysINTEL():
    '''Return boolean indicating whether this extension is available'''
    # Local import is part of the autogenerated PyOpenGL template.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
# Autogenerated wrappers: each *PointervINTEL entry point takes a 'pointer'
# array argument with a fixed input size of 4.
glVertexPointervINTEL=wrapper.wrapper(glVertexPointervINTEL).setInputArraySize(
    'pointer', 4
)
glNormalPointervINTEL=wrapper.wrapper(glNormalPointervINTEL).setInputArraySize(
    'pointer', 4
)
glColorPointervINTEL=wrapper.wrapper(glColorPointervINTEL).setInputArraySize(
    'pointer', 4
)
glTexCoordPointervINTEL=wrapper.wrapper(glTexCoordPointervINTEL).setInputArraySize(
    'pointer', 4
)
### END AUTOGENERATED SECTION
|
import os
import sys
import openpyxl
import argparse
import re
from common import *
__version__ = '0.0.1'
# cli parser definition
parser = argparse.ArgumentParser('tag_title_row')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('file', type=str, help='file name to be read in.')
parser.add_argument('start_col', type=str, help='Column to begin adding tags to.')
parser.add_argument('stop_col', type=str, help='Column to end adding tags to.')
parser.add_argument('--sheet', '-s', type=str, help='sheet name to pull data from')
parser.add_argument('--output', '-o', type=str, help='file name to save as')
# NOTE(review): store_false means passing --inclusive makes the range
# EXCLUSIVE (omitting it leaves the default True) — the flag name reads
# backwards; confirm the intended semantics.
parser.add_argument('--inclusive', '-i', action="store_false", # default=True,
                    help='whether to use end column as inclusive or exclusive endpoint')
parser.add_argument('--direct_col', '-d', action="store_true", # default=False,
                    help='use direct column name instead of name defined in first row of column')
# main function
@time_execution
def main():
    """Tag the title row of a spreadsheet between two columns, then save it."""
    args = parser.parse_args()
    wb, ws = get_workbook(args)
    title_mapping = excel_mappings(ws[1])
    # Resolve the column endpoints either directly or via the title row.
    if args.direct_col:
        first, last = args.start_col, args.stop_col
    else:
        first, last = title_mapping[args.start_col], title_mapping[args.stop_col]
    cols = get_cols_by_name(first, last, inclusive=args.inclusive)
    format_title_row(ws[1], cols)
    write_workbook(wb, args)
if __name__ == '__main__':
    main()
    # Exit explicitly with a success status code.
    sys.exit(0)
|
from compas_fab.backends.ros.messages import Header
from compas_fab.backends.ros.messages import ROSmsg
from compas_fab.backends.ros.messages import String
from compas_fab.backends.ros.messages import Time
from compas_fab.backends.ros.messages import Float32MultiArray
from compas_fab.backends.ros.messages import Int8MultiArray
from compas_fab.backends.ros.messages import Pose
from compas_fab.backends.ros.messages import PoseArray
def test_rosmsg_attributes():
    """Keyword arguments become attributes on the message."""
    msg = ROSmsg(a=1, b=2)
    assert msg.a == 1
    assert msg.b == 2
def test_rosmsg_str():
    """str() of a message matches str() of its attribute dict."""
    msg = ROSmsg(a=1, b='2')
    assert str(msg) == str(dict(a=1, b='2'))
def test_nested_repr():
    """repr() recurses into nested message fields."""
    stamp = Time(80, 20)
    header = Header(seq=10, stamp=stamp, frame_id='/wow')
    assert repr(header) == "Header(seq=10, stamp=Time(secs=80, nsecs=20), frame_id='/wow')"
def test_subclasses_define_type_name():
    """Every ROSmsg subclass must declare its ROS message type."""
    for subclass in ROSmsg.__subclasses__():
        assert subclass.ROS_MSG_TYPE is not None, 'Class {} does not define its msg type'.format(subclass.__name__)
def test_consistent_naming():
    """The class name must match the second component of its ROS msg type."""
    for subclass in ROSmsg.__subclasses__():
        type_name = subclass.ROS_MSG_TYPE.split('/')[1]
        assert type_name == subclass.__name__, 'Class {} does not match to the ROS msg type name={}'.format(subclass.__name__, subclass.ROS_MSG_TYPE)
def test_uniqueness_of_msg_type():
    """No two ROSmsg subclasses may claim the same ROS msg type."""
    seen = set()
    dupes = set()
    for cls in ROSmsg.__subclasses__():
        if cls.ROS_MSG_TYPE in seen:
            dupes.add(cls.__name__)
        else:
            seen.add(cls.ROS_MSG_TYPE)
    assert len(dupes) == 0, 'The classes {} define duplicate ROS MSG TYPEs'.format(str(dupes))
def test_parse_from_json():
    """parse() accepts a JSON string payload."""
    msg = ROSmsg.parse('{"data": "Hello"}', 'std_msgs/String')
    assert isinstance(msg, String)
    assert msg.data == 'Hello'
def test_parse_from_dict():
    """parse() accepts a plain dict payload."""
    msg = ROSmsg.parse(dict(data='Hello'), 'std_msgs/String')
    assert isinstance(msg, String)
    assert msg.data == 'Hello'
def test_parse_unknown_type():
    """Unknown message types fall back to a generic ROSmsg."""
    msg = ROSmsg.parse(dict(something='Hello'), 'std_msgs/Unknown')
    assert isinstance(msg, ROSmsg)
    assert msg.something == 'Hello'
def test_float32_multiarray():
    """Float32MultiArray gets a default (empty) layout."""
    msg = Float32MultiArray(data=[1.3, 0.5, 3.0])
    assert repr(msg) == "Float32MultiArray(layout=MultiArrayLayout(dim=[], data_offset=0), data=[1.3, 0.5, 3.0])"
def test_int8_multiarray():
    """Int8MultiArray gets a default (empty) layout."""
    msg = Int8MultiArray(data=[1, 2, 3, 4])
    assert repr(msg) == "Int8MultiArray(layout=MultiArrayLayout(dim=[], data_offset=0), data=[1, 2, 3, 4])"
def test_posearray():
    """A PoseArray built from a world-XY frame has the expected repr."""
    from compas.geometry import Frame
    poses = [Pose.from_frame(frame) for frame in [Frame.worldXY()]]
    msg = PoseArray(header=Header(), poses=poses)
    assert repr(msg) == "PoseArray(header=Header(seq=0, stamp=Time(secs=0, nsecs=0), frame_id='/world'), poses=[Pose(position=Point(x=0.0, y=0.0, z=0.0), orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0))])"  # noqa E501
|
"""Add index to url
Revision ID: 2f15229eefbd
Revises: 4167d142d57b
Create Date: 2017-10-14 20:16:16.172385
"""
# revision identifiers, used by Alembic.
revision = '2f15229eefbd'
down_revision = '4167d142d57b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create a non-unique index on RepositoryApps.url (MySQL prefix length 255)."""
    index_name = 'ix_RepositoryApps_url_shortened'
    op.create_index(index_name, 'RepositoryApps', ['url'], unique=False, mysql_length={'url': 255})
def downgrade():
    """Drop the index created by upgrade()."""
    index_name = 'ix_RepositoryApps_url_shortened'
    op.drop_index(index_name, table_name='RepositoryApps')
|
import copy
import numpy as NP
import matplotlib.pyplot as PLT
import matplotlib.colors as PLTC
import matplotlib.ticker as PLTick
import yaml, argparse, warnings
import progressbar as PGB
from prisim import bispectrum_phase as BSP
import ipdb as PDB
PLT.switch_backend("TkAgg")
if __name__ == '__main__':
    ## Parse input arguments
    parser = argparse.ArgumentParser(description='Program to analyze closure phases from multiple days from multiple sources such as polarizations')
    input_group = parser.add_argument_group('Input parameters', 'Input specifications')
    input_group.add_argument('-i', '--infile', dest='infile', default='/data3/t_nithyanandan/codes/mine/python/projects/closure/combine_pol_multiday_EQ28_data_RA_1.6_closure_PS_analysis_parms.yaml', type=str, required=False, help='File specifying input parameters')
    args = vars(parser.parse_args())
    # Load the YAML parameter file describing directory layout, data/model file
    # names and the plots to produce.
    with open(args['infile'], 'r') as parms_file:
        parms = yaml.safe_load(parms_file)
    datadirs = parms['dirStruct']['datadirs']
    infiles_a = parms['dirStruct']['infiles_a']
    infiles_a_errinfo = parms['dirStruct']['err_infiles_a']
    infiles_b = parms['dirStruct']['infiles_b']
    infiles_b_errinfo = parms['dirStruct']['err_infiles_b']
    model_labels = parms['dirStruct']['modelinfo']['model_labels']
    mdldirs = parms['dirStruct']['modelinfo']['mdldirs']
    mdl_infiles_a = parms['dirStruct']['modelinfo']['infiles_a']
    mdl_infiles_a_errinfo = parms['dirStruct']['modelinfo']['err_infiles_a']
    mdl_infiles_b = parms['dirStruct']['modelinfo']['infiles_b']
    mdl_infiles_b_errinfo = parms['dirStruct']['modelinfo']['err_infiles_b']
    outdir = parms['dirStruct']['outdir']
    figdir = outdir + parms['dirStruct']['figdir']
    plotfile_pfx = parms['dirStruct']['plotfile_pfx']
    # Read the data closure-phase cross power spectra (two sets, 'a' and 'b')
    # and their error-info counterparts from every data directory.
    xcpdps_a = []
    excpdps_a = []
    xcpdps_b = []
    excpdps_b = []
    for fileind,indir in enumerate(datadirs):
        infile_a = indir + infiles_a[fileind]
        infile_a_errinfo = indir + infiles_a_errinfo[fileind]
        infile_b = indir + infiles_b[fileind]
        infile_b_errinfo = indir + infiles_b_errinfo[fileind]
        xcpdps_a += [BSP.read_CPhase_cross_power_spectrum(infile_a)]
        excpdps_a += [BSP.read_CPhase_cross_power_spectrum(infile_a_errinfo)]
        xcpdps_b += [BSP.read_CPhase_cross_power_spectrum(infile_b)]
        excpdps_b += [BSP.read_CPhase_cross_power_spectrum(infile_b_errinfo)]
    # Incoherently average each set over the input files (e.g. polarizations);
    # diagoffsets=None means no diagonal-offset averaging at this stage.
    xcpdps_a_avg_pol, excpdps_a_avg_pol = BSP.incoherent_cross_power_spectrum_average(xcpdps_a, excpdps=excpdps_a, diagoffsets=None)
    xcpdps_b_avg_pol, excpdps_b_avg_pol = BSP.incoherent_cross_power_spectrum_average(xcpdps_b, excpdps=excpdps_b, diagoffsets=None)
    # Repeat the read + polarization-average step for every foreground model.
    models_xcpdps_a_avg_pol = []
    models_excpdps_a_avg_pol = []
    models_xcpdps_b_avg_pol = []
    models_excpdps_b_avg_pol = []
    for mdlind, model in enumerate(model_labels):
        mdl_xcpdps_a = []
        mdl_excpdps_a = []
        mdl_xcpdps_b = []
        mdl_excpdps_b = []
        for fileind,mdldir in enumerate(mdldirs[mdlind]):
            mdl_infile_a = mdldir + mdl_infiles_a[mdlind][fileind]
            mdl_infile_a_errinfo = mdldir + mdl_infiles_a_errinfo[mdlind][fileind]
            mdl_infile_b = mdldir + mdl_infiles_b[mdlind][fileind]
            mdl_infile_b_errinfo = mdldir + mdl_infiles_b_errinfo[mdlind][fileind]
            mdl_xcpdps_a += [BSP.read_CPhase_cross_power_spectrum(mdl_infile_a)]
            mdl_excpdps_a += [BSP.read_CPhase_cross_power_spectrum(mdl_infile_a_errinfo)]
            mdl_xcpdps_b += [BSP.read_CPhase_cross_power_spectrum(mdl_infile_b)]
            mdl_excpdps_b += [BSP.read_CPhase_cross_power_spectrum(mdl_infile_b_errinfo)]
        mdl_xcpdps_a_avg_pol, mdl_excpdps_a_avg_pol = BSP.incoherent_cross_power_spectrum_average(mdl_xcpdps_a, excpdps=mdl_excpdps_a, diagoffsets=None)
        models_xcpdps_a_avg_pol += [mdl_xcpdps_a_avg_pol]
        models_excpdps_a_avg_pol += [mdl_excpdps_a_avg_pol]
        mdl_xcpdps_b_avg_pol, mdl_excpdps_b_avg_pol = BSP.incoherent_cross_power_spectrum_average(mdl_xcpdps_b, excpdps=mdl_excpdps_b, diagoffsets=None)
        models_xcpdps_b_avg_pol += [mdl_xcpdps_b_avg_pol]
        models_excpdps_b_avg_pol += [mdl_excpdps_b_avg_pol]
    # Select the plots whose 'action' flag is set in the parameter file.
    plot_info = parms['plot']
    plots = [key for key in plot_info if plot_info[key]['action']]
    PLT.ion()
if ('2' in plots) or ('2a' in plots) or ('2b' in plots) or ('2c' in plots) or ('2d' in plots):
sampling = plot_info['2']['sampling']
statistic = plot_info['2']['statistic']
datapool = plot_info['2']['datapool']
pspec_unit_type = plot_info['2']['units']
if pspec_unit_type == 'K':
pspec_unit = 'mK2 Mpc3'
else:
pspec_unit = 'Jy2 Mpc'
spw = plot_info['2']['spw']
if spw is None:
spwind = NP.arange(xcpdps2_a[sampling]['z'].size)
else:
spwind = NP.asarray(spw)
if statistic is None:
statistic = ['mean', 'median']
else:
statistic = [statistic]
ps_errtype = plot_info['2']['errtype']
errshade = {}
for errtype in ps_errtype:
if errtype.lower() == 'ssdiff':
errshade[errtype] = '0.8'
elif errtype.lower() == 'psdiff':
errshade[errtype] = '0.6'
nsigma = plot_info['2']['nsigma']
mdl_colrs = ['red', 'green', 'blue', 'cyan', 'gray', 'orange']
    if ('2c' in plots) or ('2d' in plots):
        # Translate the configured incoherent-axis combinations into the
        # per-combination {axis: offsets-array} dictionaries that BSP expects,
        # then incoherently average the spectra over those diagonal offsets.
        avg_incohax_a = plot_info['2c']['incohax_a']
        diagoffsets_incohax_a = plot_info['2c']['diagoffsets_a']
        diagoffsets_a = []
        avg_incohax_b = plot_info['2c']['incohax_b']
        diagoffsets_incohax_b = plot_info['2c']['diagoffsets_b']
        diagoffsets_b = []
        for combi,incax_comb in enumerate(avg_incohax_a):
            diagoffsets_a += [{}]
            for incaxind,incax in enumerate(incax_comb):
                diagoffsets_a[-1][incax] = NP.asarray(diagoffsets_incohax_a[combi][incaxind])
        xcpdps_a_avg_pol_diag, excpdps_a_avg_pol_diag = BSP.incoherent_cross_power_spectrum_average(xcpdps_a_avg_pol, excpdps=excpdps_a_avg_pol, diagoffsets=diagoffsets_a)
        models_xcpdps_a_avg_pol_diag = []
        models_excpdps_a_avg_pol_diag = []
        for combi,incax_comb in enumerate(avg_incohax_b):
            diagoffsets_b += [{}]
            for incaxind,incax in enumerate(incax_comb):
                diagoffsets_b[-1][incax] = NP.asarray(diagoffsets_incohax_b[combi][incaxind])
        xcpdps_b_avg_pol_diag, excpdps_b_avg_pol_diag = BSP.incoherent_cross_power_spectrum_average(xcpdps_b_avg_pol, excpdps=excpdps_b_avg_pol, diagoffsets=diagoffsets_b)
        models_xcpdps_b_avg_pol_diag = []
        models_excpdps_b_avg_pol_diag = []
        # Apply the same diagonal-offset averaging to every model, with a
        # progress bar since each model can be slow to average.
        if len(model_labels) > 0:
            progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Models '.format(len(model_labels)), PGB.ETA()], maxval=len(model_labels)).start()
            for i in range(len(model_labels)):
                model_xcpdps_a_avg_pol_diag, model_excpdps_a_avg_pol_diag = BSP.incoherent_cross_power_spectrum_average(models_xcpdps_a_avg_pol[i], excpdps=models_excpdps_a_avg_pol[i], diagoffsets=diagoffsets_a)
                models_xcpdps_a_avg_pol_diag += [copy.deepcopy(model_xcpdps_a_avg_pol_diag)]
                models_excpdps_a_avg_pol_diag += [copy.deepcopy(model_excpdps_a_avg_pol_diag)]
                model_xcpdps_b_avg_pol_diag, model_excpdps_b_avg_pol_diag = BSP.incoherent_cross_power_spectrum_average(models_xcpdps_b_avg_pol[i], excpdps=models_excpdps_b_avg_pol[i], diagoffsets=diagoffsets_b)
                models_xcpdps_b_avg_pol_diag += [copy.deepcopy(model_xcpdps_b_avg_pol_diag)]
                models_excpdps_b_avg_pol_diag += [copy.deepcopy(model_excpdps_b_avg_pol_diag)]
                progress.update(i+1)
            progress.finish()
        if '2c' in plots:
            # Plot '2c': diagonally-averaged cross power spectra vs k_parallel
            # on a symlog axis, one figure per (statistic, spw, lst, day, combi).
            lstind = [0]
            triadind = [0]
            dayind = [0]
            dayind_models = NP.zeros(len(model_labels), dtype=int).reshape(1,-1)
            for stat in statistic:
                for zind in spwind:
                    for lind in lstind:
                        for di,dind in enumerate(dayind):
                            for combi in range(len(diagoffsets_b)):
                                # Track extrema across panels to set common y-limits.
                                maxabsvals = []
                                minabsvals = []
                                maxvals = []
                                minvals = []
                                fig, axs = PLT.subplots(nrows=1, ncols=len(datapool), sharex=True, sharey=True, figsize=(4.0*len(datapool), 3.6))
                                if len(datapool) == 1:
                                    axs = [axs]
                                for dpoolind,dpool in enumerate(datapool):
                                    for trno,trind in enumerate(triadind):
                                        # if len(model_labels) > 0:
                                        #     for mdlind, mdl in enumerate(model_labels):
                                        #         if dpool in models_xcpdps_b_avg_pol_diag[mdlind][sampling]:
                                        #             psval = (1/3.0) * models_xcpdps_b_avg_pol_diag[mdlind][sampling][dpool][stat][combi][zind,lind,dayind_models[di][mdlind],trind,:].to(pspec_unit).value
                                        #             maxabsvals += [NP.abs(psval.real).max()]
                                        #             minabsvals += [NP.abs(psval.real).min()]
                                        #             maxvals += [psval.real.max()]
                                        #             minvals += [psval.real.min()]
                                        #             axs[dpoolind].plot(models_xcpdps_b_avg_pol_diag[mdlind][sampling]['kprll'][zind,:], psval.real, ls='none', marker='.', ms=3, color=mdl_colrs[mdlind], label='{0}'.format(mdl))
                                        if dpool in xcpdps_b_avg_pol_diag[sampling]:
                                            # 2/3 factor converts the closure-phase spectrum normalization.
                                            psval = (2/3.0) * xcpdps_b_avg_pol_diag[sampling][dpool][stat][combi][zind,lind,dind,trind,:].to(pspec_unit).value
                                            # NOTE(review): ssdiff errors are drawn from the 'a' set while
                                            # the plotted values come from the 'b' set -- confirm intentional.
                                            psrms_ssdiff = (2/3.0) * NP.nanstd(excpdps_a_avg_pol_diag[sampling]['errinfo'][stat][combi][zind,lind,:,trind,:], axis=0).to(pspec_unit).value
                                            # Inflate the RMS when the day axis (2) was incoherently
                                            # averaged with a zero diagonal offset included.
                                            if 2 in avg_incohax_b[combi]:
                                                ind_dayax_in_incohax = avg_incohax_b[combi].index(2)
                                                if 0 in diagoffsets_incohax_b[combi][ind_dayax_in_incohax]:
                                                    rms_inflation_factor = 2.0 * NP.sqrt(2.0)
                                                else:
                                                    rms_inflation_factor = NP.sqrt(2.0)
                                            else:
                                                rms_inflation_factor = NP.sqrt(2.0)
                                            psrms_psdiff = (2/3.0) * (xcpdps_a_avg_pol_diag[sampling][dpool][stat][combi][zind,lind,1,1,trind,:] - xcpdps_a_avg_pol_diag[sampling][dpool][stat][combi][zind,lind,0,0,trind,:]).to(pspec_unit).value
                                            psrms_psdiff = NP.abs(psrms_psdiff.real) / rms_inflation_factor
                                            psrms_max = NP.amax(NP.vstack((psrms_ssdiff, psrms_psdiff)), axis=0)
                                            maxabsvals += [NP.abs(psval.real + nsigma*psrms_max).max()]
                                            minabsvals += [NP.abs(psval.real).min()]
                                            maxvals += [(psval.real + nsigma*psrms_max).max()]
                                            minvals += [(psval.real - nsigma*psrms_max).min()]
                                            # NOTE(review): errshade is looked up with the raw errtype here
                                            # but with errtype.lower() in plot '2d' -- verify config case.
                                            for errtype in ps_errtype:
                                                if errtype.lower() == 'ssdiff':
                                                    axs[dpoolind].errorbar(xcpdps_b_avg_pol_diag[sampling]['kprll'][zind,:], psval.real, yerr=nsigma*psrms_ssdiff, xerr=None, ecolor=errshade[errtype], ls='none', marker='.', ms=4, color='black')
                                                elif errtype.lower() == 'psdiff':
                                                    axs[dpoolind].errorbar(xcpdps_b_avg_pol_diag[sampling]['kprll'][zind,:], psval.real, yerr=nsigma*psrms_psdiff, xerr=None, ecolor=errshade[errtype], ls='none', marker='.', ms=4, color='black', label='FG+N')
                                            # legend = axs[dpoolind].legend(loc='center', bbox_to_anchor=(0.5,0.3), shadow=False, fontsize=8)
                                        if trno == 0:
                                            # Annotate redshift and add a lag (delay) twin axis once per panel.
                                            axs[dpoolind].text(0.95, 0.97, r'$z=$'+' {0:.1f}'.format(xcpdps_b_avg_pol_diag[sampling]['z'][zind]), transform=axs[dpoolind].transAxes, fontsize=8, weight='medium', ha='right', va='top', color='black')
                                            axt = axs[dpoolind].twiny()
                                            axt.set_xlim(1e6*xcpdps_b_avg_pol_diag[sampling]['lags'].min(), 1e6*xcpdps_b_avg_pol_diag[sampling]['lags'].max())
                                            axs[dpoolind].axhline(y=0, xmin=0, xmax=1, ls='-', lw=1, color='black')
                                    # Finalize per-panel limits and symlog scaling.
                                    minvals = NP.asarray(minvals)
                                    maxvals = NP.asarray(maxvals)
                                    minabsvals = NP.asarray(minabsvals)
                                    maxabsvals = NP.asarray(maxabsvals)
                                    axs[dpoolind].set_xlim(0.99*xcpdps_b_avg_pol_diag[sampling]['kprll'][zind,:].min(), 1.01*xcpdps_b_avg_pol_diag[sampling]['kprll'][zind,:].max())
                                    if NP.min(minvals) < 0.0:
                                        axs[dpoolind].set_ylim(1.5*NP.min(minvals), 2*NP.max(maxabsvals))
                                    else:
                                        axs[dpoolind].set_ylim(0.5*NP.min(minvals), 2*NP.max(maxabsvals))
                                    # NOTE(review): 'linthreshy' is deprecated in newer matplotlib (use 'linthresh').
                                    axs[dpoolind].set_yscale('symlog', linthreshy=10**NP.floor(NP.log10(NP.min(minabsvals[minabsvals > 0.0]))))
                                    tickloc = PLTick.SymmetricalLogLocator(linthresh=10**NP.floor(NP.log10(NP.min(minabsvals[minabsvals > 0.0]))), base=100.0)
                                    axs[dpoolind].yaxis.set_major_locator(tickloc)
                                    axs[dpoolind].grid(color='0.9', which='both', linestyle=':', lw=1)
                                # Shared axis labels via an invisible full-figure axes.
                                fig.subplots_adjust(top=0.85)
                                fig.subplots_adjust(bottom=0.16)
                                fig.subplots_adjust(left=0.22)
                                fig.subplots_adjust(right=0.98)
                                big_ax = fig.add_subplot(111)
                                big_ax.set_facecolor('none') # matplotlib.__version__ >= 2.0.0
                                # big_ax.set_axis_bgcolor('none') # matplotlib.__version__ < 2.0.0
                                big_ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
                                big_ax.set_xticks([])
                                big_ax.set_yticks([])
                                big_ax.set_xlabel(r'$\kappa_\parallel$'+' [pseudo '+r'$h$'+' Mpc'+r'$^{-1}$'+']', fontsize=12, weight='medium', labelpad=20)
                                if pspec_unit_type == 'K':
                                    big_ax.set_ylabel(r'$\frac{2}{3}\, P_\nabla(\kappa_\parallel)$ [pseudo mK$^2h^{-3}$ Mpc$^3$]', fontsize=12, weight='medium', labelpad=40)
                                else:
                                    big_ax.set_ylabel(r'$\frac{2}{3}\, P_\nabla(\kappa_\parallel)$ [pseudo Jy$^2h^{-1}$ Mpc]', fontsize=12, weight='medium', labelpad=40)
                                big_axt = big_ax.twiny()
                                big_axt.set_xticks([])
                                big_axt.set_xlabel(r'$\tau$'+' ['+r'$\mu$'+'s]', fontsize=12, weight='medium', labelpad=20)
                                PLT.savefig(figdir + '{0}_symlog_incoh_avg_real_cpdps_z_{1:.1f}_{2}_{3}_dlst_{4:.1f}s_comb_{5:0d}.pdf'.format(plotfile_pfx, xcpdps_b_avg_pol_diag[sampling]['z'][zind], stat, sampling, 3.6e3*xcpdps_b_avg_pol_diag['dlst'][0], combi), bbox_inches=0)
                                # Interactive breakpoint for inspecting each figure.
                                PDB.set_trace()
        if '2d' in plots:
            # Plot '2d': like '2c' but additionally averaged into k bins, for
            # both the power spectrum ('PS') and dimensionless ('Del2') forms.
            kbin_min = plot_info['2d']['kbin_min']
            kbin_max = plot_info['2d']['kbin_max']
            num_kbins = plot_info['2d']['num_kbins']
            kbintype = plot_info['2d']['kbintype']
            # Construct the k-bin edges: linear, or log (handling zero/negative
            # lower edges by mirroring a positive geometric grid).
            if (kbin_min is None) or (kbin_max is None):
                kbins = None
            else:
                if num_kbins is None:
                    raise ValueError('Input num_kbins must be set if kbin range is provided')
                if kbintype == 'linear':
                    kbins = NP.linspace(kbin_min, kbin_max, num=num_kbins, endpoint=True)
                elif kbintype == 'log':
                    if kbin_min > 0.0:
                        kbins = NP.geomspace(kbin_min, kbin_max, num=num_kbins, endpoint=True)
                    elif kbin_min == 0.0:
                        eps_k = 1e-3
                        kbins = NP.geomspace(kbin_min+eps_k, kbin_max, num=num_kbins, endpoint=True)
                    else:
                        eps_k = 1e-3
                        kbins_pos = NP.geomspace(eps_k, kbin_max, num=num_kbins, endpoint=True)
                        ind_kbin_thresh = NP.argmin(kbins_pos[kbins_pos >= NP.abs(kbin_min)])
                        kbins_neg = -1 * kbins_pos[:ind_kbin_thresh+1][::-1]
                        kbins = NP.hstack((kbins_neg, kbins_pos))
                else:
                    raise ValueError('Input kbintype must be set to "linear" or "log"')
            # Bin the diagonally-averaged spectra (data and models) in k.
            xcpdps_a_avg_pol_diag_kbin = BSP.incoherent_kbin_averaging(xcpdps_a_avg_pol_diag, kbins=kbins, kbintype=kbintype)
            excpdps_a_avg_pol_diag_kbin = BSP.incoherent_kbin_averaging(excpdps_a_avg_pol_diag, kbins=kbins, kbintype=kbintype)
            models_xcpdps_a_avg_pol_diag_kbin = []
            models_excpdps_a_avg_pol_diag_kbin = []
            xcpdps_b_avg_pol_diag_kbin = BSP.incoherent_kbin_averaging(xcpdps_b_avg_pol_diag, kbins=kbins, kbintype=kbintype)
            excpdps_b_avg_pol_diag_kbin = BSP.incoherent_kbin_averaging(excpdps_b_avg_pol_diag, kbins=kbins, kbintype=kbintype)
            models_xcpdps_b_avg_pol_diag_kbin = []
            models_excpdps_b_avg_pol_diag_kbin = []
            if len(model_labels) > 0:
                for i in range(len(model_labels)):
                    models_xcpdps_a_avg_pol_diag_kbin += [BSP.incoherent_kbin_averaging(models_xcpdps_a_avg_pol_diag[i], kbins=kbins, kbintype=kbintype)]
                    models_excpdps_a_avg_pol_diag_kbin += [BSP.incoherent_kbin_averaging(models_excpdps_a_avg_pol_diag[i], kbins=kbins, kbintype=kbintype)]
                    models_xcpdps_b_avg_pol_diag_kbin += [BSP.incoherent_kbin_averaging(models_xcpdps_b_avg_pol_diag[i], kbins=kbins, kbintype=kbintype)]
                    models_excpdps_b_avg_pol_diag_kbin += [BSP.incoherent_kbin_averaging(models_excpdps_b_avg_pol_diag[i], kbins=kbins, kbintype=kbintype)]
            lstind = [0]
            triadind = [0]
            dayind = [0]
            dayind_models = NP.zeros(len(model_labels), dtype=int).reshape(1,-1)
            for stat in statistic:
                for zind in spwind:
                    for lind in lstind:
                        for di,dind in enumerate(dayind):
                            for pstype in ['PS', 'Del2']:
                                for combi in range(len(diagoffsets_b)):
                                    # Track extrema across panels to set common y-limits.
                                    maxabsvals = []
                                    minabsvals = []
                                    maxvals = []
                                    minvals = []
                                    if pstype == 'Del2':
                                        fig, axs = PLT.subplots(nrows=1, ncols=len(datapool), sharex=True, sharey=True, figsize=(4.0*len(datapool), 6.0))
                                    else:
                                        fig, axs = PLT.subplots(nrows=1, ncols=len(datapool), sharex=True, sharey=True, figsize=(4.0*len(datapool), 3.6))
                                    if len(datapool) == 1:
                                        axs = [axs]
                                    for dpoolind,dpool in enumerate(datapool):
                                        for trno,trind in enumerate(triadind):
                                            # Model overlays are drawn only on the Del2 figures.
                                            if pstype == 'Del2':
                                                if len(model_labels) > 0:
                                                    for mdlind, mdl in enumerate(model_labels):
                                                        if dpool in models_xcpdps_b_avg_pol_diag_kbin[mdlind][sampling]:
                                                            if pstype == 'PS':
                                                                psval = (2/3.0) * models_xcpdps_b_avg_pol_diag_kbin[mdlind][sampling][dpool][stat][pstype][combi][zind,lind,dayind_models[di][mdlind],trind,:].to(pspec_unit).value
                                                            else:
                                                                psval = (2/3.0) * models_xcpdps_b_avg_pol_diag_kbin[mdlind][sampling][dpool][stat][pstype][combi][zind,lind,dayind_models[di][mdlind],trind,:].to('mK2').value
                                                            kval = models_xcpdps_b_avg_pol_diag_kbin[mdlind][sampling]['kbininfo'][dpool][stat][combi][zind,lind,dayind_models[di][mdlind],trind,:].to('Mpc-1').value
                                                            # NOTE(review): this uses nanmin for maxabsvals (unlike the
                                                            # data branch which uses nanmax) -- confirm intentional.
                                                            maxabsvals += [NP.nanmin(NP.abs(psval.real))]
                                                            minabsvals += [NP.nanmin(NP.abs(psval.real))]
                                                            maxvals += [NP.nanmax(psval.real)]
                                                            minvals += [NP.nanmin(psval.real)]
                                                            axs[dpoolind].plot(kval, psval.real, ls='none', marker='.', ms=3, color=mdl_colrs[mdlind], label='{0}'.format(mdl))
                                            if dpool in xcpdps_b_avg_pol_diag_kbin[sampling]:
                                                # Data values and the two error estimates, in the unit
                                                # appropriate for PS (volume) or Del2 (mK^2).
                                                if pstype == 'PS':
                                                    psval = (2/3.0) * xcpdps_b_avg_pol_diag_kbin[sampling][dpool][stat][pstype][combi][zind,lind,dind,trind,:].to(pspec_unit).value
                                                    psrms_ssdiff = (2/3.0) * NP.nanstd(excpdps_b_avg_pol_diag_kbin[sampling]['errinfo'][stat][pstype][combi][zind,lind,:,trind,:], axis=0).to(pspec_unit).value
                                                    psrms_psdiff = (2/3.0) * (xcpdps_a_avg_pol_diag_kbin[sampling][dpool][stat][pstype][combi][zind,lind,1,1,trind,:] - xcpdps_a_avg_pol_diag_kbin[sampling][dpool][stat][pstype][combi][zind,lind,0,0,trind,:]).to(pspec_unit).value
                                                else:
                                                    psval = (2/3.0) * xcpdps_b_avg_pol_diag_kbin[sampling][dpool][stat][pstype][combi][zind,lind,dind,trind,:].to('mK2').value
                                                    psrms_ssdiff = (2/3.0) * NP.nanstd(excpdps_b_avg_pol_diag_kbin[sampling]['errinfo'][stat][pstype][combi][zind,lind,:,trind,:], axis=0).to('mK2').value
                                                    psrms_psdiff = (2/3.0) * (xcpdps_a_avg_pol_diag_kbin[sampling][dpool][stat][pstype][combi][zind,lind,1,1,trind,:] - xcpdps_a_avg_pol_diag_kbin[sampling][dpool][stat][pstype][combi][zind,lind,0,0,trind,:]).to('mK2').value
                                                # Same day-axis RMS inflation logic as plot '2c'.
                                                if 2 in avg_incohax_b[combi]:
                                                    ind_dayax_in_incohax = avg_incohax_b[combi].index(2)
                                                    if 0 in diagoffsets_incohax_b[combi][ind_dayax_in_incohax]:
                                                        rms_inflation_factor = 2.0 * NP.sqrt(2.0)
                                                    else:
                                                        rms_inflation_factor = NP.sqrt(2.0)
                                                else:
                                                    rms_inflation_factor = NP.sqrt(2.0)
                                                psrms_psdiff = NP.abs(psrms_psdiff.real) / rms_inflation_factor
                                                psrms_max = NP.amax(NP.vstack((psrms_ssdiff, psrms_psdiff)), axis=0)
                                                kval = xcpdps_b_avg_pol_diag_kbin[sampling]['kbininfo'][dpool][stat][combi][zind,lind,dind,trind,:].to('Mpc-1').value
                                                maxabsvals += [NP.nanmax(NP.abs(psval.real + nsigma*psrms_max.real))]
                                                minabsvals += [NP.nanmin(NP.abs(psval.real))]
                                                maxvals += [NP.nanmax(psval.real + nsigma*psrms_max.real)]
                                                minvals += [NP.nanmin(psval.real - nsigma*psrms_max.real)]
                                                for errtype in ps_errtype:
                                                    if errtype.lower() == 'ssdiff':
                                                        axs[dpoolind].errorbar(kval, psval.real, yerr=nsigma*psrms_ssdiff, xerr=None, ecolor=errshade[errtype.lower()], ls='none', marker='.', ms=4, color='black')
                                                    elif errtype.lower() == 'psdiff':
                                                        axs[dpoolind].errorbar(kval, psval.real, yerr=nsigma*psrms_psdiff, xerr=None, ecolor=errshade[errtype.lower()], ls='none', marker='.', ms=4, color='black', label='Data')
                                                if pstype == 'Del2':
                                                    legend = axs[dpoolind].legend(loc='center', bbox_to_anchor=(0.5,0.3), shadow=False, fontsize=8)
                                            if trno == 0:
                                                axs[dpoolind].text(0.95, 0.97, r'$z=$'+' {0:.1f}'.format(xcpdps_b_avg_pol_diag_kbin['resampled']['z'][zind]), transform=axs[dpoolind].transAxes, fontsize=8, weight='medium', ha='right', va='top', color='black')
                                                axs[dpoolind].axhline(y=0, xmin=0, xmax=1, ls='-', lw=1, color='black')
                                        # Finalize per-panel limits and symlog scaling.
                                        minvals = NP.asarray(minvals)
                                        maxvals = NP.asarray(maxvals)
                                        minabsvals = NP.asarray(minabsvals)
                                        maxabsvals = NP.asarray(maxabsvals)
                                        axs[dpoolind].set_xlim(0.99*NP.nanmin(xcpdps_b_avg_pol_diag_kbin['resampled']['kbininfo']['kbin_edges'][zind].to('Mpc-1').value), 1.01*NP.nanmax(xcpdps_b_avg_pol_diag_kbin['resampled']['kbininfo']['kbin_edges'][zind].to('Mpc-1').value))
                                        if NP.min(minvals) < 0.0:
                                            axs[dpoolind].set_ylim(1.5*NP.nanmin(minvals), 2*NP.nanmax(maxabsvals))
                                        else:
                                            axs[dpoolind].set_ylim(0.5*NP.nanmin(minvals), 2*NP.nanmax(maxabsvals))
                                        # NOTE(review): 'linthreshy' is deprecated in newer matplotlib (use 'linthresh').
                                        axs[dpoolind].set_yscale('symlog', linthreshy=10**NP.floor(NP.log10(NP.min(minabsvals[minabsvals > 0.0]))))
                                        tickloc = PLTick.SymmetricalLogLocator(linthresh=10**NP.floor(NP.log10(NP.min(minabsvals[minabsvals > 0.0]))), base=100.0)
                                        axs[dpoolind].yaxis.set_major_locator(tickloc)
                                        axs[dpoolind].grid(color='0.8', which='both', linestyle=':', lw=1)
                                    # Shared axis labels via an invisible full-figure axes.
                                    fig.subplots_adjust(top=0.95)
                                    fig.subplots_adjust(bottom=0.16)
                                    fig.subplots_adjust(left=0.22)
                                    fig.subplots_adjust(right=0.98)
                                    big_ax = fig.add_subplot(111)
                                    big_ax.set_facecolor('none') # matplotlib.__version__ >= 2.0.0
                                    # big_ax.set_axis_bgcolor('none') # matplotlib.__version__ < 2.0.0
                                    big_ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
                                    big_ax.set_xticks([])
                                    big_ax.set_yticks([])
                                    big_ax.set_xlabel(r'$\kappa_\parallel$'+' [pseudo '+r'$h$'+' Mpc'+r'$^{-1}$'+']', fontsize=12, weight='medium', labelpad=20)
                                    if pstype == 'PS':
                                        big_ax.set_ylabel(r'$\frac{2}{3}\, P_\nabla(\kappa_\parallel)$ [pseudo mK$^2h^{-3}$ Mpc$^3$]', fontsize=12, weight='medium', labelpad=40)
                                    else:
                                        big_ax.set_ylabel(r'$\frac{2}{3}\, \Delta_\nabla^2(\kappa_\parallel)$ [pseudo mK$^2$]', fontsize=12, weight='medium', labelpad=40)
                                    # big_axt = big_ax.twiny()
                                    # big_axt.set_xticks([])
                                    # big_axt.set_xlabel(r'$\tau$'+' ['+r'$\mu$'+'s]', fontsize=12, weight='medium', labelpad=20)
                                    if pstype == 'PS':
                                        PLT.savefig(figdir + '{0}_symlog_incoh_kbin_avg_real_cpdps_z_{1:.1f}_{2}_{3}_dlst_{4:.1f}s_comb_{5:0d}.pdf'.format(plotfile_pfx, xcpdps_a_avg_pol_diag_kbin[sampling]['z'][zind], stat, sampling, 3.6e3*xcpdps_b_avg_pol_diag_kbin['dlst'][0], combi), bbox_inches=0)
                                    else:
                                        PLT.savefig(figdir + '{0}_symlog_incoh_kbin_avg_real_cpDel2_z_{1:.1f}_{2}_{3}_dlst_{4:.1f}s_comb_{5:0d}.pdf'.format(plotfile_pfx, xcpdps_a_avg_pol_diag_kbin[sampling]['z'][zind], stat, sampling, 3.6e3*xcpdps_b_avg_pol_diag_kbin['dlst'][0], combi), bbox_inches=0)
                                    # Interactive breakpoint for inspecting each figure.
                                    PDB.set_trace()
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import io
import os
import select
import socket
import threading
from contextlib import contextmanager
from pants.java.nailgun_protocol import ChunkType, NailgunProtocol
class NailgunStreamReader(threading.Thread):
  """Reads input from stdin and emits Nailgun 'stdin' chunks over a socket."""

  SELECT_TIMEOUT = 1

  def __init__(self, in_fd, sock, buf_size=io.DEFAULT_BUFFER_SIZE, select_timeout=SELECT_TIMEOUT):
    """
    :param file in_fd: the input file descriptor (e.g. sys.stdin) to read from.
    :param socket sock: the socket to emit nailgun protocol chunks over.
    :param int buf_size: the buffer size for reads from the file descriptor.
    :param int select_timeout: the timeout (in seconds) for select.select() calls against the fd.
    """
    super(NailgunStreamReader, self).__init__()
    self.daemon = True
    self._stdin = in_fd
    self._socket = sock
    self._buf_size = buf_size
    self._select_timeout = select_timeout
    # N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
    self._stopped = threading.Event()

  @property
  def is_stopped(self):
    """Indicates whether or not the instance is stopped."""
    return self._stopped.is_set()

  def stop(self):
    """Stops the instance."""
    self._stopped.set()

  @contextmanager
  def running(self):
    """Context manager that runs the reader thread for the duration of the block.

    BUGFIX: the stop() call is now in a finally clause so the reader is stopped
    even when the managed block raises; previously an exception left the daemon
    thread running and still consuming stdin.
    """
    self.start()
    try:
      yield
    finally:
      self.stop()

  def run(self):
    """Pump stdin into nailgun STDIN chunks until EOF, fd error, or stop()."""
    while not self.is_stopped:
      readable, _, errored = select.select([self._stdin], [], [self._stdin], self._select_timeout)
      if self._stdin in errored:
        self.stop()
        return
      if not self.is_stopped and self._stdin in readable:
        data = os.read(self._stdin.fileno(), self._buf_size)
        if not self.is_stopped:
          if data:
            NailgunProtocol.write_chunk(self._socket, ChunkType.STDIN, data)
          else:
            # EOF on stdin: signal it, then half-close the socket for sends.
            NailgunProtocol.write_chunk(self._socket, ChunkType.STDIN_EOF)
            try:
              self._socket.shutdown(socket.SHUT_WR)  # Shutdown socket sends.
            except socket.error:  # Can happen if response is quick.
              pass
            finally:
              self.stop()
class NailgunStreamWriter(object):
  """A sys.{stdout,stderr} replacement that writes output to a socket using the nailgun protocol."""

  def __init__(self, sock, chunk_type, isatty=True, mask_broken_pipe=False):
    """
    :param socket sock: A connected socket capable of speaking the nailgun protocol.
    :param str chunk_type: A ChunkType constant representing the nailgun protocol chunk type.
    :param bool isatty: Whether or not the consumer of this stream has tty capabilities. (Optional)
    :param bool mask_broken_pipe: This will toggle the masking of 'broken pipe' errors when writing
                                  to the remote socket. This allows for completion of execution in
                                  the event of a client disconnect (e.g. to support cleanup work).
    """
    self._socket = sock
    self._chunk_type = chunk_type
    self._isatty = isatty
    self._mask_broken_pipe = mask_broken_pipe

  def write(self, payload):
    """Emit payload as a nailgun chunk of this writer's chunk type."""
    try:
      NailgunProtocol.write_chunk(self._socket, self._chunk_type, payload)
    except IOError as e:
      # A disconnected client surfaces as 'error: [Errno 32] Broken pipe' on
      # write. When mask_broken_pipe is set we swallow that so execution can
      # complete (writers of sys.stdout etc don't expect it); otherwise the
      # broken pipe propagates to the caller.
      is_broken_pipe = (e.errno == errno.EPIPE)
      if is_broken_pipe and not self._mask_broken_pipe:
        raise

  def flush(self):
    """No-op: chunks are written immediately."""
    return

  def isatty(self):
    """Report the tty capability declared at construction time."""
    return self._isatty
|
import cv2
def rotate_image(mat, angle):
    """
    Rotates an image (angle in degrees) and expands image to avoid cropping
    """
    height, width = mat.shape[:2]  # image shape has 3 dimensions
    # getRotationMatrix2D wants (x, y), i.e. (width, height) - the reverse of shape.
    center = (width/2, height/2)
    transform = cv2.getRotationMatrix2D(center, angle, 1.)
    # |cos| and |sin| of the rotation angle give the bounding-box growth factors.
    cos_part = abs(transform[0, 0])
    sin_part = abs(transform[0, 1])
    # Dimensions of the axis-aligned box that fully contains the rotated image.
    bound_w = int(height * sin_part + width * cos_part)
    bound_h = int(height * cos_part + width * sin_part)
    # Translate so the rotated content is centred in the expanded canvas.
    transform[0, 2] += bound_w/2 - center[0]
    transform[1, 2] += bound_h/2 - center[1]
    return cv2.warpAffine(mat, transform, (bound_w, bound_h))
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """
    method to resize the image given a certain width and/or height
    @return: returns the processed image
    """
    (h, w) = image.shape[:2]
    # Neither dimension requested: hand back the original untouched.
    if width is None and height is None:
        return image
    if width is None:
        # Scale by the requested height, preserving aspect ratio.
        ratio = height / float(h)
        dim = (int(w * ratio), height)
    else:
        # Scale by the requested width, preserving aspect ratio
        # (width wins when both are supplied).
        ratio = width / float(w)
        dim = (width, int(h * ratio))
    return cv2.resize(image, dim, interpolation=inter)
|
import unittest
import copy
import tempfile
import pickle
import os
import datetime as dt
from market_calendars.core import (Date,
Period,
Weekdays)
class TestDate(unittest.TestCase):
def test_date_input_with_serial_number(self):
serial_number = 45678
test_date = Date(serial_number=serial_number)
self.assertEqual(test_date.serial_number, serial_number)
def test_date_input_with_serial_number_and_notnull_year_month_day(self):
serial_number = 45678
_ = Date(year=2015, serial_number=serial_number)
def test_date_input_without_complete_information_on_year_month_day(self):
year = 2015
month = None
day = 18
with self.assertRaises(TypeError):
_ = Date(year=year, month=month, day=day)
def test_basic_functions(self):
year = 2015
month = 7
day = 24
str_repr = "{0}-{1:02d}-{2:02d}".format(year, month, day)
inner_repr = "Date({0}, {1}, {2})".format(year, month, day)
test_date = Date(year, month, day)
self.assertEqual(str(test_date), str_repr, "date string:\n"
"expected: {0:s}\n"
"calculated: {1:s}".format(str_repr, str(test_date)))
self.assertEqual(repr(test_date), inner_repr, "date representation:\n"
"expected: {0:s}\n"
"calculated: {1:s}".format(inner_repr, repr(test_date)))
self.assertEqual(test_date.year(), year, "date year:\n"
"expected: {0:d}\n"
"calculated: {1:d}".format(year, test_date.year()))
self.assertEqual(test_date.month(), month, "date month:\n"
"expected: {0:d}\n"
"calculated: {1:d}".format(month, test_date.month()))
self.assertEqual(test_date.day_of_month(), day, "date day:\n"
"expected: {0:d}\n"
"calculated: {1:d}".format(day, test_date.day_of_month()))
self.assertEqual(test_date.day_of_year(), test_date - Date(2015, 1, 1) + 1, "date day:\n"
"expected: {0:d}\n"
"calculated: {1:d}"
.format(test_date - Date(2015, 1, 1) + 1, test_date.day_of_year()))
self.assertEqual(test_date.weekday(), 6, "date weekday:\n"
"expected: {0:d}\n"
"calculated: {1:d}".format(5, test_date.weekday()))
self.assertEqual(test_date.to_datetime(), dt.datetime(year, month, day), "date datetime representation\n"
"expected: {0}\n"
"calculated: {1}".format(
dt.datetime(year, month, day), test_date.to_datetime()))
serial_number = test_date.serial_number
serial_date = Date(serial_number=serial_number)
self.assertEqual(serial_date, test_date, "date excel serial number representation\n"
"expected: {0:d}"
"calculated: {1:d}".format(serial_date.serial_number,
test_date.serial_number))
# test comparisons
previous_date = test_date - 1
self.assertTrue(previous_date < test_date, "{0} is not earlier than {1}".format(previous_date, test_date))
self.assertFalse(previous_date >= test_date,
"{0} should not be later than or equal to {1}".format(previous_date, test_date))
self.assertTrue((previous_date + 1) == test_date,
"{0} plus one day should be equal to {1}".format(previous_date, test_date))
# check static members
self.assertEqual(Date.min_date(), Date(1901, 1, 1), "min date is wrong")
self.assertEqual(Date.max_date(), Date(2199, 12, 31), "max date is wrong")
self.assertEqual(Date.end_of_month(test_date), Date(year, month, 31), "end of month is wrong")
self.assertTrue(Date.is_end_of_month(Date(year, month, 31)), "{0} should be the end of month")
self.assertEqual(Date.next_weekday(test_date, test_date.weekday()), test_date,
"{0}'s next same week day should be {1}"
.format(test_date, test_date))
expected_date = dt.date.today()
expected_date = dt.datetime(expected_date.year, expected_date.month, expected_date.day)
self.assertEqual(Date.today_date().to_datetime(), expected_date, "today's date\n"
"expected: {0}\n"
"calculated: {1}".format(expected_date,
Date.today_date()))
# nth-week day
with self.assertRaises(ValueError):
_ = Date.nth_weekday(0, Weekdays.Friday, 1, 2015)
with self.assertRaises(ValueError):
_ = Date.nth_weekday(6, Weekdays.Friday, 1, 2015)
self.assertEqual(Date.nth_weekday(3, Weekdays.Wednesday, 8, 2015), Date(2015, 8, 19))
# check plus/sub
three_weeks_after = test_date + '3W'
expected_date = test_date + 21
self.assertEqual(three_weeks_after, expected_date, "date + 3w period\n"
"expected: {0}\n"
"calculated: {1}".format(expected_date, three_weeks_after))
three_months_before = test_date - "3M"
expected_date = Date(year, month - 3, day)
self.assertEqual(three_months_before, expected_date, "date - 3m period\n"
"expected: {0}\n"
"calculated: {1}".format(expected_date, three_months_before))
three_months_before = test_date - Period("3M")
expected_date = Date(year, month - 3, day)
self.assertEqual(three_months_before, expected_date, "date - 3m period\n"
"expected: {0}\n"
"calculated: {1}".format(expected_date, three_months_before))
three_months_after = test_date + "3m"
expected_date = Date(year, month + 3, day)
self.assertEqual(three_months_after, expected_date, "date + 3m period\n"
"expected: {0}\n"
"calculated: {1}".format(expected_date, three_months_after))
one_year_and_two_months_before = test_date - "14m"
expected_date = Date(year - 1, month - 2, day)
self.assertEqual(one_year_and_two_months_before, expected_date, "date - 14m period\n"
"expected: {0}\n"
"calculated: {1}".format(expected_date,
three_months_before))
one_year_and_two_months_before = test_date + "14m"
expected_date = Date(year + 1, month + 2, day)
self.assertEqual(one_year_and_two_months_before, expected_date, "date + 14m period\n"
"expected: {0}\n"
"calculated: {1}".format(expected_date,
three_months_before))
five_months_after = test_date + "5m"
expected_date = Date(year, month + 5, day)
self.assertEqual(five_months_after, expected_date, "date + 5m period\n"
"expected: {0}\n"
"calculated: {1}".format(expected_date, five_months_after))
    def test_date_advance_out_of_bounds(self):
        """Advancing a Date beyond the supported range must raise ValueError."""
        # Adding a week to this late date should overflow the valid range.
        test_date = Date(2199, 12, 30)
        with self.assertRaises(ValueError):
            _ = test_date + '1w'
        # Subtracting a week from this early date should underflow it.
        test_date = Date(1901, 1, 1)
        with self.assertRaises(ValueError):
            _ = test_date - '1w'
def test_consistency(self):
min_date = Date.min_date().serial_number + 1
max_date = Date.max_date().serial_number
dyold = Date.from_excel_serial_number(min_date - 1).day_of_year()
dold = Date.from_excel_serial_number(min_date - 1).day_of_month()
mold = Date.from_excel_serial_number(min_date - 1).month()
yold = Date.from_excel_serial_number(min_date - 1).year()
wdold = Date.from_excel_serial_number(min_date - 1).weekday()
for i in range(min_date, max_date + 1):
t = Date.from_excel_serial_number(i)
serial = t.serial_number
self.assertEqual(serial, i, "inconsistent serial number:\n"
" original: {0:d}\n"
" serial number: {1:d}".format(i, serial))
dy = t.day_of_year()
d = t.day_of_month()
m = t.month()
y = t.year()
wd = t.weekday()
flag = (dy == dyold + 1) or \
(dy == 1 and dyold == 365 and not Date.is_leap(yold)) or \
(dy == 1 and dyold == 366 and Date.is_leap(yold))
self.assertTrue(flag, "wrong day of year increment: \n"
" date: {0}\n"
" day of year: {1:d}\n"
" previous: {2:d}".format(t, dy, dyold))
dyold = dy
flag = (d == dold + 1 and m == mold and y == yold) or \
(d == 1 and m == mold + 1 and y == yold) or \
(d == 1 and m == 1 and y == yold + 1)
self.assertTrue(flag, "wrong day,month,year increment: \n"
" date: {0}\n"
" year,month,day: {1:d}, {2:d}, {3:d}\n"
" previous: {4:d}, {5:d}, {6:d}".format(t, y, m, d, yold, mold, dold))
dold = d
mold = m
yold = y
self.assertTrue(d >= 1, "invalid day of month: \n"
" date: {0}\n"
" day: {1:d}".format(t, d))
flag = (m == 1 and d <= 31) or \
(m == 2 and d <= 28) or \
(m == 2 and d == 29 and Date.is_leap(y)) or \
(m == 3 and d <= 31) or \
(m == 4 and d <= 30) or \
(m == 5 and d <= 31) or \
(m == 6 and d <= 30) or \
(m == 7 and d <= 31) or \
(m == 8 and d <= 31) or \
(m == 9 and d <= 30) or \
(m == 10 and d <= 31) or \
(m == 11 and d <= 30) or \
(m == 12 and d <= 31)
self.assertTrue(flag, "invalid day of month: \n"
" date: {0}\n"
" day: {1:d}".format(t, d))
flag = (wd == (wdold + 1)) or (wd == 1 or wdold == 7)
self.assertTrue(flag, "invalid weekday: \n"
" date: {0}\n"
" weekday: {1:d}\n"
" previous: {2:d}".format(t, wd, wdold))
wdold = wd
s = Date(y, m, d)
serial = s.serial_number
self.assertTrue(serial == i, "inconsistent serial number:\n"
" date: {0}\n"
" serial number: {1:d}\n"
" cloned date: {2}\n"
" serial number: {3:d}".format(t, i, s, serial))
    def test_ios_dates(self):
        """Date.parse_iso must decompose an ISO-8601 date string correctly."""
        # NOTE(review): method name says "ios" -- likely a typo for "iso";
        # left unchanged so the test id stays stable.
        input_date = "2006-01-15"
        d = Date.parse_iso(input_date)
        flag = d.day_of_month() == 15 and \
            d.month() == 1 and \
            d.year() == 2006
        self.assertTrue(flag, "Iso date failed\n"
                              " input date:    {0}\n"
                              " day of month:  {1:d}\n"
                              " month:         {2:d}\n"
                              " year:          {3:d}".format(input_date, d.day_of_month(), d.month(), d.year()))
    def test_parse_dates(self):
        """Date.strptime must honour several explicit format strings."""
        # ISO-style format.
        input_date = "2006-01-15"
        d = Date.strptime(input_date, "%Y-%m-%d")
        flag = d == Date(2006, 1, 15)
        self.assertTrue(flag, "date parsing failed\n"
                              " input date:    {0:s}\n"
                              " parsed:        {1}".format(input_date, d))
        # The same string parsed as US (m/d/y) ...
        input_date = "12/02/2012"
        d = Date.strptime(input_date, "%m/%d/%Y")
        flag = d == Date(2012, 12, 2)
        self.assertTrue(flag, "date parsing failed\n"
                              " input date:    {0:s}\n"
                              " parsed:        {1}".format(input_date, d))
        # ... and as European (d/m/y) gives a different Date.
        d = Date.strptime(input_date, "%d/%m/%Y")
        flag = d == Date(2012, 2, 12)
        self.assertTrue(flag, "date parsing failed\n"
                              " input date:    {0:s}\n"
                              " parsed:        {1}".format(input_date, d))
        # Compact yyyymmdd format.
        input_date = "20011002"
        d = Date.strptime(input_date, "%Y%m%d")
        flag = d == Date(2001, 10, 2)
        self.assertTrue(flag, "date parsing failed\n"
                              " input date:    {0:s}\n"
                              " parsed:        {1}".format(input_date, d))
    def test_date_deep_copy(self):
        """copy.deepcopy of a Date must compare equal to the original."""
        benchmark_date = Date(2016, 1, 2)
        copied_date = copy.deepcopy(benchmark_date)
        self.assertEqual(benchmark_date, copied_date)
    def test_date_pickle(self):
        """A Date must survive a pickle round-trip through a temp file."""
        benchmark_date = Date(2016, 1, 2)
        # delete=False so the file can be reopened by name after closing
        # (required on Windows).
        f = tempfile.NamedTemporaryFile('w+b', delete=False)
        pickle.dump(benchmark_date, f)
        f.close()
        with open(f.name, 'rb') as f2:
            pickled_date = pickle.load(f2)
        self.assertEqual(benchmark_date, pickled_date)
        # Clean up the temp file ourselves since delete=False.
        os.unlink(f.name)
|
"This module is used to built simple calculator"
import math
pi =math.pi
def sum(x, y=1):
    """Return the sum of two numbers; `y` defaults to 1.

    NOTE(review): shadows the builtin ``sum``; name kept for API
    compatibility.
    """
    total = x + y
    return total
def diff(x, y=1):
    """Return the difference ``x - y``; `y` defaults to 1."""
    result = x - y
    return result
def mul(x, y):
    """Return the product of ``x`` and ``y``."""
    product = x * y
    return product
def div(x, y=1):
    """Return ``x / y``; `y` defaults to 1.

    Raises:
        ZeroDivisionError: if ``y`` is zero.  (The original validated with
        ``assert``, which is stripped under ``python -O``; an explicit raise
        is always active, and ZeroDivisionError is what ``x / 0`` would
        raise anyway.)
    """
    if y == 0:
        raise ZeroDivisionError("Cannot be divided by zero")
    return x / y
def area_peri(r=1):
    """Return ``(area, perimeter)`` of a circle with radius ``r`` (default 1)."""
    area = math.pi * r ** 2
    perimeter = 2 * math.pi * r
    return area, perimeter
# print(__name__)
|
# coding=utf-8
# author=uliontse
# binary search:
def binary_search(arr, target):
    """Return True if ``target`` occurs in the sorted list ``arr``.

    Bug fix: the original compared the *index* ``mid`` against ``target``
    instead of the element ``arr[mid]``, so it searched over positions,
    not values.
    """
    left, right = 0, len(arr) - 1
    while left <= right:
        mid = (left + right) // 2
        if arr[mid] < target:
            left = mid + 1
        elif arr[mid] > target:
            right = mid - 1
        else:
            return True
    return False
# two sum:
def two_sum(nums, target):
    """Return one pair ``[a, b]`` from ``nums`` with ``a + b == target``,
    or ``[]`` when no such pair exists.

    NOTE(review): the scan assumes ``nums`` is sorted ascending -- confirm
    at call sites; the list is not sorted here.
    """
    def _contains(sorted_part, value):
        # Plain binary search for `value` inside `sorted_part`.
        lo, hi = 0, len(sorted_part) - 1
        while lo <= hi:
            middle = (lo + hi) // 2
            if sorted_part[middle] < value:
                lo = middle + 1
            elif sorted_part[middle] > value:
                hi = middle - 1
            else:
                return True
        return False
    lo_idx, hi_idx = 0, len(nums) - 1
    # Shrink the candidate window from both ends before scanning.
    while lo_idx < hi_idx and nums[lo_idx] + nums[hi_idx] < target:
        lo_idx += 1
    while lo_idx < hi_idx and nums[lo_idx] + nums[hi_idx] > target:
        hi_idx -= 1
    for k in range(lo_idx, hi_idx + 1):
        complement = target - nums[k]
        if _contains(nums[k + 1:hi_idx + 1], complement):
            return [nums[k], complement]
    return []  # no pair found
# three sum:
def three_sum(nums, target=0):
    """Return all unique triplets in ``nums`` that sum to ``target``.

    Sorts ``nums`` in place, then for each anchor element runs a
    two-pointer sweep over the remainder.
    """
    count = len(nums)
    if count < 3:
        return []
    nums.sort()
    triplets = []
    for first in range(count - 2):
        # In a sorted list, an anchor above target/3 cannot start a triplet.
        if nums[first] > target / 3:
            break
        # Skip duplicate anchors so each triplet appears once.
        if first > 0 and nums[first] == nums[first - 1]:
            continue
        lo, hi = first + 1, count - 1
        while lo < hi:
            s = nums[first] + nums[lo] + nums[hi]
            if s < target:
                lo += 1
            elif s > target:
                hi -= 1
            else:
                triplets.append([nums[first], nums[lo], nums[hi]])
                # Step past duplicates on both sides before moving inward.
                while lo < hi and nums[lo] == nums[lo + 1]:
                    lo += 1
                while lo < hi and nums[hi] == nums[hi - 1]:
                    hi -= 1
                lo += 1
                hi -= 1
    return triplets
# four sum:
def four_sum(nums, target):
    """Return all unique quadruplets in ``nums`` that sum to ``target``.

    Sorts ``nums`` in place; two nested anchors plus a two-pointer sweep.
    """
    n = len(nums)
    if n < 4:
        return []
    nums.sort()
    pool = []
    for a_id in range(n - 3):
        # Sorted: if the first anchor exceeds target/4, no quad can reach target.
        if nums[a_id] > target / 4:
            break
        # Skip duplicate first anchors so results are unique.
        if a_id > 0 and nums[a_id] == nums[a_id - 1]:
            continue
        for b_id in range(a_id + 1, n - 2):
            # Sorted: remaining two elements are >= nums[b_id], so the sum
            # already exceeds target once the two anchors pass target/2.
            if nums[a_id] + nums[b_id] > target / 2: # or nums[a_id]*3 + nums[b_id] > target:
                break
            # Skip duplicate second anchors.
            if b_id > a_id + 1 and nums[b_id] == nums[b_id - 1]:
                continue
            c_id, d_id = b_id + 1, n - 1
            while c_id < d_id:
                total = nums[a_id] + nums[b_id] + nums[c_id] + nums[d_id]
                if total < target:
                    c_id += 1
                elif total > target:
                    d_id -= 1
                else:
                    pool.append([nums[a_id], nums[b_id], nums[c_id], nums[d_id]])
                    # Step past duplicates on both sides before moving inward.
                    while c_id < d_id and nums[c_id] == nums[c_id + 1]:
                        c_id += 1
                    while c_id < d_id and nums[d_id] == nums[d_id - 1]:
                        d_id -= 1
                    c_id += 1
                    d_id -= 1
    return pool
|
import numpy
from kernel_tuner import tune_kernel, run_kernel
from numba import cuda
import json
import argparse
from numpyencoder import NumpyEncoder
# Script: tune the SHOC "md" CUDA kernel with kernel_tuner and persist the
# full results plus the best configuration as JSON files.
# Setup CLI parser
parser = argparse.ArgumentParser(description="MD tuner")
parser.add_argument("--size", "-s", type=int, default=1, help="problem size to the benchmark (e.g.: 2)")
parser.add_argument("--technique", "-t", type=str, default="brute_force", help="tuning technique to use for the benchmark (e.g.: annealing)")
arguments = parser.parse_args()
# Problem sizes used in the SHOC benchmark
problem_sizes = [12288, 24576, 36864, 73728]
# Query the current GPU for its hardware block-size limit.
gpu = cuda.get_current_device()
max_block_size = gpu.MAX_THREADS_PER_BLOCK
# --size is 1-based, so index problem_sizes with size - 1.
input_problem_size = arguments.size
size = problem_sizes[input_problem_size - 1]
# Use host code in combination with CUDA kernel
kernel_files = ['md_host.cu', '../../../src/kernels/md/md_kernel.cu']
# Tunable parameter space.
tune_params = dict()
tune_params["BLOCK_SIZE"] = [i for i in range(1, max_block_size + 1)] # Range: [1, ..., max_block_size]
tune_params["PRECISION"] = [32, 64]
tune_params["TEXTURE_MEMORY"] = [0, 1]
tune_params["WORK_PER_THREAD"] = [i for i in range(1, 6)] # Range: [1, ..., 5]
# Extra options only apply to the genetic-algorithm strategy.
strategy_options = {}
if arguments.technique == "genetic_algorithm":
    strategy_options = {"maxiter": 50, "popsize": 10}
# Tune all kernels and correctness verify by throwing error if verification failed
tuning_results = tune_kernel("md_host", kernel_files, size, [], tune_params, strategy=arguments.technique, lang="C",
                block_size_names=["BLOCK_SIZE"], compiler_options=["-I ../../../src/kernels/md/", f"-DPROBLEM_SIZE={input_problem_size}"],
                iterations=2, strategy_options=strategy_options)
# Save the results as a JSON file
with open("md-results.json", 'w') as f:
    json.dump(tuning_results, f, indent=4, cls=NumpyEncoder)
# Get the best configuration
best_parameter_config = min(tuning_results[0], key=lambda x: x['time'])
best_parameters = dict()
# Filter out parameters from results
for k, v in best_parameter_config.items():
    if k not in tune_params:
        continue
    best_parameters[k] = v
# Add problem size and tuning technique to results
best_parameters["PROBLEM_SIZE"] = input_problem_size
best_parameters["TUNING_TECHNIQUE"] = arguments.technique
# Save the best results as a JSON file
with open("best-md-results.json", 'w') as f:
    json.dump(best_parameters, f, indent=4, cls=NumpyEncoder)
|
#!/usr/bin/env python3
# Copyright 2021 Collabora, Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Helper script that reads drm_fourcc.h and writes a static table with the
# simpler format token modifiers
import sys
import re
filename = sys.argv[1]
towrite = sys.argv[2]
fm_re = {
'intel': r'^#define I915_FORMAT_MOD_(\w+)',
'others': r'^#define DRM_FORMAT_MOD_((?:ARM|SAMSUNG|QCOM|VIVANTE|NVIDIA|BROADCOM|ALLWINNER)\w+)\s',
'vendors': r'^#define DRM_FORMAT_MOD_VENDOR_(\w+)'
}
def print_fm_intel(f, f_mod):
    # Emit one table row for an Intel (I915) format modifier; the modifier
    # name doubles as the display name.
    row = ' {{ DRM_MODIFIER_INTEL({0}, {0}) }},\n'.format(f_mod)
    f.write(row)
# generic write func
def print_fm(f, vendor, mod, f_name):
    # Emit one table row for a non-Intel vendor format modifier.
    row = ' {{ DRM_MODIFIER({}, {}, {}) }},\n'.format(vendor, mod, f_name)
    f.write(row)
# Parse drm_fourcc.h, then emit the two generated C tables.
with open(filename, "r") as f:
    data = f.read()
    # Replace each regex with the list of names it matched in the header.
    for k, v in fm_re.items():
        fm_re[k] = re.findall(v, data, flags=re.M)
with open(towrite, "w") as f:
    f.write('''\
/* AUTOMATICALLY GENERATED by gen_table_fourcc.py. You should modify
   that script instead of adding here entries manually! */
static const struct drmFormatModifierInfo drm_format_modifier_table[] = {
''')
    # Sentinel rows first, then one row per modifier.
    f.write('    { DRM_MODIFIER_INVALID(NONE, INVALID_MODIFIER) },\n')
    f.write('    { DRM_MODIFIER_LINEAR(NONE, LINEAR) },\n')
    for entry in fm_re['intel']:
        print_fm_intel(f, entry)
    for entry in fm_re['others']:
        (vendor, mod) = entry.split('_', 1)
        # ARM TYPE_* defines are namespaces, not real modifiers; skip them.
        if vendor == 'ARM' and (mod == 'TYPE_AFBC' or mod == 'TYPE_MISC' or mod == 'TYPE_AFRC'):
            continue
        print_fm(f, vendor, mod, mod)
    f.write('''\
};
''')
    # Second table: vendor id -> vendor name strings.
    f.write('''\
static const struct drmFormatModifierVendorInfo drm_format_modifier_vendor_table[] = {
''')
    for entry in fm_re['vendors']:
        f.write("    {{ DRM_FORMAT_MOD_VENDOR_{}, \"{}\" }},\n".format(entry, entry))
    f.write('''\
};
''')
|
# Dependencies: https://pypi.python.org/pypi/bitcoin >= 1.1.27
import binascii
import hashlib
import base64
import bitcoin
def verify(challenge_hidden, challenge_visual, pubkey, signature, version):
    """Check an ECDSA signature over a two-part login challenge.

    Python 2 code: string arguments are byte strings.

    Args:
        challenge_hidden: hex-encoded challenge bytes.
        challenge_visual: human-readable challenge string.
        pubkey: hex-encoded public key.
        signature: hex-encoded signature.
        version: 1 concatenates the raw parts; 2 hashes each part with
            SHA-256 and concatenates the digests.

    Returns:
        Result of bitcoin.ecdsa_verify (truthy when the signature is valid).

    Raises:
        Exception: for any other `version` value.
    """
    if version == 1:
        message = binascii.unhexlify(challenge_hidden + binascii.hexlify(challenge_visual))
    elif version == 2:
        h1 = hashlib.sha256(binascii.unhexlify(challenge_hidden)).digest()
        h2 = hashlib.sha256(challenge_visual).digest()
        message = h1 + h2
    else:
        raise Exception('Unknown version')
    # bitcoin.ecdsa_verify expects the signature base64-encoded.
    signature_b64 = base64.b64encode(binascii.unhexlify(signature))
    return bitcoin.ecdsa_verify(message, signature_b64, pubkey)
def main():
    """Demo: verify one hard-coded version-2 challenge (Python 2 script)."""
    challenge_hidden = "cd8552569d6e4509266ef137584d1e62c7579b5b8ed69bbafa4b864c6521e7c2" # Use random value
    challenge_visual = "2015-03-23 17:39:22"
    pubkey = "023a472219ad3327b07c18273717bb3a40b39b743756bf287fbd5fa9d263237f45"
    signature = "20f2d1a42d08c3a362be49275c3ffeeaa415fc040971985548b9f910812237bb41770bf2c8d488428799fbb7e52c11f1a3404011375e4080e077e0e42ab7a5ba02"
    # Python 2 print statement -- this file is not Python 3 compatible.
    print verify(challenge_hidden, challenge_visual, pubkey, signature, 2)
if __name__ == '__main__':
    main()
|
import os
from errno import ENOENT, EPERM
from stat import S_IFDIR, S_IFREG
from time import time
import logging
from datetime import datetime
import threading
import functools
import collections
from fuse import FUSE, Operations
from storage import GitStorage
log = logging.getLogger('spaghettifs.filesystem')
log.setLevel(logging.DEBUG)
WRITE_BUFFER_SIZE = 3 * 1024 * 1024 # 3MB
def memoize(size):
    """Decorator factory caching the last `size` (args, result) pairs.

    Lookup is a linear scan over a bounded deque, so it is only suited to
    small sizes.  The wrapped function gains a ``flush_memo()`` attribute
    that clears the cache.
    """
    memo = collections.deque(maxlen=size)
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args):
            # Cache hit: return the stored result without re-appending.
            for cached_args, cached_value in memo:
                if cached_args == args:
                    return cached_value
            # Cache miss: compute and remember (oldest entry may be evicted).
            result = f(*args)
            memo.append((args, result))
            return result
        wrapper.flush_memo = memo.clear
        return wrapper
    return decorator
class SpaghettiFS(Operations):
    """FUSE operations implemented on top of a git-backed GitStorage repo.

    Python 2 code (octal literals like 0755, ``except X, e``).  All FUSE
    callbacks are serialized through one lock, and path lookups are
    memoized; every mutation flushes the memo so stale objects are not
    returned.
    """
    def __init__(self, repo):
        self.repo = repo
        self._write_count = 0  # bytes written since the last buffered commit
        # the FUSE library seems to assume we're thread-safe, so we use a
        # big fat lock, just in case
        self._lock = threading.Lock()
    @memoize(10)
    def get_obj(self, path):
        """Walk `path` from the repo root; return the object or None."""
        #assert(path.startswith('/'))
        obj = self.repo.get_root()
        for frag in path[1:].split('/'):
            if frag == '':
                continue
            try:
                obj = obj[frag]
            except KeyError:
                return None
        return obj
    def getattr(self, path, fh=None):
        """Return a stat dict: dirs get mode 0755, files 0444 plus size/nlink."""
        obj = self.get_obj(path)
        if obj is None:
            raise OSError(ENOENT, '')
        if obj.is_dir:
            st = dict(st_mode=(S_IFDIR | 0755), st_nlink=2)
        else:
            st = dict(st_mode=(S_IFREG | 0444), st_size=obj.size)
            st['st_nlink'] = obj.inode['nlink']
        # FUSE seeems to ignore our st_ino
        #st['st_ino'] = int(obj.inode.name[1:])
        # Timestamps are not persisted; report "now" for all three.
        st['st_ctime'] = st['st_mtime'] = st['st_atime'] = time()
        return st
    def create(self, path, mode):
        """Create an empty file under the parent directory (mode is ignored)."""
        parent_path, file_name = os.path.split(path)
        parent = self.get_obj(parent_path)
        parent.create_file(file_name)
        self.get_obj.flush_memo()
        return 0
    def link(self, target, source):
        """Create a hard link `target` pointing at the object at `source`."""
        source_obj = self.get_obj(source)
        target_parent_obj = self.get_obj(os.path.dirname(target))
        target_parent_obj.link_file(os.path.basename(target), source_obj)
        self.get_obj.flush_memo()
    def mkdir(self, path, mode):
        """Create a directory (mode is ignored)."""
        parent_path, dir_name = os.path.split(path)
        parent = self.get_obj(parent_path)
        parent.create_directory(dir_name)
        self.get_obj.flush_memo()
    def read(self, path, size, offset, fh):
        """Read up to `size` bytes at `offset`; '' for missing paths or dirs."""
        obj = self.get_obj(path)
        if obj is None or obj.is_dir:
            return ''
        else:
            return obj.read_data(offset, size)
    def readdir(self, path, fh):
        """List directory entries, including '.' and '..'."""
        obj = self.get_obj(path)
        return ['.', '..'] + list(obj.keys())
    def rename(self, source, target):
        """Rename a file (link at new name, unlink old); dirs are refused."""
        source_obj = self.get_obj(source)
        if source_obj.is_dir:
            raise OSError(EPERM, '')
        target_parent_obj = self.get_obj(os.path.dirname(target))
        target_parent_obj.link_file(os.path.basename(target), source_obj)
        source_obj.unlink()
        self.get_obj.flush_memo()
    def rmdir(self, path):
        """Remove a directory; silently ignores missing paths and files."""
        obj = self.get_obj(path)
        if obj is None or not obj.is_dir:
            return
        obj.unlink()
        self.get_obj.flush_memo()
    def truncate(self, path, length, fh=None):
        """Truncate a file to `length`; silently ignores missing paths/dirs."""
        obj = self.get_obj(path)
        if obj is None or obj.is_dir:
            return
        obj.truncate(length)
    def unlink(self, path):
        """Remove a file; silently ignores missing paths and directories."""
        obj = self.get_obj(path)
        if obj is None or obj.is_dir:
            return
        obj.unlink()
        self.get_obj.flush_memo()
    def write(self, path, data, offset, fh):
        """Write `data` at `offset`; amend-commit after WRITE_BUFFER_SIZE bytes."""
        obj = self.get_obj(path)
        if obj is None or obj.is_dir:
            return 0
        obj.write_data(data, offset)
        if not self.repo.autocommit:
            self._write_count += len(data)
            if self._write_count > WRITE_BUFFER_SIZE:
                self.repo.commit(amend=True, branch="mounted")
                self._write_count = 0
        return len(data)
    # Unsupported operations are disabled by setting them to None so the
    # FUSE layer reports them as not implemented.
    # access = None
    flush = None
    getxattr = None
    listxattr = None
    open = None
    opendir = None
    release = None
    releasedir = None
    statfs = None
    def __call__(self, op, path, *args):
        """Dispatch a FUSE call under the big lock, logging call and return."""
        log.debug('FUSE api call: %r %r %r',
                  op, path, tuple(LogWrap(arg) for arg in args))
        ret = '[Unknown Error]'
        self._lock.acquire()
        try:
            ret = super(SpaghettiFS, self).__call__(op, path, *args)
            return ret
        except OSError, e:
            ret = str(e)
            raise
        finally:
            self._lock.release()
            log.debug('FUSE api return: %r %r', op, LogWrap(ret))
class LogWrap(object):
    """Lazy repr wrapper for log arguments; elides long strings.

    NOTE: uses `basestring`, i.e. Python 2 only.
    """
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        # Long strings become e.g. 'abcdefghijk[...(len=42)]' to keep
        # log lines short; everything else is repr'd verbatim.
        if isinstance(self.value, basestring) and len(self.value) > 20:
            r = repr(self.value[:12])
            return '%s[...(len=%d)]%s' % (r[:11], len(self.value), r[-1])
        else:
            return repr(self.value)
    def __str__(self):
        return repr(self)
def datefmt(dt):
    """Format a datetime as 'YYYY-MM-DD HH:MM:SS'.

    Was an assigned lambda (PEP 8 E731); a named def keeps the same
    callable interface and gives a useful traceback name.
    """
    return dt.strftime('%Y-%m-%d %H:%M:%S')
class _open_fs(object):
    """Context manager around a mounted session.

    On enter: opens GitStorage, records master's tree id, and creates a
    temporary 'mounted' branch to absorb in-flight commits.  On exit:
    amends the branch with mount/unmount times, fast-forwards master only
    if the tree actually changed, and deletes the 'mounted' branch.
    """
    def __init__(self, repo_path, cls):
        self.repo_path = repo_path
        self.cls = cls  # filesystem class to instantiate (e.g. SpaghettiFS)
    def __enter__(self):
        self.time_mount = datetime.now()
        self.repo = GitStorage(self.repo_path, autocommit=False)
        self.git = self.repo.eg.git
        master_id = self.git.refs['refs/heads/master']
        # Remember master's tree so we can detect no-op sessions on exit.
        self.initial_tree_id = self.git.commit(master_id).tree
        msg = ("[temporary commit; currently mounted, since %s]" %
               datefmt(self.time_mount))
        self.repo.commit(msg, branch="mounted", head_id=master_id)
        return self.cls(self.repo)
    def __exit__(self, e0, e1, e2):
        self.time_unmount = datetime.now()
        msg = ("Mounted operations:\n  mounted at %s\n  unmounted at %s\n" %
               (datefmt(self.time_mount), datefmt(self.time_unmount)))
        # Replace the temporary commit message with the final session record.
        self.repo.commit(msg, amend=True, branch="mounted")
        mounted_id = self.git.refs['refs/heads/mounted']
        mounted_tree_id = self.git.commit(mounted_id).tree
        # Only advance master when the session changed something.
        if mounted_tree_id != self.initial_tree_id:
            self.git.refs['refs/heads/master'] = mounted_id
        del self.git.refs['refs/heads/mounted']
def mount(repo_path, mount_path, cls=SpaghettiFS, loglevel=logging.ERROR):
    """Mount the repository at `mount_path` and block until unmounted.

    Attaches a stderr log handler when `loglevel` is not None; FUSE runs
    in the foreground, so this call returns only on unmount.
    """
    if loglevel is not None:
        stderr_handler = logging.StreamHandler()
        stderr_handler.setLevel(loglevel)
        logging.getLogger('spaghettifs').addHandler(stderr_handler)
    with _open_fs(repo_path, cls) as fs:
        FUSE(fs, mount_path, foreground=True)
|
import FWCore.ParameterSet.Config as cms
#--- reset HB/HE ZS to 2TS
#--- NB: may need appropriate HcalZSThresholds update
def customise_2TS(process):
    """Reset the HB/HE zero-suppression windows to 2 time slices.

    Mutates `process.simHcalDigis` in place and returns the same process
    object (cmsDriver customisation convention).
    """
    digis = process.simHcalDigis
    digis.HBregion = (2, 5)
    digis.HEregion = (2, 5)
    digis.use1ts = False
    return process
|
def replace_letters(word):
|
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.screenmanager import Screen
from kivy.uix.stacklayout import StackLayout
from bidding import Bidding, playOrder
from bidding_tree import bids
from constants import clubs, colors, diamonds, hearts, spades
from mediator import Mediator
from uibuilders import (
ButtonKind, buildButton, buildLabel, buildNumericInput, buildToggle, buildMenu, buildText,
colors, gap, halfGap, smallSize)
class BiddingScreen(Screen):
    """Kivy screen that displays the bridge bidding and lets the user enter
    bids as a (number, colour) pair or a special bid (pass/X/XX/?).

    Bug fix: `rootLayout` used to be a *class* attribute created at class
    definition time, so every BiddingScreen instance shared one BoxLayout
    widget.  It is now created per instance in `__init__`.
    """
    # Partial bid state: number and colour are buffered until both halves
    # of a bid have been chosen.
    currentNumber = None
    currentColor = None
    mediator: Mediator
    def __init__(self, mediator, **kwargs):
        # NOTE(review): super(Screen, self) skips Screen.__init__ and calls
        # Screen's own parent directly; the usual form is
        # super(BiddingScreen, self).  Left unchanged -- confirm intent.
        super(Screen, self).__init__(**kwargs)
        self.mediator = mediator
        # Created per instance (was a shared class attribute).
        self.rootLayout = BoxLayout(orientation='vertical')
        self.add_widget(self.rootLayout)
        self.build()
    def onAddBid(self, bid):
        """Record `bid` if bidding is still open and the bid is legal."""
        if self.mediator.bidding.isAllowed(bid) and not self.mediator.bidding.finished():
            self.mediator.bidding.addBid(bid)
        self.updateUI()
    def onUndo(self):
        """Remove the most recent bid and refresh."""
        self.mediator.bidding.removeLastBid()
        self.updateUI()
    def setBidding(self, bidding):
        """Replace the current bidding object and refresh."""
        self.mediator.bidding = bidding
        self.updateUI()
    def onDisplay(self):
        self.updateUI()
    def updateUI(self):
        """Schedule a full rebuild of the widget tree on the next frame."""
        def clearAndBuild(dt):
            self.rootLayout.clear_widgets()
            self.build()
        Clock.schedule_once(clearAndBuild)
    def buildHeaders(self):
        """Build menu/undo buttons plus the 'who starts' direction toggles."""
        headers = BoxLayout(orientation='vertical')
        topButtons = BoxLayout(orientation='horizontal')
        topButtons.add_widget(buildMenu(self.mediator, size_hint=(0.2, 1.0)))
        topButtons.add_widget(buildButton('Terug', lambda ins: self.onUndo(), size_hint=(0.8, 1.0)))
        headers.add_widget(topButtons)
        suits = GridLayout(cols=4, spacing=[gap, 0], padding=[0, gap])
        def createCallback(whoStarts):
            # Bind each toggle to its own direction (avoids late binding).
            return lambda instance: self.mediator.bidding.setWhoStarts(whoStarts)
        # create buttons for suits
        if self.mediator.bidding != None:
            for elem in playOrder:
                suits.add_widget(buildToggle(
                    elem, elem == self.mediator.bidding.whoStarts, createCallback(elem), 'whoStarts'))
        headers.add_widget(suits)
        return headers
    def buildCurrentBidding(self):
        """Build a 4-column grid showing all bids in play order."""
        currentBidding = GridLayout(cols=4)
        # create empty boxes to start at correct starting point
        numEmpty = playOrder.index(self.mediator.bidding.whoStarts)
        for i in range(0, numEmpty):
            currentBidding.add_widget(buildLabel(''))
        # create labels for all bids
        for bid in self.mediator.bidding.current:
            currentBidding.add_widget(buildLabel(bid))
        # create empty boxes to fill out the screen
        for i in range(0, 32 - len(self.mediator.bidding.current)):
            currentBidding.add_widget(buildLabel(''))
        return currentBidding
    def buildBidChooser(self):
        """Build the three rows of bid buttons: numbers, colours, specials."""
        bidLayout = BoxLayout(orientation='vertical')
        def addButtons(addedbuttons, buttonKind):
            result = BoxLayout(spacing=gap, padding=[0, halfGap])
            for elem in addedbuttons:
                def invoke(elem):
                    # '?' asks for advice; special bids go through directly;
                    # number/colour halves are buffered until both are set.
                    if elem == '?':
                        self.mediator.showAdvice()
                        return
                    elif buttonKind == ButtonKind.special:
                        self.onAddBid(elem)
                        return
                    elif buttonKind == ButtonKind.color:
                        self.currentColor = elem
                    elif buttonKind == ButtonKind.number:
                        self.currentNumber = elem
                    if (self.currentNumber != None and self.currentColor != None):
                        self.onAddBid(self.currentNumber + self.currentColor)
                        self.currentColor = self.currentNumber = None
                def createCallback(elem):
                    # Bind the current element (avoids late binding).
                    return lambda instance: invoke(elem)
                def isSelected(elem, buttonKind):
                    if buttonKind == ButtonKind.color:
                        return elem == self.currentColor
                    if buttonKind == ButtonKind.number:
                        return elem == self.currentNumber
                    return False
                result.add_widget(buildToggle(elem, isSelected(
                    elem, buttonKind), createCallback(elem), "buttons_" + str(buttonKind)))
            return result
        numberBtns = addButtons(
            ['1', '2', '3', '4', '5', '6', '7'], ButtonKind.number)
        colorBtns = addButtons(['♣', '♦', '♥', '♠', 'SA'], ButtonKind.color)
        extraBtns = addButtons(['pass', 'X', 'XX', '?'], ButtonKind.special)
        bidLayout.add_widget(numberBtns)
        bidLayout.add_widget(colorBtns)
        bidLayout.add_widget(extraBtns)
        return bidLayout
    def build(self):
        """Assemble headers, the bidding grid (or intro text) and footer."""
        topLayout = BoxLayout(orientation='vertical', size_hint=(1.0, 0.2))
        topLayout.add_widget(self.buildHeaders())
        if len(self.mediator.bidding.current) == 0:
            # No bids yet: show the introductory help text instead.
            currentBidding = buildText('''
BridgeBiddingBuddy is een app dat beginnende bridgers die onzeker zijn over hun bieding kan helpen.
U heeft zojuist ingevuld hoe uw hand eruit ziet, met die informatie en de bieding kan BidBud bepalen wat waarschijnlijk de beste volgende stap is.
Ook BidBud (de naam van het programma dat u helpt met biedingen) is niet perfect, dus lees de uitleg en denk goed na of dat past bij wat u wilt bereiken.
Druk als eerste stap op de windrichting die de leider is, en vul in welke bieding deze persoon doet.
Als u de leider bent geldt dit uiteraard ook.
Als u aan de beurt bent en u weet niet wat u moet doen kunt u op het vraagtekentje drukken.
Als de bieding klaar is kunt u deze bieding een naam geven door op het potloodje de drukken, zo kunt u als u de spellen nog wilt bespreken of bekijken later de bieding goed terug vinden.
Veel speel plezier! Van BidBud en van ons!
''')
        else:
            currentBidding = self.buildCurrentBidding()
        if (not self.mediator.bidding.finished()):
            bottom = BoxLayout(
                orientation='vertical', size_hint=(1.0, 0.3))
            bottom.add_widget(self.buildBidChooser())
        else:
            bottom = buildButton('Klaar', lambda i: self.mediator.closeBidding(), size_hint=(1.0, 0.1))
        self.rootLayout.add_widget(topLayout)
        self.rootLayout.add_widget(currentBidding)
        self.rootLayout.add_widget(bottom)
|
from .listener import RequestSuccessListener
from .notifier import RequestSuccessNotifier
|
import os
from redis import Redis
from neo4j import GraphDatabase
"""
get_redis connects to a redis database and returns an instance of this
connection.
"""
def get_redis():
    """Connect to Redis and return the client.

    Host and port come from REDIS_IP / REDIS_PORT (defaulting to
    localhost:6379, port kept as a string like the original); a password
    is passed only when REDIS_PASSWORD is set.
    """
    # environ.get with a default is equivalent to the original `== None`
    # checks, but idiomatic.
    redis_ip = os.environ.get('REDIS_IP', 'localhost')
    redis_port = os.environ.get('REDIS_PORT', '6379')
    redis_password = os.environ.get('REDIS_PASSWORD')
    if redis_password is None:
        return Redis(redis_ip, redis_port)
    return Redis(redis_ip, redis_port, password=redis_password)
"""
get_neo4j connects to a Neo4j database and returns an instance
of this connection.
"""
def get_neo4j():
    """Connect to Neo4j and return a driver instance.

    The bolt host comes from NEO4J_IP (default "localhost", port 7687);
    credentials from NEO4J_USER / NEO4J_PASS, both mandatory.

    Raises:
        ValueError: if NEO4J_USER or NEO4J_PASS is unset.
    """
    neo4j_ip = "bolt://" + os.environ.get('NEO4J_IP', 'localhost') + ":7687"
    neo4j_user = os.environ.get('NEO4J_USER')
    if neo4j_user is None:
        raise ValueError('NEO4J_USER environment variable must be set.')
    neo4j_password = os.environ.get('NEO4J_PASS')
    if neo4j_password is None:
        raise ValueError('NEO4J_PASS environment variable must be set.')
    return GraphDatabase.driver(neo4j_ip, auth=(neo4j_user, neo4j_password),
                                encrypted=False)
"""
get_mysql connects to a MySQL Database and returns an instance of that
connection.
"""
def get_mysql():
    """Connect to MySQL and return the connection.

    Reads MYSQL_USER / MYSQL_PASSWORD / MYSQL_HOST / MYSQL_DB, all
    mandatory.

    Bug fix: every error message named NEO4J_USER (copy-paste from
    get_neo4j); each message now names the variable that is actually
    missing.

    NOTE(review): `mysql.connector` is never imported in this module, so
    the final connect call would raise NameError -- confirm an
    `import mysql.connector` exists at the top of the real file.

    Raises:
        ValueError: if any required environment variable is unset.
    """
    mysql_user = os.environ.get('MYSQL_USER')
    if mysql_user is None:
        raise ValueError('MYSQL_USER environment variable must be set.')
    mysql_password = os.environ.get('MYSQL_PASSWORD')
    if mysql_password is None:
        raise ValueError('MYSQL_PASSWORD environment variable must be set.')
    mysql_host = os.environ.get('MYSQL_HOST')
    if mysql_host is None:
        raise ValueError('MYSQL_HOST environment variable must be set.')
    mysql_db = os.environ.get('MYSQL_DB')
    if mysql_db is None:
        raise ValueError('MYSQL_DB environment variable must be set.')
    return mysql.connector.connect(user=mysql_user, password=mysql_password, host=mysql_host, database=mysql_db)
"""
get_depends_service returns the URL of the pom-search-service
"""
def get_depends_service():
    """Return the pom-search-service URL from DEPENDS_SERVICE_URL (or None)."""
    url = os.environ.get("DEPENDS_SERVICE_URL")
    return url
"""
get_queue_manager returns the URL of the queue manager for this manager-worker set.
"""
def get_queue_manager():
    """Return the queue-manager URL from QUEUE_MANAGER (or None if unset)."""
    url = os.environ.get("QUEUE_MANAGER")
    return url
|
# Generated by Django 2.1.2 on 2018-10-06 21:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds Week._carry_over and relaxes
    # Expense.actual_amount to allow blank/null values.  Auto-generated
    # migrations are normally left as-is.
    dependencies = [
        ('cash', '0002_auto_20181006_2046'),
    ]
    operations = [
        migrations.AddField(
            model_name='week',
            name='_carry_over',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='expense',
            name='actual_amount',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
|
from . import decisions
class Territory:
    """A node on the game map; tracks occupying, attacking and retreating
    pieces and resolves per-territory queries used by adjudication.

    Bug fix: `foreign_attacking_pieces` and `other_attacking_pieces`
    removed items from the list they were iterating, which skips the
    element following each removal.  Both now build a new list with a
    comprehension, matching `other_retreating_pieces`.
    """
    is_complex = False
    is_coastal = False
    is_inland = False
    is_sea = False
    def __init__(self, _id, name, neighbour_ids, contested=False):
        self.id = _id
        self.name = name
        self.neighbour_ids = neighbour_ids
        self.pieces = set()             # pieces currently in the territory
        self.neighbours = set()         # resolved neighbouring Territory objects
        self.named_coasts = set()
        self.attacking_pieces = set()   # pieces moving into this territory
        self.retreating_pieces = set()  # pieces retreating into this territory
        self.contested = contested
        self.bounce_occurred = False
    def __str__(self):
        return self.name
    def __repr__(self):
        return f'{self.name} - {self.__class__.__name__}'
    @property
    def piece(self):
        """The piece here (the retreating one when there are two), or None."""
        pieces = list(self.pieces)
        if len(pieces) == 1:
            return pieces[0]
        if len(pieces) == 2:
            return [p for p in pieces if p.retreating][0]
        if not pieces:
            return None
    @property
    def non_retreating_piece(self):
        """The non-retreating piece here, or None when empty."""
        pieces = list(self.pieces)
        if len(pieces) == 1:
            return pieces[0]
        if len(pieces) == 2:
            return [p for p in pieces if not p.retreating][0]
        if not pieces:
            return None
    @property
    def hold_strength(self):
        return decisions.HoldStrength(self)()
    @property
    def occupied(self):
        return bool(self.piece)
    def adjacent_to(self, territory):
        return territory in self.neighbours
    def friendly_piece_exists(self, nation):
        """
        Determine whether a piece belonging to the given nation exists in the
        territory.
        Args:
            * `nation` - `str`
        Returns:
            * `bool`
        """
        if self.piece:
            return self.piece.nation == nation
        return False
    def occupied_by(self, nation):
        """
        Determine whether the territory is occupied by a piece belonging to the given nation
        Args:
            * `nation` - `str`
        Returns:
            * `bool`
        """
        if self.occupied:
            return self.piece.nation == nation
        return False
    def foreign_attacking_pieces(self, nation):
        """
        Gets all pieces which are moving into this territory
        who do not belong to the given.
        Args:
            * `nation` - `str`
        Returns a list of piece instances
        """
        # Fixed: the original removed items from the list while iterating it,
        # which skips the element after every removal.
        return [p for p in self.attacking_pieces if p.nation != nation]
    def other_attacking_pieces(self, piece):
        """
        Gets all pieces which are moving into this territory excluding the
        given piece.
        Args:
            * `piece` - `Piece`
        Returns:
            * `list` of `Piece` instances.
        """
        # Fixed: same iterate-while-removing problem as above.
        return [p for p in self.attacking_pieces if p != piece]
    def other_retreating_pieces(self, piece):
        """
        Gets all pieces which are retreating into this territory excluding the
        given piece.
        Args:
            * `piece` - `Piece`
        Returns:
            * `list` of `Piece` instances.
        """
        other_retreating_pieces = list(self.retreating_pieces)
        return [p for p in other_retreating_pieces if p != piece]
    def to_dict(self):
        return {
            'id': self.id,
            'bounce_occurred': self.bounce_occurred,
        }
class LandTerritory(Territory):
    """Territory on land; may be a supply center and may be controlled."""
    def __init__(self, _id, name, nationality, neighbour_ids, supply_center=False, controlled_by=None, **kwargs):
        super().__init__(_id, name, neighbour_ids, **kwargs)
        self.nationality = nationality      # home nation of the territory
        self.supply_center = supply_center  # True if it grants a build
        self.controlled_by = controlled_by  # nation currently in control
class CoastalTerritory(LandTerritory):
    """Land territory with a coastline; accessible to any piece type."""
    is_coastal = True
    def __init__(self, _id, name, nationality, neighbour_ids, shared_coast_ids, **kwargs):
        super().__init__(_id, name, nationality, neighbour_ids, **kwargs)
        self.shared_coast_ids = shared_coast_ids
        self.shared_coasts = set()  # resolved territories sharing a coast
    @staticmethod
    def accessible_by_piece_type(piece):
        # Both armies and fleets may enter a coastal territory.
        return True
    @property
    def is_complex(self):
        # Overrides the class-level flag: complex when it has named coasts.
        return bool(self.named_coasts)
class InlandTerritory(LandTerritory):
    """Land territory with no coastline; only armies may enter."""
    is_inland = True
    @staticmethod
    def accessible_by_piece_type(piece):
        # Fleets cannot move inland.
        return type(piece).__name__ == 'Army'
class SeaTerritory(Territory):
    """Open-sea territory; only fleets may enter."""
    is_sea = True
    @staticmethod
    def accessible_by_piece_type(piece):
        # Armies cannot move onto open sea.
        return type(piece).__name__ == 'Fleet'
|
import numpy as np
def dist_sphe(x, y, lon, lat):
    """Great-circle arc between (x, y) and (lon, lat) on the unit sphere.

    All angles in radians; result is the central angle in radians.
    """
    cos_arc = np.sin(lat) * np.sin(y) + np.cos(lat) * np.cos(y) * np.cos(lon - x)
    return np.arccos(cos_arc)
def compute_weight(x, y, lon, lat, reso, degrees=False):
    """Gaussian-filter weight between points (x, y) and (lon, lat).

    `reso` is the filter e-folding scale in the same angular units as the
    coordinates; set `degrees=True` when inputs are in degrees.
    """
    if degrees:
        # Convert everything (coordinates and resolution) to radians.
        to_rad = np.pi / 180
        x, y = x * to_rad, y * to_rad
        lon, lat = lon * to_rad, lat * to_rad
        reso = reso * to_rad
    # Spherical arc (dist_sphe inlined) fed into a Gaussian kernel.
    arc = np.arccos(np.sin(lat) * np.sin(y) + np.cos(lat) * np.cos(y) * np.cos(lon - x))
    return np.exp(-0.5 * (arc / reso) ** 2)
|
"""Define Rotest's TestSuite, composed from test suites or test cases."""
# pylint: disable=method-hidden,bad-super-call,too-many-arguments
# pylint: disable=too-many-locals
from __future__ import absolute_import
import unittest
from itertools import count, chain
from future.builtins import next
from rotest.common import core_log
from rotest.core.case import TestCase
from rotest.core.flow import TestFlow
from rotest.common.utils import get_work_dir
from rotest.core.result.result import Result
from rotest.common.config import ROTEST_WORK_DIR
from rotest.core.models.suite_data import SuiteData
class TestSuite(unittest.TestSuite):
    """Test composed from other test suites or test cases.
    Test suite is defined by a sequence of :class:`rotest.core.case.TestCase`
    or :class:`rotest.core.suite.TestSuite` that would run in a specific order.
    The suite is responsible for running the test items one after the other.
    Test authors should subclass TestCase for their own tests and override
    **components** tuple with the required test items.
    Attributes:
        components (tuple): List of test classes, subclasses of
            :class:`rotest.core.case.TestCase` or
            :class:`rotest.core.suite.TestSuite`.
        data (rotest.core.models.SuiteData): Contain information about
            a test suite run.
        TAGS (list): list of tags by which the test may be filtered.
        IS_COMPLEX (bool): if this test is complex (may contain sub-tests).
    """
    components = ()
    TAGS = []
    IS_COMPLEX = True
    _cleanup = False
    # NOTE(review): the shared default `indexer=count()` is a mutable default
    # evaluated once at definition time — here that appears intentional, so
    # that all suites created without an explicit indexer share one run-wide
    # numbering sequence; confirm against the runner.
    def __init__(self, tests=(), indexer=count(),
                 base_work_dir=ROTEST_WORK_DIR, save_state=True, config=None,
                 parent=None, run_data=None, enable_debug=False,
                 skip_init=False, resource_manager=None):
        """Initialize 'components' & add them to the suite.
        Validates & initializes the TestSuite components & data object.
        Args:
            tests (iterable): tests to add to the suite.
            base_work_dir (str): the base directory of the tests.
            save_state (bool): flag to determine if storing the states of
                resources is required.
            config (AttrDict): dictionary of configurations.
            indexer (iterator): the generator of test indexes.
            parent (TestSuite): container of this test.
            run_data (RunData): test run data object.
            enable_debug (bool): whether to enable entering ipdb debugging mode
                upon any exception in a test statement.
            skip_init (bool): True to skip resources initialization and
                validation of resources.
            resource_manager (ClientResourceManager): tests' client resource
                manager instance, leave None to create a new one for the test.
        Raises:
            AttributeError: if components tuple is empty.
            TypeError: in case components tuple contains anything other than
                classes inheriting from :class:`rotest.core.case.TestCase`,
                :class:`rotest.core.suite.TestSuite`.
        """
        super(TestSuite, self).__init__()
        self.parent = parent
        name = self.get_name()
        # Unique test index drawn from the (possibly shared) indexer.
        self.identifier = next(indexer)
        self.resource_manager = resource_manager
        self.parents_count = self._get_parents_count()
        self.config = config
        if parent is not None:
            # Register this suite inside its containing suite.
            parent.addTest(self)
        core_log.debug("Initializing %r test-suite", name)
        if len(self.components) == 0 and len(tests) == 0:
            raise AttributeError("%s: Components tuple can't be empty" % name)
        core_log.debug("Creating database entry for %r test-suite", name)
        # Each suite gets its own work dir nested under base_work_dir.
        self.work_dir = get_work_dir(base_work_dir, name, self)
        self.data = SuiteData(name=name, run_data=run_data)
        # NOTE(review): the constructed test items are never addTest()-ed
        # here; they appear to register themselves through the parent=self
        # argument (as this class does above) — confirm in TestCase/TestFlow.
        for test_component in chain(self.components, tests):
            if issubclass(test_component, TestCase):
                # A TestCase class expands into one item per test method.
                for method_name in test_component.load_test_method_names():
                    test_item = test_component(parent=self,
                                               config=config,
                                               indexer=indexer,
                                               run_data=run_data,
                                               skip_init=skip_init,
                                               save_state=save_state,
                                               methodName=method_name,
                                               enable_debug=enable_debug,
                                               base_work_dir=self.work_dir,
                                               resource_manager=resource_manager)
                    core_log.debug("Adding %r to %r", test_item, self.data)
            elif issubclass(test_component, TestFlow):
                test_item = test_component(parent=self,
                                           config=config,
                                           indexer=indexer,
                                           run_data=run_data,
                                           skip_init=skip_init,
                                           save_state=save_state,
                                           enable_debug=enable_debug,
                                           base_work_dir=self.work_dir,
                                           resource_manager=resource_manager)
                core_log.debug("Adding %r to %r", test_item, self.data)
            elif issubclass(test_component, TestSuite):
                test_item = test_component(parent=self,
                                           config=config,
                                           indexer=indexer,
                                           run_data=run_data,
                                           skip_init=skip_init,
                                           save_state=save_state,
                                           enable_debug=enable_debug,
                                           base_work_dir=self.work_dir,
                                           resource_manager=resource_manager)
                core_log.debug("Adding %r to %r", test_item, self.data)
            else:
                raise TypeError("Components under TestSuite must be classes "
                                "inheriting from TestCase or TestSuite, "
                                "got %r" % test_component)
        core_log.debug("Initialized %r test-suite successfully", self.data)
    def add_resources(self, resources):
        """Add the resources to the child tests.
        Args:
            resources (dict): dictionary of attributes name to resources
                instance.
        """
        # Delegates recursively: iterating a unittest.TestSuite yields its items.
        for test in self:
            test.add_resources(resources)
    @classmethod
    def get_name(cls):
        """Return test name as used in Django DB.
        Returns:
            str. test name as used in Django DB.
        """
        return cls.__name__
    def run(self, result, debug=False):
        """Run the tests under the suite and update its data object.
        * Notify the data object that the test suite started.
        * Call the test suite run method.
        * Notify the data object that the test suite ended & update its result.
        Args:
            result (rotest.core.result.result.Result): Holder for
                test result information.
            debug (bool): If suite, tests will be run without collecting errors
                in a TestResult.
        Returns:
            rotest.core.result.result.Result. holder for test result
            information.
        """
        # Composite start/stop hooks exist only on rotest's Result subclass,
        # hence the isinstance guards around the plain unittest run.
        if isinstance(result, Result):
            result.startComposite(self)
        core_log.debug("Running %r test-suite", self.data)
        result = super(TestSuite, self).run(result, debug)
        if isinstance(result, Result):
            result.stopComposite(self)
        return result
    def _get_parents_count(self):
        """Get the number of ancestors.
        Returns:
            number. number of ancestors.
        """
        if self.parent is None:
            return 0
        return self.parent.parents_count + 1
    def start(self):
        """Update the data that the test started."""
        self.data.start()
|
# Project Euler #67: maximum top-to-bottom path sum through the triangle,
# computed with a row-by-row dynamic program (dp[r][k] = best sum ending at
# row r, position k).
with open('input-p67.txt') as f:
    rows = [line.split(" ") for line in f]

dp = [[int(rows[0][0])]]
for row in rows[1:]:
    prev = dp[-1]
    current = []
    last = len(row) - 1
    for idx, token in enumerate(row):
        value = int(token)
        if idx == 0:
            # Left edge: only one parent.
            current.append(value + prev[0])
        elif idx == last:
            # Right edge: only one parent.
            current.append(value + prev[-1])
        else:
            current.append(value + max(prev[idx], prev[idx - 1]))
    dp.append(current)

print(max(dp[-1]))
|
import json
import urllib.request
def get(status="In Service"):
    """Load the Citi Bike station feed.

    Args:
        status: station status to filter by (e.g. "In Service",
            "Not In Service"); pass a falsy value to return all stations.

    Returns:
        A list of station dicts from the feed.
    """
    url = 'https://feeds.citibikenyc.com/stations/stations.json'
    raw = urllib.request.urlopen(url).read().decode("utf-8")
    stations = json.loads(raw)['stationBeanList']
    if not status:
        return stations
    return [station for station in stations if station['statusValue'] == status]
|
import sys
sys.path.append("../..")
from MyCrypto.utils.galois_field import GF
from MyCrypto.utils.matrix import Matrix
class GF2_8(GF):
    """Element of the Galois field GF(2^8) used by AES (order fixed to 8)."""
    def __init__(self, data, order=8):
        super().__init__(data, order)
class AES:
    """Textbook AES-128 over a single 128-bit block (no padding, no modes).

    The state and round keys are 4x4 column-major matrices of GF(2^8)
    elements.  The S-box, its inverse and the round constants are derived
    at construction time in _reset_data instead of being hard-coded tables.
    """
    def __init__(self, raw_key):
        # Order matters: the key schedule in _reset_key needs the S-box and
        # round constants that _reset_data prepares.
        self._reset_data()
        self._reset_key(raw_key)
    def run(self, data, method='encrypt'):
        """Encrypt or decrypt one 128-bit integer block.

        NOTE(review): any other `method` value silently returns None.
        """
        if method == 'encrypt':
            return self._encrypt(data)
        elif method == 'decrypt':
            return self._decrypt(data)
    def _encrypt(self, data):
        """Run the 10-round AES-128 encryption on a 128-bit int."""
        # Split the 128-bit int into 16 bytes, laid out column-major.
        data = self.split_bit(data, 128, 16)
        state = [[data[i+j*4] for j in range(4)] for i in range(4)]
        state = Matrix(state, dtype=GF2_8)
        # Initial AddRoundKey.
        state = state + self._keys[0]
        for i in range(1, 10):
            self._sub_bytes(state)
            state = self._shift_rows(state)
            state = self._mix_cols(state)
            state = self._add_round_key(state, self._keys[i])
        # Final round omits MixColumns.
        self._sub_bytes(state)
        state = self._shift_rows(state)
        state = self._add_round_key(state, self._keys[10])
        # Re-serialize column-major back into one integer.
        data = [state[i, j].data for j in range(4) for i in range(4)]
        data = self.merge_bit(data, 8)
        return data
    def _decrypt(self, data):
        """Inverse cipher: mirrors _encrypt with the adjusted inverse keys."""
        data = self.split_bit(data, 128, 16)
        state = [[data[i+j*4] for j in range(4)] for i in range(4)]
        state = Matrix(state, dtype=GF2_8)
        state = state + self._keys_inv[10]
        for i in range(9, 0, -1):
            self._sub_bytes_inv(state)
            state = self._shift_rows_inv(state)
            state = self._mix_cols_inv(state)
            # _keys_inv holds InvMixColumns-adjusted middle round keys
            # (see _reset_key), enabling this "equivalent inverse" round shape.
            state = self._add_round_key(state, self._keys_inv[i])
        self._sub_bytes_inv(state)
        state = self._shift_rows_inv(state)
        state = self._add_round_key(state, self._keys_inv[0])
        data = [state[i, j].data for j in range(4) for i in range(4)]
        data = self.merge_bit(data, 8)
        return data
    def _sub_bytes(self, state):
        """SubBytes, in place: the byte's two nibbles index the S-box matrix."""
        for i in range(state.row):
            for j in range(state.col):
                state[i, j] = self._s_box[self.split_bit(state[i, j].data, 8, 2)]
    def _sub_bytes_inv(self, state):
        """InvSubBytes, in place, via the inverse S-box."""
        for i in range(state.row):
            for j in range(state.col):
                state[i, j] = self._s_box_inv[self.split_bit(state[i, j].data, 8, 2)]
    def _shift_rows(self, state):
        """ShiftRows: rotate row i left by i positions; returns a new Matrix."""
        mat_data = [[state[i, j].data for j in range(state.col)] for i in range(state.row)]
        for i in range(4):
            for j in range(i):
                mat_data[i].append(mat_data[i].pop(0))
        return Matrix(mat_data, dtype=GF2_8)
    def _shift_rows_inv(self, state):
        """InvShiftRows: rotate row i right by i positions; returns a new Matrix."""
        mat_data = [[state[i, j].data for j in range(state.col)] for i in range(state.row)]
        for i in range(4):
            for j in range(i):
                mat_data[i].insert(0, mat_data[i].pop())
        return Matrix(mat_data, dtype=GF2_8)
    def _mix_cols(self, state):
        """MixColumns: left-multiply by the standard AES circulant matrix."""
        transform = [
            [2, 3, 1, 1],
            [1, 2, 3, 1],
            [1, 1, 2, 3],
            [3, 1, 1, 2]
        ]
        transform = Matrix(transform, dtype=GF2_8)
        return transform * state
    def _mix_cols_inv(self, state):
        """InvMixColumns: left-multiply by the inverse circulant matrix."""
        transform = [
            [0xe, 0xb, 0xd, 0x9],
            [0x9, 0xe, 0xb, 0xd],
            [0xd, 0x9, 0xe, 0xb],
            [0xb, 0xd, 0x9, 0xe]
        ]
        transform = Matrix(transform, dtype=GF2_8)
        return transform * state
    def _add_round_key(self, state, key):
        """AddRoundKey: GF(2^8) addition is XOR, element-wise."""
        return state + key
    def _reset_data(self):
        """Derive the S-box, inverse S-box and round constants.

        The forward S-box is built per the AES construction: multiplicative
        inverse in GF(2^8), then an affine transform over GF(2) given by
        a_matrix and the constant column c_matrix.
        NOTE(review): the [::-1] reversals suggest bit vectors are handled
        least-significant-bit last — confirm the bit order against a known
        S-box value (S[0x00] should be 0x63).
        """
        s_matrix = [[i*16+j for j in range(16)] for i in range(16)]
        s_matrix = Matrix(s_matrix, dtype=GF2_8)
        a_matrix = [[1 if j in [i, (i+4)%8, (i+5)%8, (i+6)%8, (i+7)%8] else 0 for j in range(8)] for i in range(8)]
        c_matrix = [[1], [1], [0], [0], [0], [1], [1], [0]]
        a_matrix = Matrix(a_matrix, dtype=GF2_8)
        c_matrix = Matrix(c_matrix, dtype=GF2_8)
        for i in range(s_matrix.row):
            for j in range(s_matrix.col):
                # 0 has no inverse and maps through the affine step alone.
                if s_matrix[i, j] != GF2_8(0):
                    s_matrix[i, j] = s_matrix[i, j].inv
                # The comprehension variable i below shadows the loop i, but
                # Python 3 comprehensions have their own scope, so it is safe.
                temp = self.split_bit(s_matrix[i, j].data, 8, 8)[::-1]
                temp = [[i] for i in temp]
                x_matrix = Matrix(temp, dtype=GF2_8)
                temp = a_matrix * x_matrix + c_matrix
                temp = [temp[i, 0].data for i in range(temp.row)][::-1]
                temp = [self.merge_bit(temp, 1)][0]
                s_matrix[i, j] = GF2_8(temp)
        self._s_box = s_matrix
        # The inverse S-box applies the inverse affine transform (b_matrix,
        # d_matrix) first, then the multiplicative inverse.
        is_matrix = [[i*16+j for j in range(16)] for i in range(16)]
        is_matrix = Matrix(is_matrix, dtype=GF2_8)
        b_matrix = [[1 if j in [(i+2)%8, (i+5)%8, (i+7)%8] else 0 for j in range(8)] for i in range(8)]
        d_matrix = [[1], [0], [1], [0], [0], [0], [0], [0]]
        b_matrix = Matrix(b_matrix, dtype=GF2_8)
        d_matrix = Matrix(d_matrix, dtype=GF2_8)
        for i in range(is_matrix.row):
            for j in range(is_matrix.col):
                temp = self.split_bit(is_matrix[i, j].data, 8, 8)[::-1]
                temp = [[i] for i in temp]
                ix_matrix = Matrix(temp, dtype=GF2_8)
                temp = b_matrix * ix_matrix + d_matrix
                temp = [temp[i, 0].data for i in range(temp.row)][::-1]
                temp = [self.merge_bit(temp, 1)][0]
                is_matrix[i, j] = GF2_8(temp)
                if is_matrix[i, j] != GF2_8(0):
                    is_matrix[i, j] = is_matrix[i, j].inv
        self._s_box_inv = is_matrix
        # Round constants: successive powers of 2 (i.e. x) in GF(2^8).
        self._rcon_c = [GF2_8(1)]
        for i in range(9):
            self._rcon_c.append(GF2_8(2)*self._rcon_c[-1])
    def _reset_key(self, raw_key):
        """Expand the 128-bit key into 11 round-key matrices (AES key schedule)."""
        def _rot_word(x):
            # Rotate a 4-byte word left by one byte (copy, not in place).
            x = x[:]
            x.append(x.pop(0))
            return x
        def _sub_word(x):
            # Apply the S-box to each byte of the word.
            x = x[:]
            for i in range(len(x)):
                x[i] = self._s_box[self.split_bit(x[i].data, 8, 2)]
            return x
        def _rcon(i):
            # Round-constant word: [rcon_i, 0, 0, 0].
            x = [GF2_8(0) for i in range(4)]
            x[0] = self._rcon_c[i]
            return x
        keys = []
        cur_key = self.split_bit(raw_key, 128, 16)
        cur_key = [[cur_key[i+j*4] for j in range(4)] for i in range(4)]
        cur_key = Matrix(cur_key, dtype=GF2_8)
        keys.append(cur_key)
        for i in range(10):
            # w = last column; the first new column mixes RotWord/SubWord/Rcon.
            w = [cur_key[j, 3] for j in range(4)]
            x = _rot_word(w)
            y = _sub_word(x)
            z = [y[j] + _rcon(i)[j] for j in range(4)]
            new_key = Matrix.construct(4, 4, dtype=GF2_8)
            for j in range(4):
                new_key[j, 0] = cur_key[j, 0] + z[j]
            # Remaining columns: previous new column XOR same column of old key.
            for r in range(3):
                for j in range(4):
                    new_key[j, r+1] = new_key[j, r] + cur_key[j, r+1]
            cur_key = new_key
            keys.append(cur_key)
        self._keys = keys
        # Equivalent-inverse-cipher keys: InvMixColumns on the middle keys only.
        self._keys_inv = [self._mix_cols_inv(keys[i]) if (0<i<len(keys)-1) else keys[i] for i in range(len(keys))]
    @staticmethod
    def split_bit(data, length, time):
        ''' Split a `length`-bit integer into `time` equal chunks (MSB first). '''
        assert length % time == 0
        split_len = length // time
        results = list()
        for t in range(time):
            results.insert(0, data & ((1 << split_len) - 1))
            data >>= split_len
        return results
    @staticmethod
    def merge_bit(data, length):
        ''' Merge `length`-bit chunks back into one integer (first chunk is MSB). '''
        result = 0
        for d in data:
            result <<= length
            result ^= d
        return result
if __name__ == '__main__':
    # Smoke test: encrypt a known plaintext, then verify decryption round-trips.
    key = 0x0f1571c947d9e8590cb7add6af7f6798
    plaintext = 0x0123456789abcdeffedcba9876543210
    aes = AES(key)
    code = aes.run(plaintext)
    print(hex(code))
    print(hex(aes.run(code, method='decrypt')))
|
#!/usr/bin/env python
""" Convert a svg file into 2D triangle mesh.
"""
import argparse
import logging
import pymesh
import numpy as np
from numpy.linalg import norm
import os.path
from subprocess import check_call
from time import time
def parse_args():
    """Parse command-line options for the SVG-to-mesh converter."""
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument("--engine", help="Triangulation engine", choices=(
        "triangle_conforming_delaunay",
        "triangle_constrained_delaunay",
        "cgal_constrained_delaunay",
        "cgal_conforming_delaunay",
        "geogram_delaunay",
        "jigsaw_frontal_delaunay",
        "mmg_delaunay", "triwild"),
        default="triangle_conforming_delaunay")
    # Boolean switches controlling the preprocessing pipeline.
    parser.add_argument("--resolve-self-intersection", "-r", action="store_true")
    parser.add_argument("--with-frame", '-f', action="store_true")
    parser.add_argument("--with-cell-label", "-l", action="store_true")
    parser.add_argument("--with-cleanup", "-c", action="store_true")
    parser.add_argument("--with-triangulation", "-t", action="store_true")
    # TriWild-specific tuning knobs.
    parser.add_argument("--stage", type=int, default=1)
    parser.add_argument("--epsilon", type=float, default=1e-3)
    parser.add_argument("--log", type=str, help="Logging level",
                        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
                        default="INFO")
    parser.add_argument("--with-features", '-F', action="store_true",
                        help="TriWild specific option to preserve features")
    parser.add_argument("input_svg")
    parser.add_argument("output_mesh")
    return parser.parse_args()
def get_logger(level):
    """Return the "PyMesh.Triangulation" logger configured at *level*.

    Raises:
        ValueError: if *level* is not a valid logging level name.
    """
    numeric = getattr(logging, level, None)
    if not isinstance(numeric, int):
        raise ValueError('Invalid log level: {}'.format(level))
    logging.basicConfig(level=numeric)
    return logging.getLogger("PyMesh.Triangulation")
def drop_zero_dim(wires):
    """Reduce 3D wires with an all-zero z-coordinate down to 2D."""
    if wires.dim == 3:
        pts = wires.vertices
        # The third column must be identically zero or data would be lost.
        assert(np.all(pts[:, 2] == 0))
        wires.load(pts[:, [0, 1]], wires.edges)
    return wires
def cleanup(wires, logger):
    """Merge duplicated vertices/edges and drop degenerate edges, in place."""
    if wires.num_vertices == 0:
        return wires
    t_start = time()
    tol = 1e-6
    vertices, edges, __ = pymesh.remove_duplicated_vertices_raw(
        wires.vertices, wires.edges, tol)
    # Collapse edges that differ only in orientation: sort endpoints first,
    # then keep one representative of each unordered pair.
    __, unique_edge_ids, __ = pymesh.unique_rows(np.sort(edges, axis=1))
    edges = edges[unique_edge_ids, :]
    wires.load(vertices, edges)
    # Edges whose endpoints collapsed onto the same vertex are dropped.
    keep_mask = edges[:, 0] != edges[:, 1]
    if not np.all(keep_mask):
        wires.filter_edges(keep_mask)
    logger.info("Cleanup running time: {}".format(time() - t_start))
    return wires
def add_frame(wires):
    """Append a rectangular frame around the wires, offset by 5% of the
    bounding-box diagonal on every side."""
    if wires.num_vertices == 0:
        return wires
    lo = np.amin(wires.vertices, axis=0)
    hi = np.amax(wires.vertices, axis=0)
    margin = np.ones(2) * norm(hi - lo) / 20
    lo = lo - margin
    hi = hi + margin
    corners = np.array([
        [lo[0], lo[1]],
        [hi[0], lo[1]],
        [hi[0], hi[1]],
        [lo[0], hi[1]],
    ])
    # Frame edge indices come after the existing vertices.
    frame_edges = np.array([[0, 1], [1, 2], [2, 3], [3, 0]]) + wires.num_vertices
    wires.load(np.vstack([wires.vertices, corners]),
               np.vstack([wires.edges, frame_edges]))
    return wires
def resolve_self_intersection(wires, logger):
    """Snap-round the wire network to remove self-intersections."""
    if wires.num_vertices == 0:
        return wires
    lo, hi = wires.bbox
    # Snapping tolerance: 0.1% of the bounding-box diagonal.
    tol = norm(hi - lo) / 1000
    start = time()
    vertices, edges = pymesh.snap_rounding(wires.vertices, wires.edges, tol)
    logger.info("Snap rounding running time: {}".format(time() - start))
    wires.load(vertices, edges)
    return wires
def triangulate(wires, engine, stage, eps, logger, wire_file, json_file):
    """Triangulate the wire network and return the resulting mesh.

    The "triwild" engine shells out to the TriWild binary (which must be
    on PATH) and loads the mesh it writes; all other engines go through
    pymesh.triangulate_beta.  An empty wire network yields an empty mesh.
    """
    if wires.num_vertices == 0:
        # Degenerate input: return an empty 2D mesh.
        return pymesh.form_mesh(np.zeros((0, 2)), np.zeros((0,3)));
    basename = os.path.splitext(wire_file)[0];
    if engine == "triwild":
        # TriWild writes "<basename>_linear.msh"; we read it back below.
        out_mesh = "{}_linear.msh".format(basename);
        log_file = "{}_triwild.log".format(basename);
        if json_file is not None:
            command = "TriWild --mute-log --feature-envelope-r {} --stage {} --log-file {} --feature-input {} --output-linear-mesh --skip-eps --input {} --output {}".format(
                eps, stage, log_file, json_file, wire_file, basename);
        else:
            command = "TriWild --mute-log --feature-envelope-r {} --stage {} --log-file {} --output-linear-mesh --skip-eps --input {} --output {}".format(
                eps, stage, log_file, wire_file, basename);
        print(command);
        start_time = time();
        # Raises CalledProcessError if TriWild exits non-zero.
        check_call(command.split());
        finish_time = time();
        t = finish_time - start_time;
        mesh = pymesh.load_mesh(out_mesh, drop_zero_dim=True);
    else:
        mesh, t = pymesh.triangulate_beta(wires.vertices, wires.edges,
                engine=engine, with_timing=True);
    logger.info("Triangulation running time: {}".format(t));
    return mesh;
def compute_cell_labels(wires, mesh, logger):
    """Label every triangle with the arrangement cell containing its centroid."""
    start = time()
    arrangement = pymesh.Arrangement2()
    arrangement.points = wires.vertices
    arrangement.segments = wires.edges
    arrangement.run()
    mesh.add_attribute("face_centroid")
    centroids = mesh.get_face_attribute("face_centroid")
    query_results = arrangement.query(centroids)
    logger.info("Arrangement running time: {}".format(time() - start))
    cell_type = np.array([entry[0] for entry in query_results])
    cell_ids = np.array([entry[1] for entry in query_results])
    # Centroids not strictly inside a cell get the sentinel label -1.
    cell_ids[cell_type != pymesh.Arrangement2.ElementType.CELL] = -1
    mesh.add_attribute("cell")
    mesh.set_attribute("cell", cell_ids)
def solve_heat_equation(mesh):
    """Attach per-vertex squared distance to the nearest cell boundary.

    (Despite the name, no PDE is solved — this is an AABB-tree distance
    query against the boundary edges of the cell-partitioned mesh.)
    """
    cell_ids = mesh.get_attribute("cell").ravel().astype(int)
    cut_mesh = pymesh.cut_mesh(mesh, cell_ids)
    tree = pymesh.AABBTree2()
    tree.load_data(cut_mesh.vertices, cut_mesh.boundary_edges)
    sq_dist, _indices = tree.look_up(mesh.vertices)
    mesh.add_attribute("sq_dist")
    mesh.set_attribute("sq_dist", sq_dist)
    return mesh
def main():
    """Script entry point: convert the input SVG into a 2D triangle mesh."""
    args = parse_args();
    logger = get_logger(args.log);
    logger.info("Triangulation engine: {}".format(args.engine));
    wires = pymesh.wires.WireNetwork.create_from_file(args.input_svg);
    wires = drop_zero_dim(wires);
    if wires.num_vertices == 0:
        # Fix: Logger.warn is a deprecated alias of warning() and was
        # removed in Python 3.13.
        logger.warning("Input is empty");
    if args.with_frame and args.engine != "triwild":
        wires = add_frame(wires);
    if args.resolve_self_intersection:
        wires = resolve_self_intersection(wires, logger);
    if args.with_cleanup:
        wires = cleanup(wires, logger);
    if args.with_features:
        # TriWild feature preservation needs the sibling .json feature file.
        json_file = "{}.json".format(os.path.splitext(args.input_svg)[0]);
        assert(os.path.exists(json_file));
    else:
        json_file = None;
    basename = os.path.splitext(args.output_mesh)[0];
    wire_file = basename + ".wire";
    wires.write_to_file(wire_file);
    if args.with_triangulation:
        if os.path.splitext(args.input_svg)[1] != ".svg" and args.engine == "triwild":
            # Avoid data loss from conversion.
            assert(not args.with_cleanup);
            assert(not args.resolve_self_intersection);
            with open(args.input_svg, 'r') as fin:
                data = fin.read();
            with open(wire_file, 'w') as fout:
                fout.write(data);
        mesh = triangulate(wires, args.engine, args.stage, args.epsilon,
                           logger, wire_file, json_file);
        if mesh.num_vertices > 0 and args.with_cell_label:
            compute_cell_labels(wires, mesh, logger);
            mesh = solve_heat_equation(mesh);
            pymesh.save_mesh(args.output_mesh, mesh, "cell", "sq_dist");
        else:
            pymesh.save_mesh(args.output_mesh, mesh);
|
import tensorflow as tf
from pymystem3 import Mystem
from utils import load_dumped, get_dataframe, clean_text
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.backend import set_session
from keras.preprocessing.sequence import pad_sequences
from lenta_training import MAX_SEQUENCE_LENGTH
sess = tf.Session()
class ModelMeta(type):
    """Metaclass that loads the tokenizer, label dummies and Keras model once,
    at class-creation time, so any class using it is ready to predict
    immediately after import."""
    # NOTE: double-underscore attributes are name-mangled per class
    # (_ModelMeta__tokenizer_path etc.), keeping them private to this metaclass.
    __tokenizer_path: str = 'data/tokenizer_lenta.dump'
    __model_name = 'models/weights-08-1.09.hdf5'
    __dummies_path = 'data/dummies.dump'
    def __init__(self, name, bases, attrs):
        # Here `self` is the class being created, not an instance.
        self.tokenizer = load_dumped(self.__tokenizer_path)
        self.labels = load_dumped(self.__dummies_path)
        # Bind the shared TF1 session before loading the model weights.
        set_session(sess)
        self.model = load_model(self.__model_name)
        # Pre-builds the predict function; presumably needed so predict()
        # can run from other threads (TF1/Keras idiom) — confirm.
        self.model._make_predict_function()
        self.graph = tf.get_default_graph()
        super().__init__(name, bases, attrs)
class Model(metaclass=ModelMeta):
    """Thin prediction wrapper; the tokenizer, labels, model and graph all
    live on the class itself (loaded once by ModelMeta)."""

    def predict(self, text):
        """Return the top-5 'label: score' strings for a single text."""
        cleaned = clean_text([text, ])
        sequences = self.tokenizer.texts_to_sequences([cleaned])
        batch = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
        with self.graph.as_default():
            set_session(sess)
            predictions = self.model.predict(batch, steps=1)
        labels = []
        for row in predictions:
            # Indices of the five highest-scoring classes, best first.
            top_indices = row.argsort()[::-1][:5]
            labels.extend(f'{self.labels[i]}: {row[i]}' for i in top_indices)
        return labels
|
import subprocess
from pathlib import Path
from natsort import os_sorted
from colorama import Fore, Style, init as init_colorama
# Directory of this file ( /serene/compiler/ )
here = Path(__file__).parent.resolve()
init_colorama()

# The first compiler run starts a fresh coverage file; later runs append.
coverage_started = False
for test_path in os_sorted([Path(x) for x in here.glob("./tests/t*.sn")]):
    print()
    print('Testing', test_path.name, 'now...')
    cmd = ['coverage', 'run', 'serene', test_path, '-o', './temp/test_compiled']
    if coverage_started:
        cmd.insert(2, '--append')
    coverage_started = True
    proc = subprocess.run(cmd, cwd=here, capture_output=True, text=True)
    if proc.returncode == 0:
        print(f"{Fore.GREEN}> Success!{Style.RESET_ALL}")
    else:
        print(f"{Fore.YELLOW}{proc.stderr}{Style.RESET_ALL}")
        print(f"{Fore.RED}{Style.BRIGHT}> Failed with error code {proc.returncode}.{Style.RESET_ALL}")
|
import pyautogui
import time
import os
def main():
    """Capture 50 screenshots into ./1/, paging down between captures.

    Fix: the original saved every capture to the same file ("0.png"),
    overwriting it 50 times — the unused counter variable showed numbered
    files were intended.  Each capture now gets its own file, and the
    output directory no longer crashes the script when it already exists.
    """
    path = "./1/"
    os.makedirs(path, exist_ok=True)
    for x in range(0, 50):
        pyautogui.screenshot(path + "{}.png".format(x))
        time.sleep(0.5)  # give the page time to settle before the next capture
        pyautogui.hotkey('pagedown')
def start():
    """Count down five seconds on stdout, then announce the capture."""
    for remaining in range(5, 0, -1):
        print("{}..".format(remaining))
        time.sleep(1)
    print("Screen!")
# Module-level side effect: run the countdown, then begin capturing.
start()
main()
|
#!/usr/bin/env python2
import xml.etree.cElementTree as ET
import logging
import contextlib
import re
import subprocess
from ssg.constants import OSCAP_RULE
from ssg.constants import PREFIX_TO_NS
from ssg.constants import bash_system as bash_rem_system
from ssg.constants import ansible_system as ansible_rem_system
from ssg.constants import puppet_system as puppet_rem_system
from ssg.constants import anaconda_system as anaconda_rem_system
from ssg.constants import ignition_system as ignition_rem_system
# Map a remediation "fix type" name to the XCCDF fix @system attribute value
# used to look up remediations of that type in a datastream.
SYSTEM_ATTRIBUTE = {
    'bash': bash_rem_system,
    'ansible': ansible_rem_system,
    'puppet': puppet_rem_system,
    'anaconda': anaconda_rem_system,
    'ignition': ignition_rem_system,
}
# Library-style logging: stay silent unless the application adds handlers.
logging.getLogger(__name__).addHandler(logging.NullHandler())
def get_all_xccdf_ids_in_datastream(datastream):
    """Return the ids of all checklist component-refs in the datastream."""
    root = ET.parse(datastream).getroot()
    checklists_node = root.find(".//ds:checklists", PREFIX_TO_NS)
    if checklists_node is None:
        # NOTE(review): only logs; the findall below then raises
        # AttributeError on None — presumably acceptable, confirm.
        logging.error(
            "Checklists not found within DataStream")
    refs = checklists_node.findall('ds:component-ref', PREFIX_TO_NS)
    return [ref.get("id") for ref in refs]
def infer_benchmark_id_from_component_ref_id(datastream, ref_id):
    """Resolve a checklist component-ref id to the id of its Benchmark.

    Raises:
        RuntimeError: if the component-ref or its Benchmark is missing.
    """
    root = ET.parse(datastream).getroot()
    ref_node = root.find("*//ds:component-ref[@id='{0}']"
                         .format(ref_id), PREFIX_TO_NS)
    if ref_node is None:
        raise RuntimeError(
            'Component reference of Ref-Id {} not found within datastream'
            .format(ref_id))
    # The xlink:href is "#<component-id>"; strip the leading '#'.
    comp_id = ref_node.get('{%s}href' % PREFIX_TO_NS['xlink']).lstrip('#')
    benchmark_node = root.find(
        ".//ds:component[@id='{}']/xccdf-1.2:Benchmark".format(comp_id),
        PREFIX_TO_NS)
    if benchmark_node is None:
        raise RuntimeError(
            'Benchmark not found within component of Id {}'.format(comp_id))
    return benchmark_node.get('id')
@contextlib.contextmanager
def datastream_root(ds_location, save_location=None):
    """Context manager that yields the datastream's XML root element.

    On exit the (possibly modified) tree is written back to
    *save_location*, when one is given.

    Fix: the tree is now parsed *before* entering try/finally.  In the
    original, a parse failure raised inside the try block and the finally
    clause then hit an UnboundLocalError on `tree`, masking the real error.
    """
    tree = ET.parse(ds_location)
    # Re-register the well-known prefixes so serialization keeps them.
    for prefix, uri in PREFIX_TO_NS.items():
        ET.register_namespace(prefix, uri)
    try:
        yield tree.getroot()
    finally:
        if save_location:
            tree.write(save_location)
def remove_machine_platform(root):
    """Strip the cpe:/a:machine platform from every Rule and Group."""
    for element_spec in ("xccdf-1.2:Rule", "xccdf-1.2:Group"):
        remove_machine_only_from_element(root, element_spec)
def remove_machine_only_from_element(root, element_spec):
    """Drop <platform idref="cpe:/a:machine"> children from matching elements."""
    query = ".//ds:component/xccdf-1.2:Benchmark//{0}".format(element_spec)
    for element in root.findall(query, PREFIX_TO_NS):
        machine_platforms = [
            platform
            for platform in element.findall("./xccdf-1.2:platform", PREFIX_TO_NS)
            if platform.get("idref") == "cpe:/a:machine"
        ]
        for platform in machine_platforms:
            element.remove(platform)
def get_oscap_supported_cpes():
    """
    Obtain a list of CPEs that the scanner supports
    """
    result = []
    proc = subprocess.Popen(
        ("oscap", "--version"), text=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        outs, errs = proc.communicate(timeout=3)
    except subprocess.TimeoutExpired:
        # Fix: logging.warn is a deprecated alias of warning()
        # (removed in Python 3.13).
        logging.warning("Scanner timeouted when asked about supported CPEs")
        proc.kill()
        return []
    if proc.returncode != 0:
        # Deliberately continue: stdout may still contain usable CPE lines.
        first_error_line = errs.split("\n")[0]
        logging.warning("Error getting CPEs from the scanner: {msg}".format(msg=first_error_line))
    # Collect every token of the form "cpe:..." ending a line.
    cpe_regex = re.compile(r'\bcpe:\S+$')
    for line in outs.split("\n"):
        match = cpe_regex.search(line)
        if match:
            result.append(match.group(0))
    return result
def add_platform_to_benchmark(root, cpe_regex):
    """Insert scanner-supported CPE platforms matching *cpe_regex* into
    every Benchmark of the datastream.

    When the scanner reports no matching CPE, the raw *cpe_regex* string
    itself is inserted as the platform idref.

    Raises:
        RuntimeError: if the datastream contains no Benchmark at all.
    """
    benchmark_query = ".//ds:component/xccdf-1.2:Benchmark"
    benchmarks = root.findall(benchmark_query, PREFIX_TO_NS)
    if not benchmarks:
        raise RuntimeError("No benchmarks found in the datastream")
    all_cpes = get_oscap_supported_cpes()
    regex = re.compile(cpe_regex)
    cpes_to_add = [cpe_str for cpe_str in all_cpes if regex.search(cpe_str)]
    if not cpes_to_add:
        cpes_to_add = [cpe_regex]
    for benchmark in benchmarks:
        existing_platform_element = benchmark.find("xccdf-1.2:platform", PREFIX_TO_NS)
        if existing_platform_element is None:
            # Fix: logging.warn is a deprecated alias of warning()
            # (removed in Python 3.13).
            logging.warning(
                "Couldn't find platform element in a benchmark, "
                "not adding any additional platforms as a result.")
            continue
        # Insert the new platforms before the existing one to keep schema order.
        platform_index = list(benchmark).index(existing_platform_element)
        for cpe_str in cpes_to_add:
            e = ET.Element("xccdf-1.2:platform", idref=cpe_str)
            benchmark.insert(platform_index, e)
def _get_benchmark_node(datastream, benchmark_id, logging):
    """Find the Benchmark element with *benchmark_id*; returns None if absent.

    Note: the third parameter intentionally shadows the logging module —
    callers pass a logger-like object, or None to suppress error output.
    """
    root = ET.parse(datastream).getroot()
    node = root.find(
        "*//xccdf-1.2:Benchmark[@id='{0}']".format(benchmark_id), PREFIX_TO_NS)
    if node is None and logging is not None:
        logging.error(
            "Benchmark ID '{}' not found within DataStream"
            .format(benchmark_id))
    return node
def get_all_profiles_in_benchmark(datastream, benchmark_id, logging=None):
    """List every Profile element of the given benchmark."""
    benchmark = _get_benchmark_node(datastream, benchmark_id, logging)
    return benchmark.findall('xccdf-1.2:Profile', PREFIX_TO_NS)
def get_all_rule_selections_in_profile(datastream, benchmark_id, profile_id, logging=None):
    """List the <select selected="true"> elements of one profile."""
    benchmark = _get_benchmark_node(datastream, benchmark_id, logging)
    profile = benchmark.find(
        "xccdf-1.2:Profile[@id='{0}']".format(profile_id), PREFIX_TO_NS)
    return profile.findall("xccdf-1.2:select[@selected='true']", PREFIX_TO_NS)
def get_all_rule_ids_in_profile(datastream, benchmark_id, profile_id, logging=None):
    """Return the selected rule ids of a profile, XCCDF 1.2 prefix stripped.

    Fix: the caller-supplied *logging* object is now forwarded; the
    original hard-coded logging=None, silently discarding it.
    """
    rule_selections = get_all_rule_selections_in_profile(
        datastream, benchmark_id, profile_id, logging=logging)
    rule_ids = [select.get("idref") for select in rule_selections]
    # Strip xccdf 1.2 prefixes from rule ids
    # Necessary to search for the rules within test scenarios tree
    prefix_len = len(OSCAP_RULE)
    return [rule[prefix_len:] for rule in rule_ids]
def benchmark_get_applicable_platforms(datastream, benchmark_id, logging=None):
    """
    Returns a set of CPEs the given benchmark is applicable to.
    """
    benchmark = _get_benchmark_node(datastream, benchmark_id, logging)
    platforms = benchmark.findall('xccdf-1.2:platform', PREFIX_TO_NS)
    return set(platform.get("idref") for platform in platforms)
def find_rule_in_benchmark(datastream, benchmark_id, rule_id, logging=None):
    """
    Returns rule node from the given benchmark (None if not found).
    """
    benchmark = _get_benchmark_node(datastream, benchmark_id, logging)
    return benchmark.find(
        ".//xccdf-1.2:Rule[@id='{0}']".format(rule_id), PREFIX_TO_NS)
def find_fix_in_benchmark(datastream, benchmark_id, rule_id, fix_type='bash', logging=None):
    """
    Return fix from benchmark. None if not found.
    """
    rule = find_rule_in_benchmark(datastream, benchmark_id, rule_id, logging)
    if rule is None:
        return None
    # Unknown fix types fall back to the bash remediation system.
    system = SYSTEM_ATTRIBUTE.get(fix_type, bash_rem_system)
    return rule.find("xccdf-1.2:fix[@system='{0}']".format(system), PREFIX_TO_NS)
|
#!/usr/bin/python3
import socket
import time
from picamera2.encoders import H264Encoder
from picamera2.outputs import FileOutput
from picamera2 import Picamera2

# Stream 20 seconds of 720p H.264 video from the camera over UDP.
picam2 = Picamera2()
video_config = picam2.video_configuration({"size": (1280, 720)})
picam2.configure(video_config)
encoder = H264Encoder(1000000)  # presumably the target bitrate — confirm units
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
    # "REMOTEIP" is a placeholder: replace with the receiver's address.
    sock.connect(("REMOTEIP", 10001))
    stream = sock.makefile("wb")
    picam2.start_recording(encoder, FileOutput(stream))
    time.sleep(20)  # record for 20 seconds
    picam2.stop_recording()
|
import unittest
from tests.base import BaseTestCase
class ApiGroupApiTestCase(BaseTestCase):
    """Placeholder tests for the API-group endpoints; bodies not yet written."""
    def test_get_api_group_list(self):
        # TODO: exercise the list endpoint.
        pass
    def test_new_api_group(self):
        # TODO: exercise group creation.
        pass
    def test_delete_api_group(self):
        # TODO: exercise group deletion.
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
import logging
import os
import random
from pathlib import Path
from typing import Any, Dict, Tuple
import matplotlib.pyplot as plt
import numpy as np
import yaml
np.set_printoptions(suppress=True)  # print floats without scientific notation
plt.rcParams["font.size"] = 16
BASE_DIR = Path(__file__).resolve().parents[2]  # python_project_template
SRC_DIR = BASE_DIR / "src"
LOG_FILE = BASE_DIR / "logs" / "log.txt"  # NOTE(review): logs/ must exist — confirm
class CFG:
    """Read-only view of src/config/config.yml; keys resolve as attributes."""

    def __init__(self) -> None:
        config_path = SRC_DIR / "config" / "config.yml"
        with open(config_path) as file:
            self._config = yaml.safe_load(file.read())

    def __getattr__(self, name: str) -> Any:
        # Only invoked for names not found through normal attribute lookup.
        if name in self._config:
            return self._config[name]
        raise AttributeError(f"{name} is not found in config")
def set_seed(seed: int = 4) -> None:
    """Fix all random seeds for reproducibility.

    Args:
        seed (int, optional): the seed value. Defaults to 4.
    """
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    # tf.random.set_seed(CFG.seed) # type: ignore
# tf.random.set_seed(CFG.seed) # type: ignore
# Module-level side effect: load the config and seed every RNG on import.
cfg = CFG()
set_seed(cfg.seed)
class CustomLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter that injects a default ``key`` entry into ``extra``.

    When a log call omits ``extra`` (or its ``key`` entry), the adapter's
    default extra dict is substituted, so formatters that reference
    ``{key}`` never fail.

    Args:
        logging (Logger): the logger being adapted.
    """

    def process(  # type: ignore[override]
        self, msg: str, kwargs: Dict[str, Any]
    ) -> Tuple[str, Dict[str, Any]]:
        """Invoked on every log call; patches in the default ``extra``.

        Args:
            msg (str): the log message.
            kwargs (Dict[str, Any]): keyword args of the log call,
                e.g. {'extra': {'key': 'deprecated'}}.

        Returns:
            Tuple[str, Dict[str, Any]]: the message plus patched kwargs.
        """
        if "extra" not in kwargs or "key" not in kwargs["extra"]:
            kwargs["extra"] = self.extra
        return msg, kwargs
def set_logger(modname: str, set_level: str = cfg.mode) -> logging.Logger:
    """Build a stream+file logger wrapped in :class:`CustomLoggerAdapter`.

    Args:
        modname (str): module name (``__name__`` recommended).
        set_level (str): minimum level the handlers emit (case-insensitive).

    Raises:
        ValueError: if *set_level* is not a recognised level name.

    Example:
        >>> logger = set_logger(__name__, set_level="WARNING")
        >>> logger.warning("data", extra={"key": "2dimensional_data"})
    """
    set_level = set_level.upper()
    if set_level not in ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]:
        # BUG FIX: the original literal lacked the f-prefix, so the text
        # "{set_level}" was shown instead of the offending value.
        raise ValueError(f"set_levelが不正です: {set_level}")
    formatter = logging.Formatter(
        "----------\n{asctime} [{levelname}] (unknown) {funcName} {lineno}\n"
        "{key}\n\n{message}\n",
        "%Y-%m-%d %H:%M:%S",
        style="{",
    )
    # Stream handler: mirrors records to the console at the requested level.
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    stream_handler.setLevel(getattr(logging, set_level))
    # File handler: appends the same records to logs/log.txt.
    file_handler = logging.FileHandler(LOG_FILE)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(getattr(logging, set_level))
    logger = logging.getLogger(modname)
    # Logger itself stays wide open; filtering happens in the handlers.
    logger.setLevel(logging.DEBUG)
    # NOTE(review): calling set_logger() twice with the same modname stacks
    # duplicate handlers; consider clearing logger.handlers first.
    logger.addHandler(stream_handler)
    logger.addHandler(file_handler)
    logger.propagate = False
    # Default "key" extra used when a log call omits one (see adapter above).
    default_extra = {"key": "None"}
    return CustomLoggerAdapter(logger, default_extra)  # type: ignore
|
# Embed templates for the emoji/reaction logger. The "{...}" placeholders
# (member, channel, emoji, guild) are filled in later by the consuming code;
# they are NOT interpolated here.
reaction_logger = {
    # Embed used when an emoji/reaction is added.
    "added": {
        "title": "Emoji added:",
        "content": "User: {member.mention} (`{member.id}`)\n"
                   "Channel: {channel.mention} (`{channel.id}`)\n"
                   "Emoji: {emoji.name}\n",
        "footer": {
            "text": "Emoji Logger",
            "icon": "{guild.icon_url}",
            "timestamp": True
        },
        # Fixed green colour for additions (random disabled).
        "color": {
            "random": False,
            "color": 0x00ff00
        }
    },
    # Embed used when an emoji/reaction is removed.
    "removed": {
        "title": "Emoji removed:",
        "content": "User: {member.mention} (`{member.id}`)\n"
                   "Channel: {channel.mention} (`{channel.id}`)\n"
                   "Emoji: {emoji.name}\n",
        "footer": {
            "text": "Emoji Logger",
            "icon": "{guild.icon_url}",
            "timestamp": True
        },
        # Fixed red colour for removals (random disabled).
        "color": {
            "random": False,
            "color": 0xff0000
        }
    }
}
|
# coding=utf8
import os
from .geo import geo_entity_extractor, geo_gnn_entity_matcher
from .atis import atis_entity_extractor, atis_gnn_entity_matcher
def get_gnn_entity_matcher(task, language):
    """
    Build the GNN entity matcher for a task/language pair.
    (Docstring added for consistency with the sibling factory functions.)
    :param task: dataset name, 'geo' or 'atis'
    :param language: logical-form language ('funql', 'prolog', 'lambda', ...)
    :return: matcher instance, or None when the pair is unsupported
    """
    matcher = None
    # Hoisted: both tasks resolve paths under the same ../data root.
    data_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data')
    if task == 'geo':
        entity_path = os.path.join(data_root, 'geo', 'geo_entities.json')
        if language in ['funql', 'prolog']:
            matcher = geo_gnn_entity_matcher.GeoGNNEntityMatcher(entity_path)
        elif language == 'lambda':
            matcher = geo_gnn_entity_matcher.GeoLambdaCalculusGNNEntityMatcher(entity_path)
        elif language == 'sql':
            matcher = geo_gnn_entity_matcher.GeoSQLGNNEntityMatcher(entity_path)
    elif task == 'atis':
        db_path = os.path.join(data_root, 'atis', 'db')
        if language in ['lambda', 'lambda2', 'lambda3', 'lambda4']:
            matcher = atis_gnn_entity_matcher.ATISGNNLambdaCalculusEntityMatcher(db_path)
        elif language in ['funql', 'prolog']:
            matcher = atis_gnn_entity_matcher.ATISGNNEntityMatcher(db_path)
    return matcher
def get_gnn_entity_extractor(task, language):
    """
    Extract entities from logical form
    :param task: dataset name, 'geo' or 'atis'
    :param language: logical-form language
    :return: extractor callable, or None when the pair is unsupported
    """
    # Table-driven dispatch: map (task, language) to an attribute name on
    # the task's extractor module.
    attr_by_language = {
        'geo': {
            'funql': 'funql_entity_extractor',
            'prolog': 'prolog_entity_extractor',
            'lambda': 'lambda_calculus_entity_extractor',
            'sql': 'sql_entity_extractor',
        },
        'atis': {
            'lambda': 'lambda_calculus_entity_extractor',
            'funql': 'funql_entity_extractor',
            'prolog': 'prolog_entity_extractor',
        },
    }
    attr = attr_by_language.get(task, {}).get(language)
    if attr is None:
        return None
    module = geo_entity_extractor if task == 'geo' else atis_entity_extractor
    return getattr(module, attr)
def get_gnn_entity_replacer(task, language):
    """
    Replace entities in logical form with recognized entities from utterance
    :param task: dataset name, 'geo' or 'atis'
    :param language: logical-form language
    :return: replacer callable, or None when the pair is unsupported
    """
    # Table-driven dispatch, mirroring get_gnn_entity_extractor above.
    attr_by_language = {
        'geo': {
            'funql': 'replace_funql_entity',
            'prolog': 'replace_prolog_entity',
            'lambda': 'replace_lambda_calculus_entity',
            'sql': 'replace_sql_entity',
        },
        'atis': {
            'lambda': 'replace_lambda_calculus_entity',
            'funql': 'replace_funql_entity',
            'prolog': 'replace_prolog_entity',
        },
    }
    attr = attr_by_language.get(task, {}).get(language)
    if attr is None:
        return None
    module = geo_entity_extractor if task == 'geo' else atis_entity_extractor
    return getattr(module, attr)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ADS-B Cursor-on-Target Gateway Function Tests."""
import asyncio
import csv
import io
import urllib
import xml.etree.ElementTree
import pytest
import stratuxcot
import stratuxcot.functions
__author__ = 'Greg Albrecht W2GMD <oss@undef.net>'
__copyright__ = 'Copyright 2021 Orion Labs, Inc.'
__license__ = 'Apache License, Version 2.0'
# Sample JSON data:
#
# {
# 'Icao_addr': 11160165,
# 'Reg': 'N762QS',
# 'Tail': 'N762QS',
# 'Squawk': 0,
# 'Lat': 37.89692,
# 'Lng': -122.74547,
# 'Addr_type': 0,
# 'Age': 28.29,
# 'AgeLastAlt': 1.33,
# 'Alt': 21850,
# 'AltIsGNSS': False,
# 'Bearing': 0,
# 'BearingDist_valid': False,
# 'Distance': 0,
# 'Emitter_category': 0,
# 'ExtrapolatedPosition': False,
# 'GnssDiffFromBaroAlt': -275,
# 'LastSent': '0001-01-01T00:39:16.44Z',
# 'Last_GnssDiff': '0001-01-01T00:39:53.84Z',
# 'Last_GnssDiffAlt': 21775,
# 'Last_alt': '0001-01-01T00:39:54.77Z',
# 'Last_seen': '0001-01-01T00:39:54.77Z',
# 'Last_source': 1,
# 'Last_speed': '0001-01-01T00:39:53.84Z',
# 'NACp': 10,
# 'NIC': 8,
# 'OnGround': False,
# 'Position_valid': True,
# 'PriorityStatus': 0,
# 'SignalLevel': -28.21023052706831,
# 'Speed': 340,
# 'Speed_valid': True,
# 'TargetType': 1,
# 'Timestamp': '2020-11-06T19:58:06.234Z',
# 'Track': 249,
# 'Vvel': 3392
# }
#
#
# "Last_seen":"0001-01-01T00:43:19.61Z" (ws://192.168.10.1/traffic) 0001-01-01 is day zero,
# +
# "GPSTime":"2020-05-12T08:27:10Z" (http://192.168.10.1/getSituation)
# -
# ("Uptime":2610230,ms)"UptimeClock":"0001-01-01T00:43:30.23Z" (http://192.168.10.1/getStatus)
# = Timestamp of traffic "event"
#
#
# This is an illuminated/commented version of the traffic output from StratuX:
# type TrafficInfo struct {
# Icao_addr uint32 // decimal version of (ICAO HEX or ICAO OCTAL)
# Reg string // Registration. Calculated from Icao_addr for civil aircraft of US registry.
# Tail string // Callsign. Transmitted by aircraft. 8 Characters max including spaces
# Emitter_category uint8 // Formatted using GDL90 standard 3.5.1.10 Table 11, e.g. in a Mode ES report, A7 becomes 0x07, B0 becomes 0x08, etc.
# OnGround bool // Air-ground status. On-ground is "true".
# Addr_type uint8 // UAT address qualifier. Used by GDL90 format, so translations for ES TIS-B/ADS-R are needed. 3.5.1.2 Target Identity
# (GDL90 ICD)
# TargetType uint8 // types decribed in const above https://github.com/cyoung/stratux/blob/master/main/traffic.go#L66
# SignalLevel float64 // Signal level, dB RSSI.
# Squawk int // Squawk code
# Position_valid bool // false = MODE-S message without location data
# Lat float32 // decimal degrees, north positive
# Lng float32 // decimal degrees, east positive
# Alt int32 // Pressure altitude, feet
# GnssDiffFromBaroAlt int32 // GNSS altitude above WGS84 datum. Reported in TC 20-22 messages (negative = below BaroAlt, smaller magnitude)
# AltIsGNSS bool // Pressure alt = 0; GNSS alt = 1
# NIC int // Navigation Integrity Category.
# NACp int // Navigation Accuracy Category for Position.
# Track uint16 // degrees true
# Speed uint16 // knots
# Speed_valid bool // set when speed report received.
# Vvel int16 // feet per minute
# Timestamp time.Time // timestamp of traffic message, UTC
# PriorityStatus uint8 // Emergency or priority code as defined in GDL90 spec, DO-260B (Type 28 msg) and DO-282B
# // Parameters starting at 'Age' are calculated from last message receipt on each call of sendTrafficUpdates().
# // Mode S transmits position and track in separate messages, and altitude can also be
# // received from interrogations.
# Age float64 // Age of last valid position fix, seconds ago.
# AgeLastAlt float64 // Age of last altitude message, seconds ago.
# Last_seen time.Time // Time of last position update (stratuxClock). Used for timing out expired data.
# Last_alt time.Time // Time of last altitude update (stratuxClock).
# Last_GnssDiff time.Time // Time of last GnssDiffFromBaroAlt update (stratuxClock).
# Last_GnssDiffAlt int32 // Altitude at last GnssDiffFromBaroAlt update.
# Last_speed time.Time // Time of last velocity and track update (stratuxClock).
# Last_source uint8 // Last frequency on which this target was received.
# ExtrapolatedPosition bool //TODO: True if Stratux is "coasting" the target from last known position.
# BearingDist_valid bool // set when bearing and distance information is valid
# Bearing float64 // Bearing in degrees true to traffic from ownship, if it can be calculated. Units: degrees.
# Distance float64 // Distance to traffic from ownship, if it can be calculated. Units: meters.
# //FIXME: Rename variables for consistency, especially "Last_".
#
@pytest.fixture
def sample_craft():
    """A single well-formed StratuX traffic report (see TrafficInfo notes above)."""
    return {
        "Icao_addr":10698088,
        "Reg":"N308DU",
        "Tail":"DAL1352",
        "Emitter_category":3,
        "OnGround": False,
        "Addr_type":0,
        "TargetType":1,
        "SignalLevel":-35.5129368009492,
        "Squawk":3105,
        "Position_valid":True,
        "Lat":37.46306,
        "Lng":-122.264626,
        "Alt":7325,
        "GnssDiffFromBaroAlt":25,
        "AltIsGNSS":False,
        "NIC":8,
        "NACp":10,
        "Track":135,
        "Speed":262,
        "Speed_valid":True,
        "Vvel":-1600,
        "Timestamp":"2021-05-19T23:13:18.484Z",
        "PriorityStatus":0,
        "Age":29.85,
        "AgeLastAlt":29.83,
        "Last_seen":"0001-01-01T16:43:24.75Z",
        "Last_alt":"0001-01-01T16:43:24.77Z",
        "Last_GnssDiff":"0001-01-01T16:43:24.54Z",
        "Last_GnssDiffAlt":7700,
        "Last_speed":"0001-01-01T16:43:24.54Z",
        "Last_source":1,
        "ExtrapolatedPosition":False,
        "BearingDist_valid":True,
        "Bearing":148.05441175901748,
        "Distance":38889.68863349082,
        "LastSent":"0001-01-01T16:43:22.85Z"
    }
@pytest.fixture
def sample_known_craft():
    """Known-craft table: rows parsed from an inline CSV sample."""
    sample_csv = """DOMAIN,AGENCY,REG,CALLSIGN,TYPE,MODEL,HEX,COT,TYPE,,
EMS,CALSTAR,N832CS,CALSTAR7,HELICOPTER,,,a-f-A-C-H,HELICOPTER,,
EMS,REACH AIR MEDICAL,N313RX,REACH16,HELICOPTER,,,a-f-A-C-H,HELICOPTER,,
FED,USCG,1339,C1339,FIXED WING,,,,FIXED WING,,
FIRE,USFS,N143Z,JUMPR43,FIXED WING,DH6,,a-f-A-C-F,FIXED WING,,
FIRE,,N17085,TNKR_911,FIXED WING,,,a-f-A-C-F,FIXED WING,,
FIRE,CAL FIRE,N481DF,C_104,HELICOPTER,,,a-f-A-C-H,HELICOPTER,,
FOOD,EL FAROLITO,N739UL,TACO_01,HELICOPTER,,,a-f-A-T-A-C-O,HELICOPTER,,
FOOD,EL FAROLITO,DAL1352,TACO_02,FIXED WING,,,a-f-A-T-A-C-O,FIXED WING,,
"""
    reader = csv.DictReader(io.StringIO(sample_csv))
    all_rows = list(reader)
    print(all_rows)
    return all_rows
def test_stratux_to_cot_raw(sample_craft):
    """stratux_to_cot_raw() should render a craft dict as a CoT <event> tree."""
    print(sample_craft)
    cot = stratuxcot.functions.stratux_to_cot_raw(sample_craft)
    print(cot)
    assert isinstance(cot, xml.etree.ElementTree.Element)
    assert cot.tag == "event"
    assert cot.attrib["version"] == "2.0"
    # No known-craft override: UID comes from the ICAO address
    # (10698088 == 0xA33D68).
    assert cot.attrib["type"] == "a-.-A-C-F"
    assert cot.attrib["uid"] == "ICAO-A33D68"
    point = cot.findall("point")
    assert point[0].tag == "point"
    assert point[0].attrib["lat"] == "37.46306"
    assert point[0].attrib["lon"] == "-122.264626"
    # hae is the 7325 ft altitude converted to metres (7325 * 0.3048).
    assert point[0].attrib["hae"] == "2232.6600000000003"
    detail = cot.findall("detail")
    assert detail[0].tag == "detail"
    assert detail[0].attrib["uid"] == "ICAO-A33D68"
    track = detail[0].findall("track")
    assert track[0].attrib["course"] == "135"
    # speed is 262 kt converted to m/s (262 * 0.514444).
    assert track[0].attrib["speed"] == "134.78432800000002"
def test_stratux_to_cot(sample_craft):
    """stratux_to_cot() should serialise the same event to XML bytes."""
    cot = stratuxcot.stratux_to_cot(sample_craft)
    assert isinstance(cot, bytes)
    assert b"a-.-A-C-F" in cot
    assert b"DAL1352" in cot
    assert b"ICAO-A33D68" in cot
    assert b'speed="134.78432800000002"' in cot
def test_stratux_to_cot_raw_with_known_craft(sample_craft, sample_known_craft):
    """A known-craft CSV match should override CoT type and detail UID."""
    # Match the craft's callsign (Tail) against the REG column of the
    # known-craft table; fall back to an empty dict when nothing matches.
    known_craft_key = "REG"
    filter_key = sample_craft["Tail"].strip().upper()
    known_craft = (list(filter(
        lambda x: x[known_craft_key].strip().upper() == filter_key, sample_known_craft)) or
        [{}])[0]
    cot = stratuxcot.functions.stratux_to_cot_raw(sample_craft, known_craft=known_craft)
    assert isinstance(cot, xml.etree.ElementTree.Element)
    assert cot.tag == "event"
    assert cot.attrib["version"] == "2.0"
    # CoT type now comes from the matched CSV row (TACO_02 / a-f-A-T-A-C-O).
    assert cot.attrib["type"] == "a-f-A-T-A-C-O"
    assert cot.attrib["uid"] == "ICAO-A33D68"
    point = cot.findall("point")
    assert point[0].tag == "point"
    assert point[0].attrib["lat"] == "37.46306"
    assert point[0].attrib["lon"] == "-122.264626"
    assert point[0].attrib["hae"] == "2232.6600000000003"
    detail = cot.findall("detail")
    assert detail[0].tag == "detail"
    # The detail UID is replaced by the known craft's callsign.
    assert detail[0].attrib["uid"] == "TACO_02"
    track = detail[0].findall("track")
    assert track[0].attrib["course"] == "135"
    assert track[0].attrib["speed"] == "134.78432800000002"
def test_negative_stratux_to_cot():
    """stratux_to_cot() must return None for malformed traffic data."""
    sample_craft = {"taco": "burrito"}
    cot = stratuxcot.stratux_to_cot(sample_craft)
    # BUG FIX: compare against None with `is`, not `==` (PEP 8 / E711).
    assert cot is None
|
#!/usr/bin/env python
import requests, socket
# No-IP account credentials (fill in before running).
username = ""
password = ""
hostname = ""  # your domain name hosted in no-ip.com

# Gets the current public IP of the host machine.
# NOTE(review): both lookups below run at import time and will raise when
# the network is unavailable or hostname is empty.
myip = requests.get('http://api.ipify.org').text
# Gets the existing dns ip pointing to the hostname.
old_ip = socket.gethostbyname(hostname)

# Noip API - dynamic DNS update.
# https://www.noip.com/integrate/request.
def update_dns(config):
    """Push a dynamic-DNS update to No-IP.

    Args:
        config: a ``(username, password, hostname, myip)`` tuple.

    Prints the response body when No-IP does not answer 200 OK.
    """
    username, password, hostname, myip = config
    # SECURITY FIX: use HTTPS and HTTP basic auth instead of embedding
    # credentials in a plaintext http:// URL.
    r = requests.get(
        "https://dynupdate.no-ip.com/nic/update",
        params={"hostname": hostname, "myip": myip},
        auth=(username, password),
    )
    if r.status_code != requests.codes.ok:
        print(r.content)
# Update only when ip is different.
if myip != old_ip:
    update_dns( (username, password, hostname, myip) )
# NOTE(review): the trailing `pass` is a leftover no-op.
pass
|
import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
# Pull in local environment variables (env.py is git-ignored and only
# present in development).
if os.path.exists("env.py"):
    import env
app = Flask(__name__)
# Mongo connection settings and the session-signing key come from the
# environment.
app.config["MONGO_DB"] = os.environ.get("MONGO_DB")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
@app.route("/")
@app.route("/home")
def home():
    # LIST ALL OF THE DRINKS IN THE DATABASE
    # Sorted ascending (1) by drink_name.
    drinks = mongo.db.drinks.find().sort("drink_name", 1)
    return render_template("drinks.html", drinks=drinks)
@app.route("/search", methods=["GET", "POST"])
def search():
    """Full-text search on the drinks collection from the homepage search box."""
    # Renamed local from `search` to `query`: it shadowed this view function.
    query = request.form.get("search")
    # Requires a text index on the drinks collection.
    drinks = mongo.db.drinks.find({"$text": {"$search": query}})
    return render_template("drinks.html", drinks=drinks)
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create a new account and start a session for it.

    FIX (naming): the new-user dict used to be called `register`, shadowing
    this view function inside its own body.
    """
    if request.method == "POST":
        # CHECK IF USERNAME OR EMAIL IS ALREADY REGISTERED ON SITE
        existing_user = mongo.db.users.find_one(
            {"username": request.form.get("username").lower()})
        # ERROR MESSAGE IF USERNAME ALREADY EXISTS
        if existing_user:
            flash("Sorry, that username already exists")
            return redirect(url_for("register"))
        existing_email = mongo.db.users.find_one(
            {"email": request.form.get("email").lower()})
        # ERROR MESSAGE IF EMAIL ALREADY EXISTS
        if existing_email:
            flash("Sorry, that email's already registered")
            return redirect(url_for("register"))
        # DETAILS TO REGISTER IN MONGO DB FOR NEW USERS
        new_user = {
            "your_name": request.form.get("your_name").lower(),
            "username": request.form.get("username").lower(),
            "email": request.form.get("email").lower(),
            # Only the salted hash of the password is stored.
            "password": generate_password_hash(
                request.form.get("password")),
            "d_o_b": request.form.get("d_o_b").lower()
        }
        mongo.db.users.insert_one(new_user)
        # START USER IN A SESSION
        session["user"] = request.form.get("username").lower()
        flash("You have been successfully registered")
        return redirect(url_for("profile", username=session["user"]))
    return render_template("register.html")
@app.route("/login", methods=["GET", "POST"])
def login():
    """Log an existing user in; GET renders the form, POST authenticates."""
    if request.method == "POST":
        submitted = request.form.get("username")
        existing_user = mongo.db.users.find_one(
            {"username": submitted.lower()})
        # One combined check; the identical error message for a bad
        # username and a bad password avoids leaking which usernames exist.
        if existing_user and check_password_hash(
                existing_user["password"], request.form.get("password")):
            session["user"] = submitted.lower()
            flash("Welcome back {}".format(request.form.get("username")))
            return redirect(url_for("profile", username=session["user"]))
        flash("Username and/or password is incorrect")
        return redirect(url_for("login"))
    return render_template("login.html")
@app.route("/profile/<username>", methods=["GET", "POST"])
def profile(username):
    """Render the signed-in user's profile page.

    BUG FIX: the original read session["user"] before verifying a session
    existed, so logged-out visitors hit a KeyError instead of being
    redirected to the login page.
    """
    if not session.get("user"):
        # RETURNS USER TO LOGIN PAGE
        return redirect(url_for("login"))
    # LOAD PROFILE PAGE FOR THE SESSION'S USER
    username = mongo.db.users.find_one(
        {"username": session["user"]})["username"]
    return render_template("profile.html", username=username)
@app.route("/logout")
def logout():
    """End the user's session and return them to the login page."""
    # LOGS USER OUT OF THEIR SESSION
    flash("You have logged out")
    # ROBUSTNESS: pop with a default so visiting /logout without an active
    # session no longer raises KeyError.
    session.pop("user", None)
    return redirect(url_for("login"))
@app.route("/add_drink", methods=["GET", "POST"])
def add_drink():
    # IF USER IS POSTING INFORMATION TO THE WEBSITE
    if request.method == "POST":
        new_drink = {
            "drink_category": request.form.get("drink_category"),
            "drink_name": request.form.get("drink_name"),
            "drink_ingredients": request.form.get("drink_ingredients"),
            "drink_instructions": request.form.get("drink_instructions"),
            # NOTE(review): "preperation_time" is misspelled but must stay
            # consistent with the form fields and stored documents.
            "preperation_time": request.form.get("preperation_time"),
            "serves": request.form.get("serves"),
            "created_by": session["user"]
        }
        mongo.db.drinks.insert_one(new_drink)
        flash("Drink added to database")
        return redirect(url_for("home"))
    return render_template("add_drink.html")
@app.route("/edit_drink/<drink_id>", methods=["GET", "POST"])
def edit_drink(drink_id):
    """Edit an existing drink; GET shows the form, POST saves the changes.

    FIXES: removed the temporary debug prints (marked deletable in the
    original), and replaced the deprecated Collection.update() with
    replace_one(), which performs the same whole-document replacement.
    """
    if request.method == "POST":
        new_drink = {
            "drink_category": request.form.get("drink_category"),
            "drink_name": request.form.get("drink_name"),
            "drink_ingredients": request.form.get("drink_ingredients"),
            "drink_instructions": request.form.get("drink_instructions"),
            # Key spelling must match the form fields and stored documents.
            "preperation_time": request.form.get("preperation_time"),
            "serves": request.form.get("serves"),
            "created_by": session["user"]
        }
        mongo.db.drinks.replace_one({"_id": ObjectId(drink_id)}, new_drink)
        flash("Drink has been updated")
        return redirect(url_for("home"))
    drink = mongo.db.drinks.find_one({"_id": ObjectId(drink_id)})
    return render_template("edit_drink.html", drink=drink)
@app.route("/delete_drink/<drink_id>")
def delete_drink(drink_id):
    """Delete the drink identified by its ObjectId and return home.

    FIX: Collection.remove() is deprecated in PyMongo; delete_one() removes
    the same single document matched by _id.
    """
    # FINDS DRINK BY ITS UNIQUE ID AND DELETE IT
    mongo.db.drinks.delete_one({"_id": ObjectId(drink_id)})
    flash("Drink has been deleted", "info")
    return redirect(url_for("home"))
@app.route("/shop")
def shop():
    # PAGE FOR USERS TO SHOP FOR COCKTAIL ESSENTIALS (static template).
    return render_template("shop.html")
if __name__ == "__main__":
    # Host/port come from the environment; debug stays off for production.
    app.run(host=os.environ.get("IP"),
            port=int(os.environ.get("PORT")),
            debug=False)
|
data = """FBBBFBBLRR
BFFFBBFLRR
BFBFBBFLLR
BBFFFFBLLR
FBBFBBFRLL
BBFFFFBRLL
FBBFBFFLLR
BFFBBBFRRL
FFBFBFFRLR
FBFFFFBLLL
FBFFFFFLRL
FFFBFBBRLR
FFBFFFFLLL
BFBBBFFLLL
FFBBBFBRLR
BFFBFBFLLL
FBFBFFFLLL
BBFFBBBRRL
FBFFBFBLLL
BFFBFBFRRL
FBFBFFFRLR
BFBBBFBRLL
FFBBFBFLRL
FBBFFBFRRR
BFBBBBBLRL
FFBFBFBLRL
FFBFFFFLRL
BFBFBFBRRR
FBBBBBBRRR
BFBFFBBRLL
BBFFBFBLLL
BFBFBBFRLL
FBFBBBBRLR
BFFBBFBRRL
BFBBBFBRRR
FBBBFBBLRL
BBFFBBBLRL
FFBBFFFRRR
FBFFBFFLLL
FBFFBBFRLL
FBBBFBFRLL
BFFBBFFLRL
FFFBFBBRRR
BFBBBBFRLR
FBFBFBBRRL
BFFFFFFRRL
BFBBBBFRRL
FBFBBFFRRL
FBFFBFFRLR
FBBFFBBRRR
FBFBBFFRRR
FFFBBFFRLR
BBFFBFBRRL
FBBBBBFRRR
BFBBBBBRLL
BFFBFFBLRL
FBBBFFBRRR
BFFFBBFRLR
FFBFBBBRLL
BFBBBFFRLL
BFBBFBFRRR
FBFBFBBLLL
BBFBFBBRLL
FBBFFBBRLR
FFBBFBFLRR
FBFFFFFRLL
BFBFBBBRLR
FBFFFFBLRR
FBFFFFBRLL
BFFFFFFRLL
BBFBFFFRRL
BFBFBBBRRR
FFBFFFBLRL
BFFFBBBLLR
FBFFBFFRLL
FFFBFBBLRL
BFBFBBFRRL
FFBFFBBRLL
FBBFBFFLRR
FBFFFFFLLR
FFBFFBFRLL
FFBFFBFLRR
FBFBBBFRLR
FFBFBBFLLR
BBFFBFBLLR
FFBBBBBRRL
BFFBBBBRLL
FFBFBFFLLL
BBFFFBBRRL
FFBFFBFRRL
BBFFBFFRLR
FBFBBFFRLR
FBBFFBFRRL
FBBBFBBRRR
FFBBBBBRRR
BFFBBFBRLL
BFBFBBFLLL
FFBFBBBRRR
BBFFFBBRRR
BFFBFFBLRR
FFFBBFFLRL
BFFBBBFRLR
BFBBFBBRLL
BFBBFBBLLL
FFBFBFBRRL
BFFBBFFRRR
FBFBBBBLLL
FBFFFBFRRR
BFFBFFFLRL
FFFBFBBRRL
FBFBFBBRRR
FBBFBFBLLR
FBBBFFBRLR
FBFBFFBLLR
FBFFBBBRLL
FBFFFBBLRR
BFFFBFFLLL
BFBBBBBLLR
FFBBFFBRLR
BFBFBBFLRL
BBFFBBBRRR
FFBBFFFLRR
BFBFFFFRLR
BFBBBFBRRL
BFFBBBBRLR
BFFBFBBLLR
BFFBBFFLLR
FBFFBBBRLR
FBFFBBBLLR
BBFFBFFLLL
BFFBFBFRRR
BFBFFBFRLR
FBFFFBBLRL
FBBBFBFRRL
BBFFFFBLLL
FFBBBFFLRL
BFFBFFFRLR
FBBFBBFLLL
FBFFBFFRRL
FFBFFBBRLR
BBFFBFFLRR
FBFBBBBLLR
FFBBBFFLRR
BFBBFBFLLR
FBBFBBFLLR
FFBFFBBRRL
BBFFFFBRRL
FBBBBFBRRL
FBFBFFFLLR
BFBFBFBLLR
FFBFBFBRRR
BFFFBFFLLR
BBFFBFFRLL
FBBBFFBRLL
FBBFFFFRRL
FBBBFFFRRR
BBFFFBBRLL
BFFBBFFLRR
BFFBFBBRLR
BFFBFBBLLL
BBFBFFBLLR
FBBBBFFLLL
BFFBBFBLRL
FBBBBFBRRR
FBBFBBBRRL
BFFBBFFRLL
BFFFFFBLLR
BFFBBFBLRR
FFBBFBFRRR
BFBFFBFRRL
BFFFFBBLRR
FFFBBFBLRR
BFFFBBBLRR
FFBFBBFLRL
FBBBFFFLLL
FBFBFBFLRR
FFFBBFBLLL
FFBBFBBLLR
FBBFBBFLRL
FFBFFBBRRR
FBBBBFFRRL
FFFBBBFRLR
FBBFFFBLRL
BFFBFBBLRL
BFFBFBFLRR
BFFBFFBRRR
FFBFBBBRRL
BFFBFFFLLR
BFFBBFBRLR
FBBBFFFRRL
BBFBFFBLRL
BBFFFBBLLR
FBFBFFFRRR
BFFFBBBLRL
FBFFBFFLLR
BBFFFFFRLL
FFBFFFBLLL
BFBBFBBRLR
BFFFFFBLLL
BFBBFFFLLL
FFBBBBFRLR
BBFFFFFLLR
BFFFFBFLRL
FFBFFBBLLR
BBFFBFFLLR
BFBFFBFRRR
FBFFBFFRRR
BBFFFBFLLL
FFBFFFFRRL
FBBFBFFRRR
FFBBFBBRRL
FFBBBFFLLR
BFFFFBBLLR
FFBFBFBLLR
BFFBBBBLLR
FBBBBFBLLL
BFBFBBBRLL
FFBFBBFRRR
FFFBFBBLLR
FFFBBFBRRR
FFFBBBBLRR
FBBFFFFLRL
FFFBBFFLRR
FFBFBFFRLL
FBBFBBBRLR
FFBFBFBRLR
FBBBFFBLRL
BFFFFFFLRL
BFFBBBBLRR
FBBFFBBLRR
BBFFBBFRRL
FFBFBBBLLR
FFBFFFFRRR
FBFFFBBRRL
BFBFFFBLLR
FBBFBFBRRL
BBFBFFBRLR
FBBBBBFRLR
BFFBBBFLRR
BFFFFFBLRR
BBFFBBFRLL
FFFBBFFRRR
FBBBBFFRLL
FFBFFFFRLR
BFFFBFFRRL
FBBBBFFLRL
BBFBFFFRLL
BBFFFBBLRR
FBBBFFFLLR
BFFBFBFLRL
FFBBBBFLLL
BFFFBFFRLL
FBBFFBFLLR
BBFBFBBRRR
FFBFFFBRLL
FBFBBBFRLL
BFBBFBBLRR
FBBBBFFRLR
BFFBBBBLLL
FFFBBBBRLR
FBBFBBBRLL
FFBBBFBLRL
BBFBFFBLRR
FFBBFFFLLR
FFBBFBFRLR
BFBBBFBRLR
FBBFFFFLLL
BBFFFBFRRR
FFBFBBFLLL
FBBFBBBRRR
BFBBFFBRRR
FBBFFBFLLL
FFBFBBBLRL
FBFBBFBRRR
FBFBFFFRLL
BFFFFFFLLR
FBFBBBFLLR
BFBFFBFRLL
BFBFFBBRLR
BFFBBFBLLR
BFFBFBBLRR
BFBBBFFLRL
BFBBFBFLRR
BFBBBFFRLR
FFFBBFFRRL
BFFFBBFRRL
BFFBFBFLLR
BFBFFBBLRR
BFBBBBBRRR
FFBBFBBLRR
BFFBBFFRLR
FFBFBFBRLL
BFBBBBFLRL
FBBFBBFLRR
FBFBBBFLRL
FBFFFBBRRR
BBFBFFFRRR
FBFFBFBRLL
FBBBBBBRLL
FFBBBFFRRL
FFBBFFFRLL
FBFBBFBRLL
FFFBBBBLLL
BFBFBBBLLL
FFFBBBFLLR
FFBFBBFRLL
BBFFBBBLLR
FFFBBFBLRL
FFBBFBBLLL
FBFFFBFRLR
BFBBFBBLRL
FFFBFBFRLR
BFBFFFBRLR
FFBBBBBLLL
FBFBFFBRRR
BFFBBBBRRL
FBBBBFBLLR
BFBBFBFRRL
BBFBFBFLLR
BFFFBFBRRR
FFFBBFBRRL
BFBBBBFLLR
FBBBFBFLLR
FFBFFBFLRL
BFFFBFBLLL
BFBBBBBLRR
BFBFBBBLRR
BFFBFBBRLL
FFBFBFFLRL
FBBBFBBRRL
BFFBBFFLLL
FBFBFFBRLR
FBBFFFFLRR
FBBFBFBLRL
BBFFBBBLRR
BBFFBBFLLL
FBBBBBBRRL
BFFFFBFRRL
BFFFFBBLRL
BBFFFBFRLL
BFFBBFFRRL
FBBBFBFRLR
FBFFBFBLLR
FFBFFFFLRR
FBBBBBBLRL
FFBBFBBRLL
FFBBFBBLRL
FBFFFBBRLR
FFBBFFFLLL
BFFFFBFRRR
BFBFBFBLLL
FFFBBFFLLR
FBBBFBBRLL
BFFFFBBRLL
FBFBFFBRRL
BFBBBFBLLR
BFFBBBBRRR
BBFFFFFRLR
BFFFBBBRRL
BBFFFFFRRL
BFFBFBFRLL
BBFFFFFLRR
FBFBBBBLRR
FBFFBBBLRL
BFBFBFFRLL
FFFBBBBLLR
FBFBBBBRRR
FBBBBBFRRL
BFBFFFBRRL
BFBFFFBRRR
BBFBFBBLRR
FFBBBFBLLR
FFBBBFBRLL
BFBFFFFLLR
BFBFBFFLRL
FBFFFBBLLR
BFFFFFBRLL
BBFBFBBRRL
BFBBFFFRLL
BFFFFFFRLR
FBFFBFBRRR
BBFFBBFRRR
FFBBBFFRRR
FFBBBFFLLL
BFBBFFFLRR
FBBFFBFLRL
FBFFFFBLLR
BFBBBFBLRL
BFFBBBFRRR
BFBBBFFLLR
BFFFBBBRRR
BBFBFBFRRR
FFBFFBFRLR
FBBFFFFRLR
BFBFBBFRRR
BBFFBBFLRR
FBBBFFBLRR
BFBBFFBLRL
FBFBBBBRRL
FFFBBBFLRL
FBBFFFBRLR
FFFBBFFLLL
BFFBFBBRRL
BFFFBFBLRR
FBBFFFBRRR
FBBFFFBRRL
FBBBBBBRLR
FBFFFBFLRL
BBFFBFBLRL
FBBFFFBLLR
BFBFFBBRRR
FBFBBFFLRR
FBBFFBBLLR
FFBBBFFRLR
BFFBFFFLLL
FBBFFFFRLL
BFBFFBBLRL
BFBFFBFLLR
BFBBFFFRRR
BBFFFBFRLR
FBFBBBFRRL
FFBBFBBRLR
BFFFFFFRRR
BBFBBFFLLL
BBFBFFBRRR
FBBFFBBLLL
FFBFBFBLRR
BFBBFFFRRL
BBFBFFFLRR
FBBBBFBLRL
FBBFBFBRRR
FBFBFBBLRR
FBFFBBBLLL
FFBFBFFLRR
BFBFBFFRRR
BFBFBFFLRR
BFFFFFFLRR
FBFBFBBRLR
FFBFFFFRLL
FBBBFBFLRL
BFBFBBFLRR
FFBBFBBRRR
FBFBFFFLRR
BBFBFFBRRL
BFBFBFFLLL
FBFFBFBRLR
FBBBBBBLLR
FFBBBBFRLL
BFBFBBBLLR
FBFBBBBRLL
BFBBBBFLLL
FBFBFFBLRL
FFFBBBBRRR
BFFFFBFLLR
BBFFFFBLRR
BBFFFBFRRL
FBBFBFFLLL
BFFFBFBRLL
FBBBFFBRRL
FFFBFBBLRR
BBFFBBFLRL
BBFFBFFRRR
BFFFFBFRLR
FBFBBFBLRL
FBFFBBFLLL
FBFFFBBLLL
FFBBFFBLRL
FBBFBBFRLR
FBBFBBBLRL
FBFFFBBRLL
BFBFBBBRRL
BFBFBFBRLR
FBFFBFFLRR
BFFFBFFLRR
BFBBBFFLRR
FBBBBBFLLL
FFBFBFFRRR
BBFFBFFRRL
BFBBBFBLLL
FFBBFBFRLL
FBBBFBFRRR
FBFFBBBLRR
BBFFFFBRRR
FBFBFFFRRL
BFFFBBFLRL
FBBFFFBLLL
BFBBFFBLLR
BFBBFFBLLL
FBFFBBFRRR
BFFBBFBRRR
BFFFFFBRLR
FBBBBBBLLL
FFBFBBFLRR
FBFBFBBRLL
BBFBFFFLLR
BFFFFBBRRL
BFFBFFBLLR
BFFFBBFRLL
BBFFFFFLRL
FBFFFFFLLL
BBFFBBBRLL
BFBBFFBRRL
BFFFBBBRLL
FBFFBBFLRL
BBFBFFBLLL
FBFBBFBLRR
FFFBBBFLRR
BBFBFBFLRR
BFBBBBFRRR
BFFFBFFLRL
FBFFFFBRRL
BBFBFBFLLL
BFFFBBBRLR
FBFFBBFRRL
FBBFFBBRRL
FBFBFBFRLR
BFFBFFFRRL
FFFBFBBLLL
BFBBFBFRLL
BFFFBFBLLR
FBBFBBFRRL
FBBBBBFLRR
FBFFBFBLRR
FBBBFFBLLL
BFBBBFFRRR
BFBFFFFLRL
FFBBFFBRRL
BFFFFFFLLL
FFBFFBBLRL
FFFBBBFRLL
BFFFBBFLLL
FBBBFBBRLR
BBFBFFFRLR
BFFBFFBLLL
FFBBFBFLLR
FBFFBBBRRR
BFBFFBFLRR
FBFBFBBLRL
FFBFFFBRRR
FBBFFFFLLR
BFBBBFFRRL
FFBBBFFRLL
FFFBFBFRRR
FFBBBBFLRL
BFBFBFBRLL
FFFBBBFLLL
BFBFFFFLRR
BFFBFFBRRL
BFBFFBBLLL
FBBBBFBLRR
FFBFFBFRRR
FFBFFFBLLR
FFBBFFFRRL
BFBFFFFRLL
BBFFFBBRLR
FBFBBFBRRL
BFFFFFBLRL
FFBFFFFLLR
FBFFFBFLLR
FFFBBFBRLR
BBFBBFFLRL
FFBBBBBLLR
BFBBFBFLLL
BFBFFFFRRL
BFBBFFBRLR
FBBBFFFRLR
FBBFFBFRLR
BBFBFBBRLR
FBBBFFBLLR
FBBBFFFRLL
FBFBFBFRRR
BBFFBFBRRR
FFBBBBBRLL
FBFFBFBRRL
FBFFBBFLRR
FFFBBBFRRL
FFBBFFBRLL
BFBFFFBRLL
FBFFFBFLLL
FBBBBFFRRR
FBBFBFBRLL
BFFBFBFRLR
FFBFBFBLLL
FFBBFFBRRR
FBFFFFBLRL
FBBBFFFLRR
FFBBBBFRRL
FFBFBBBRLR
FBBBBFFLRR
BFBFBFBLRR
BBFBFFFLRL
FFFBBFBRLL
FBBBBBFLRL
FBBFBBBLLR
BFBBFFFLLR
FBFFFFFRLR
BBFFFFBRLR
FBFFFFFRRL
FFBBFFFLRL
FFBBBFBRRR
BBFFFBBLRL
FBBBBFBRLR
FBFBBFFLRL
FBFFBFFLRL
FFBFFFBRRL
FFBBBBFLLR
FBBFBFBLRR
FBFBBFBLLL
FFBBBFBLLL
BFFFFBBRRR
BBFFFBFLRL
FFFBBBBRLL
FFBFFBBLLL
FBFFFBFLRR
FBFBFFBLLL
BFFFBFFRRR
FFBBFFBLRR
BFBBFBFRLR
FFBFBFFRRL
BBFFFFBLRL
FFBBFFFRLR
BFBBFFBLRR
BBFBFBFRLL
FBFFFFBRLR
BBFFBBFLLR
FBBFFBBLRL
BFFFFFBRRL
BFBBFBFLRL
BBFFBFBRLR
BFFFFBFLRR
BFBFBFBLRL
FBFBFFFLRL
BFFFBBFLLR
BBFBFBBLRL
BFFFFBBLLL
BBFBFBBLLL
BFBBBBBRRL
FBFFBBFLLR
FFBFFBBLRR
FFBBFBFRRL
FBFBBBFLRR
FBFBBFFLLR
FFBBBBBRLR
FBFBBBFLLL
FBBFBBFRRR
FBBFBBBLLL
BFBFFBFLRL
FBFBFFBRLL
BFBFBFFLLR
FBBFFBFRLL
BFFBBBFLLR
BFFFFBBRLR
BFFBBFBLLL
BFFFFBFRLL
FFBFBBFRLR
BFFFBBBLLL
FBFBFBFLLL
BFBBBBBRLR
BFBBBFBLRR
FBBBFBFLLL
FBFFBBFRLR
BBFFBBBRLR
FFBFBBFRRL
BFBFBBFRLR
FBFFFFFLRR
BBFFFBFLRR
FBBFBFFRLR
BFBBFFFLRL
FBFBBFBLLR
BFFBBBFLRL
BFFBBBFLLL
BFBBFFFRLR
FBFFBBBRRL
BFFFBFBLRL
FBBBBBFRLL
BFFBBBBLRL
FBBFFFBRLL
FBBFBFFRLL
FBBFFBFLRR
FFBFBBBLLL
FFBFBBBLRR
BFBFFFBLLL
FFBFFFBLRR
FBFBBFBRLR
FBFBBBFRRR
FFFBFBFRLL
FBFFFBFRLL
BBFFBFBRLL
FFFBBBBRRL
FBFBFBBLLR
BBFFBBBLLL
FBBBBBFLLR
BFBFFFFRRR
FBBBBFFLLR
BFBBBBFLRR
BFBFFFBLRR
BFFFBBFRRR
FFFBBFBLLR
BFBBBBFRLL
BBFBFBFRRL
BBFBFFFLLL
FBFBFBFRLL
FBFBFFBLRR
BFBFBFBRRL
BFBBFFBRLL
BBFBFFBRLL
BFBBFBBRRR
FFBBBBFLRR
BBFFFBBLLL
FBBBFBBLLL
FFBBBFBLRR
BFBFBFFRLR
BBFBFBBLLR
BFBFFBFLLL
FBFBBFFRLL
FBFBFBFLRL
FBBFFFBLRR
BFBFFBBRRL
FFBFBFFLLR
FFBBFFBLLR
FFFBBBFRRR
BFFBFBBRRR
FFFBBBBLRL
FBBBBBBLRR
FBBBFBFLRR
FFBBBBFRRR
FFBFFBFLLL
BBFFBBFRLR
FBFFFFFRRR
FBBBFFFLRL
BFFBFFBRLR
FBFFFBFRRL
BBFFFFFLLL
FFBBFBFLLL
BFFBFFBRLL
FFBBFFBLLL
BBFFFBFLLR
FBBFBFBLLL
BFFFFFBRRR
BFBFBBBLRL
FBBFFBBRLL
BFFBFFFRLL
BFFBFFFLRR
BFFBBBFRLL
FBFBBFFLLL
FBFBFBFRRL
BFBBFBBLLR
FBFBBBBLRL
BFFFBFBRLR
FFFBFBFRRL
BBFBBFFLLR
FBBFBFFLRL
FFBBBBBLRR
FBBBBFBRLL
BBFBFBFRLR
BFBBBBBLLL
FBBBFBBLLR
FFFBBFFRLL
FFFBFBBRLL
FFBFFBFLLR
BBFFFFFRRR
BBFFBFFLRL
FBFFFFBRRR
BFBFFFBLRL
BBFFBFBLRR
FFBBBFBRRL
FBFFBFBLRL
FBBFBFBRLR
BFBFBFFRRL
BFFFBFBRRL
BFFFBFFRLR
BBFBFBFLRL
FBBFBFFRRL
BFBBFBBRRL
BFFFFBFLLL
FBBFBBBLRR
BFBFFFFLLL
FFBBBBBLRL
BFBFFBBLLR
FBFBFBFLLR
FFBFFFBRLR
FBBFFFFRRR"""
|
import unittest
from stacker.blueprints.testutil import BlueprintTestCase
from stacker.context import Context
from stacker.config import Config
from stacker.variables import Variable
from stacker_blueprints.generic import GenericResourceCreator
class TestGenericResourceCreator(BlueprintTestCase):
    """Render-and-snapshot test for stacker_blueprints.generic.GenericResourceCreator."""

    def setUp(self):
        # Minimal stacker context; only a namespace is needed to render.
        self.ctx = Context(config=Config({'namespace': 'test'}))

    def test_create_template(self):
        # Render a generic ec2.Volume resource and compare against the
        # expected output kept by assertRenderedBlueprint (stored fixture —
        # see stacker.blueprints.testutil).
        blueprint = GenericResourceCreator(
            'test_generic_GenericResourceCreator', self.ctx
        )
        blueprint.resolve_variables(
            [
                Variable('Class', 'ec2.Volume'),
                Variable('Output', 'VolumeId'),
                Variable('Properties', {
                    'VolumeType': 'gp2',
                    'Size': '600',
                    'Encrypted': 'true',
                    'AvailabilityZone': 'us-east-1b',
                }),
            ]
        )
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)
if __name__ == '__main__':
unittest.main()
|
from player import Player
# Create a player and poke at its `lives` attribute.
tim = Player('Tim')
# print(tim.name)
# print(tim.lives)
# tim.lives -= 1
# print(tim)
# Assign a negative value; presumably Player's lives setter validates or
# clamps this — TODO confirm against player.py.
tim.lives = -1
# for i in range(3):
#     print('-'*20)
#     tim.lives -= 1
#     print(tim)
|
"""
create and annotate references to strings in 64-bit Windows Go executables.
expect to see the assembly pattern:
lea reg, $string
mov [stack], reg
mov [stack], $size
"""
import idc
import ida_ua
import ida_name
import ida_bytes
import idautils
def enum_segments():
    """Yield (start, end, name) for every segment in the IDA database."""
    for seg_ea in idautils.Segments():
        yield seg_ea, idc.get_segm_end(seg_ea), idc.get_segm_name(seg_ea)
def find_pointers(start, end):
    """Yield (va, ptr) for each qword in [start, end-8) whose value lands
    inside some segment (i.e. looks like a valid pointer)."""
    for addr in range(start, end - 0x8):
        candidate = ida_bytes.get_qword(addr)
        if idc.get_segm_start(candidate) != idc.BADADDR:
            yield addr, candidate
def is_head(va):
    """True when *va* is the start of a defined item."""
    flags = idc.get_full_flags(va)
    return ida_bytes.is_head(flags)
def get_head(va):
    """Return *va* itself if it is an item head, else the previous head."""
    return va if is_head(va) else idc.prev_head(va)
def is_code(va):
    """True when the item containing *va* is defined as code."""
    # Normalise to the item head first, then test its flags.
    head = va if is_head(va) else get_head(va)
    return ida_bytes.is_code(idc.get_full_flags(head))
def main():
    """Scan all code heads for the Go string-load pattern and annotate the
    referenced string bytes with a name and repeatable comment.

    Pattern matched (string address loaded, then address+length stored):
        lea reg, $string
        mov [stack], reg
        mov [stack], $size
    """
    for segstart, segend, segname in enum_segments():
        for head in idautils.Heads(segstart, segend):
            if not is_code(head):
                continue
            # pattern:
            #
            #    lea     rax, unk_6BDF88
            #    mov     [rsp+0], rax
            #    mov     qword ptr [rsp+8], 40h
            if ida_ua.ua_mnem(head) != "lea":
                continue
            next_head = ida_bytes.next_head(head, idc.BADADDR)
            if ida_ua.ua_mnem(next_head) != "mov":
                continue
            next_head2 = ida_bytes.next_head(next_head, idc.BADADDR)
            if ida_ua.ua_mnem(next_head2) != "mov":
                continue
            # Operand 1 of the lea is the candidate string address; only
            # read-only data (or UPX-packed) segments are accepted.
            dst = idc.get_operand_value(head, 1)
            if idc.get_segm_name(dst) not in (".rdata", "UPX1"):
                continue
            # Operand 1 of the second mov is the candidate string length;
            # reject implausibly long or trivially short values.
            size = idc.get_operand_value(next_head2, 1)
            if size > 0x100:
                continue
            if size <= 2:
                continue
            buf = ida_bytes.get_bytes(dst, size)
            if not buf:
                continue
            # Go strings are not NUL-terminated; an embedded NUL means the
            # bytes are not printable string data.
            if b"\x00" in buf:
                continue
            try:
                s = buf.decode("ascii")
            except UnicodeDecodeError:
                continue
            print("string pointer: 0x%x -> 0x%x: %s" % (head, dst, s))
            # Re-type the first byte at the target, then attach the decoded
            # text as a repeatable comment and an s_<addr> name.
            ida_bytes.del_items(dst, 1)
            ida_bytes.create_data(dst, idc.FF_BYTE, 1, idc.BADADDR)
            ida_bytes.set_cmt(dst, s, True)
            ida_name.set_name(dst, "s_%x" % (dst))
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
###############################################################################
#
# CSW Client
# ---------------------------------------------------------
# QGIS Catalog Service client.
#
# Copyright (C) 2010 NextGIS (http://nextgis.org),
# Alexander Bruy (alexander.bruy@gmail.com),
# Maxim Dubinin (sim@gis-lab.info)
#
# Copyright (C) 2017 Tom Kralidis (tomkralidis@gmail.com)
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
###############################################################################
from qgis.core import QgsSettings
from qgis.PyQt.QtWidgets import QDialog, QMessageBox
from MetaSearch.util import get_ui_class
BASE_CLASS = get_ui_class('newconnectiondialog.ui')
class NewConnectionDialog(QDialog, BASE_CLASS):
    """Dialogue to add a new CSW entry"""

    def __init__(self, conn_name=None):
        """init"""
        QDialog.__init__(self)
        self.setupUi(self)
        self.settings = QgsSettings()
        self.conn_name = None
        # Remember the name the dialog was opened with so a rename can be
        # detected on save.
        self.conn_name_orig = conn_name
        self.username = None
        self.password = None

    def accept(self):
        """add CSW entry"""
        name = self.leName.text().strip()
        url = self.leURL.text().strip()
        username = self.leUsername.text().strip()
        password = self.lePassword.text().strip()

        # Name and URL are both mandatory.
        if name == '' or url == '':
            QMessageBox.warning(self, self.tr('Save Connection'),
                                self.tr('Both Name and URL must be provided.'))
            return

        # A '/' would corrupt the settings key layout below.
        if '/' in name:
            QMessageBox.warning(self, self.tr('Save Connection'),
                                self.tr("Name cannot contain '/'."))
            return

        if name is not None:
            key = '/MetaSearch/%s' % name
            keyurl = '%s/url' % key
            key_orig = '/MetaSearch/%s' % self.conn_name_orig

            # Warn if the entry was renamed onto an existing connection.
            if self.conn_name_orig != name and self.settings.contains(keyurl):
                res = QMessageBox.warning(
                    self, self.tr('Save Connection'),
                    self.tr('Overwrite {0}?').format(name),
                    QMessageBox.Ok | QMessageBox.Cancel)
                if res == QMessageBox.Cancel:
                    return

            # On rename, delete the entry stored under the original name first.
            if self.conn_name_orig is not None and self.conn_name_orig != name:
                self.settings.remove(key_orig)

            self.settings.setValue(keyurl, url)
            self.settings.setValue('/MetaSearch/selected', name)

            # Credentials are only stored when actually provided.
            if username != '':
                self.settings.setValue('%s/username' % key, username)
            if password != '':
                self.settings.setValue('%s/password' % key, password)

        QDialog.accept(self)

    def reject(self):
        """back out of dialogue"""
        QDialog.reject(self)
|
class WalletExists(Exception):
    """Raised when a wallet has already been created; it must be unlocked
    with its password via :func:`transnet.wallet.unlock`.
    """
class WalletLocked(Exception):
    """Raised when the wallet is locked."""
class RPCConnectionRequired(Exception):
    """Raised when an RPC connection is required."""
class AccountExistsException(Exception):
    """Raised when the requested account already exists."""
class AccountDoesNotExistsException(Exception):
    """Raised when the account does not exist."""
class AssetDoesNotExistsException(Exception):
    """Raised when the asset does not exist."""
class InvalidAssetException(Exception):
    """Raised when an invalid asset has been provided."""
class InsufficientAuthorityError(Exception):
    """Raised when the transaction requires the signature of a higher
    authority.
    """
class MissingKeyError(Exception):
    """Raised when a required key cannot be found in the wallet."""
class InvalidWifError(Exception):
    """Raised when the provided private key has an invalid format."""
class ProposalDoesNotExistException(Exception):
    """Raised when the proposal does not exist."""
class BlockDoesNotExistsException(Exception):
    """Raised when the block does not exist."""
class NoWalletException(Exception):
    """Raised when no wallet can be found; use
    :func:`peerplays.wallet.create` to create a new wallet.
    """
class WitnessDoesNotExistsException(Exception):
    """Raised when the witness does not exist."""
class WrongMasterPasswordException(Exception):
    """Raised when the provided password cannot properly unlock the
    wallet.
    """
class CommitteeMemberDoesNotExistsException(Exception):
    """Raised when the committee member does not exist."""
class VestingBalanceDoesNotExistsException(Exception):
    """Raised when the vesting balance does not exist."""
class WorkerDoesNotExistsException(Exception):
    """Raised when the worker does not exist."""
class ObjectNotInProposalBuffer(Exception):
    """Raised when the object was not found in the proposal."""
class InvalidMessageSignature(Exception):
    """Raised when the message signature does not fit the message."""
class KeyNotFound(Exception):
    """Raised when a key is not found."""
|
'''
Code to read SEVIRI data in HRIT format.
'''
import satpy # Requires virtual environmen for reading native (.nat) and hrit files.
import numpy as np
import datetime
import glob
from pyresample import geometry, bilinear
import os
import h5py
import sys
from tempfile import gettempdir
# The following are necessary to read compressed HRIT files.
os.environ['XRIT_DECOMPRESS_PATH']='/home/users/bpickering/SOFTWARE/PublicDecompWT/2.06/xRITDecompress/xRITDecompress' # Necessary to read compressed files.
# Directory the decompressor writes to; should match the unix environment
# variable TMPDIR. Decompressed files are deleted from here after being read,
# which otherwise could fill up the tmp directory.
my_tmpdir = gettempdir() + '/'
print ('Temporary Directory for Decompress: '+my_tmpdir)
# Primary and fallback SEVIRI HRIT archive locations; the second directory is
# checked because the first archive is incomplete.
sev_data_dir1='/gws/nopw/j04/swift/earajr/HRIT_archive/'
sev_data_dir2='/gws/nopw/j04/swift/SEVIRI/'
unavailable_times = (
[datetime.datetime(2014,3,2,12,00,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2014,3,3,12,00,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2014,3,4,12,00,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2014,3,26,2,45,0)]
+[datetime.datetime(2014,5,21,1,45,0)] # Problems reading
+[datetime.datetime(2014,5,21,7,15,0)] # Problems reading
+[datetime.datetime(2014,5,21,8,0,0)] # Problems reading
+[datetime.datetime(2014,5,21,10,30,0)] # Problems reading
+[datetime.datetime(2014,5,21,10,45,0)] # Problems reading
+[datetime.datetime(2014,5,21,11,15,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Problems reading
+[datetime.datetime(2014,5,21,12,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(6)] # Problems reading
+[datetime.datetime(2014,5,21,14,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Problems reading
+[datetime.datetime(2014,5,21,15,0,0)] # Problems reading
+[datetime.datetime(2014,5,21,18,0,0)] # Problems reading
+[datetime.datetime(2014,5,21,19,15,0)] # Problems reading
+[datetime.datetime(2014,5,21,20,0,0)] # Problems reading
+[datetime.datetime(2014,5,21,20,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Problems reading
+[datetime.datetime(2014,5,21,22,0,0)] # Problems reading
+[datetime.datetime(2014,5,21,23,30,0)] # Problems reading
+[datetime.datetime(2014,5,22,4,15,0)] # Problems reading
+[datetime.datetime(2014,5,22,5,45,0)+datetime.timedelta(seconds=60*15*n) for n in range(8)] # Problems reading
+[datetime.datetime(2014,5,22,8,15,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Problems reading
+[datetime.datetime(2014,5,22,9,0,0)] # Problems reading
+[datetime.datetime(2014,5,22,9,45,0)] # Problems reading
+[datetime.datetime(2014,5,22,10,15,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Problems reading
+[datetime.datetime(2014,5,22,11,0,0)] # Problems reading
+[datetime.datetime(2014,5,22,11,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Problems reading
+[datetime.datetime(2014,5,22,12,0,0)] # Problems reading
+[datetime.datetime(2014,5,22,12,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Problems reading
+[datetime.datetime(2014,5,22,13,15,0)+datetime.timedelta(seconds=60*15*n) for n in range(4)] # Problems reading
+[datetime.datetime(2014,5,22,14,30,0)] # Problems reading
+[datetime.datetime(2014,5,22,16,30,0)] # Problems reading
+[datetime.datetime(2014,5,22,17,0,0)] # Problems reading
+[datetime.datetime(2014,5,22,18,30,0)] # Problems reading
+[datetime.datetime(2014,5,22,19,30,0)] # Problems reading
+[datetime.datetime(2014,5,22,21,45,0)] # Problems reading
+[datetime.datetime(2014,5,22,23,0,0)] # Problems reading
+[datetime.datetime(2014,5,23,0,15,0)] # Problems reading
+[datetime.datetime(2014,5,23,0,45,0)] # Problems reading
+[datetime.datetime(2014,5,23,4,0,0)] # Problems reading
+[datetime.datetime(2014,5,23,4,45,0)] # Problems reading
+[datetime.datetime(2014,5,23,10,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(9)] # Problems reading
+[datetime.datetime(2014,5,23,13,0,0)] # Problems reading
+[datetime.datetime(2014,5,23,14,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Problems reading
+[datetime.datetime(2014,5,23,15,45,0)] # Problems reading
+[datetime.datetime(2014,5,23,16,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Problems reading
+[datetime.datetime(2014,5,23,19,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Problems reading
+[datetime.datetime(2014,5,23,20,15,0)] # Problems reading
+[datetime.datetime(2014,5,23,20,45,0)] # Problems reading
+[datetime.datetime(2014,5,23,22,15,0)] # Problems reading
+[datetime.datetime(2014,6,20,19,45,0)] # Problems reading
+[datetime.datetime(2014,8,15,14,45,0)] # Problems reading
+[datetime.datetime(2014,10,9,11,45,0)] # Problems reading
+[datetime.datetime(2014,10,10,11,45,0)] # Problems reading
+[datetime.datetime(2014,10,11,11,45,0)] # Problems reading
+[datetime.datetime(2014,10,12,11,45,0)] # Problems reading
+[datetime.datetime(2014,12,4,14,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2014,12,8,6,45,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2015,3,1,12,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2015,3,2,12,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2015,3,3,9,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(4)]
+[datetime.datetime(2015,3,3,12,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(3)]
+[datetime.datetime(2015,3,4,12,15,0)]
+[datetime.datetime(2015,3,16,11,30,0)]
+[datetime.datetime(2015,4,28,7,45,0)+datetime.timedelta(seconds=60*15*n) for n in range(4)]
+[datetime.datetime(2015,4,28,12,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(3)]
+[datetime.datetime(2015,5,14,9,45,0)]
+[datetime.datetime(2015,5,14,10,45,0)]
+[datetime.datetime(2015,5,14,11,15,0)]
+[datetime.datetime(2015,5,29,2,15,0)]
+[datetime.datetime(2015,6,1,5,45,0)]
# +[datetime.datetime(2015,6,1,13,30,0)] # Problems reading
+[datetime.datetime(2015,6,2,2,45,0)] # Problems reading
+[datetime.datetime(2015,6,2,13,0,0)] # Problems reading
+[datetime.datetime(2015,6,3,2,15,0)] # Problems reading
+[datetime.datetime(2015,6,3,5,45,0)] # Problems reading
+[datetime.datetime(2015,6,3,7,45,0)] # Problems reading
+[datetime.datetime(2015,6,4,3,45,0)] # Problems reading
+[datetime.datetime(2015,6,4,19,0,0)] # Problems reading
+[datetime.datetime(2015,6,4,20,45,0)] # Problems reading
+[datetime.datetime(2015,6,5,12,15,0)] # Problems reading
+[datetime.datetime(2015,6,5,16,0,0)] # Problems reading
+[datetime.datetime(2015,6,8,2,15,0)] # Problems reading
+[datetime.datetime(2015,6,9,6,15,0)] # Problems reading
+[datetime.datetime(2015,6,9,10,0,0)] # Problems reading
+[datetime.datetime(2015,6,9,21,0,0)] # Problems reading
+[datetime.datetime(2015,6,10,18,0,0)] # Problems reading
+[datetime.datetime(2015,6,10,19,0,0)] # Problems reading
+[datetime.datetime(2015,6,11,4,15,0)] # Problems reading
+[datetime.datetime(2015,6,11,5,30,0)] # Problems reading
+[datetime.datetime(2015,6,12,15,0,0)] # Problems reading
+[datetime.datetime(2015,6,12,18,45,0)] # Problems reading
+[datetime.datetime(2015,6,12,22,15,0)] # Problems reading
+[datetime.datetime(2015,6,13,19,30,0)] # Problems reading
+[datetime.datetime(2015,6,14,1,0,0)] # Problems reading
+[datetime.datetime(2015,6,14,6,0,0)] # Problems reading
+[datetime.datetime(2015,6,14,7,15,0)] # Problems reading
+[datetime.datetime(2015,6,14,20,30,0)] # Problems reading
+[datetime.datetime(2015,6,14,21,15,0)] # Problems reading
+[datetime.datetime(2015,6,14,23,45,0)] # Problems reading
+[datetime.datetime(2015,6,15,2,0,0)] # Problems reading
+[datetime.datetime(2015,6,15,4,15,0)] # Problems reading
+[datetime.datetime(2015,6,15,7,0,0)] # Problems reading
+[datetime.datetime(2015,6,15,7,45,0)] # Problems reading
+[datetime.datetime(2015,6,15,14,15,0)] # Problems reading
+[datetime.datetime(2015,6,15,19,45,0)] # Problems reading
+[datetime.datetime(2015,6,15,20,0,0)] # Problems reading
+[datetime.datetime(2015,6,15,22,0,0)] # Problems reading
+[datetime.datetime(2015,6,15,22,45,0)] # Problems reading
+[datetime.datetime(2015,6,16,3,30,0)] # Problems reading
+[datetime.datetime(2015,6,16,7,0,0)] # Problems reading
+[datetime.datetime(2015,6,16,12,15,0)] # Problems reading
+[datetime.datetime(2015,6,16,15,30,0)] # Problems reading
+[datetime.datetime(2015,6,16,16,0,0)] # Problems reading
+[datetime.datetime(2015,6,17,12,30,0)] # Problems reading
+[datetime.datetime(2015,6,17,12,45,0)] # Problems reading
+[datetime.datetime(2015,6,17,14,15,0)] # Problems reading
+[datetime.datetime(2015,6,18,10,45,0)] # Problems reading
+[datetime.datetime(2015,6,18,15,30,0)] # Problems reading
+[datetime.datetime(2015,6,18,15,45,0)] # Problems reading
+[datetime.datetime(2015,6,18,16,30,0)] # Problems reading
+[datetime.datetime(2015,6,18,17,30,0)] # Problems reading
+[datetime.datetime(2015,6,18,19,15,0)] # Problems reading
+[datetime.datetime(2015,6,18,21,0,0)] # Problems reading
+[datetime.datetime(2015,6,19,7,30,0)] # Problems reading
+[datetime.datetime(2015,6,19,14,30,0)] # Problems reading
+[datetime.datetime(2015,6,19,16,30,0)] # Problems reading
+[datetime.datetime(2015,6,20,4,15,0)] # Problems reading
+[datetime.datetime(2015,6,20,13,30,0)] # Problems reading
+[datetime.datetime(2015,6,21,4,30,0)] # Problems reading
+[datetime.datetime(2015,6,21,7,15,0)] # Problems reading
+[datetime.datetime(2015,6,21,9,30,0)] # Problems reading
+[datetime.datetime(2015,6,21,15,0,0)] # Problems reading
+[datetime.datetime(2015,6,21,18,45,0)] # Problems reading
+[datetime.datetime(2015,6,22,1,45,0)] # Problems reading
+[datetime.datetime(2015,6,22,9,0,0)] # Problems reading
+[datetime.datetime(2015,6,22,16,15,0)] # Problems reading
+[datetime.datetime(2015,6,22,16,30,0)] # Problems reading
+[datetime.datetime(2015,7,1,0,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(4)]
+[datetime.datetime(2015,10,10,11,45,0)]
+[datetime.datetime(2015,10,11,11,45,0)]
+[datetime.datetime(2015,10,12,11,45,0)]
+[datetime.datetime(2015,10,21,10,0,0)]
+[datetime.datetime(2015,11,15,3,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(19)]
+[datetime.datetime(2015,11,16,8,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2015,11,25,12,15,0)]
+[datetime.datetime(2016,2,29,12,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2016,1,1,8,45,0)] #Problems reading
+[datetime.datetime(2016,1,1,23,15,0)] #Problems reading
+[datetime.datetime(2016,1,2,2,15,0)] #Problems reading
+[datetime.datetime(2016,1,3,3,15,0)] #Problems reading
+[datetime.datetime(2016,1,4,3,45,0)] #Problems reading
+[datetime.datetime(2016,1,4,7,30,0)] #Problems reading
+[datetime.datetime(2016,1,4,8,45,0)] #Problems reading
+[datetime.datetime(2016,1,4,16,30,0)] #Problems reading
+[datetime.datetime(2016,1,5,11,15,0)] #Problems reading
+[datetime.datetime(2016,1,5,12,15,0)] #Problems reading
+[datetime.datetime(2016,1,5,20,15,0)] #Problems reading
+[datetime.datetime(2016,1,6,9,45,0)] #Problems reading
+[datetime.datetime(2016,1,6,15,30,0)] #Problems reading
+[datetime.datetime(2016,1,6,20,45,0)] #Problems reading
+[datetime.datetime(2016,3,8,10,30,0)]
+[datetime.datetime(2016,6,8,14,0,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2016,7,20,15,15,0)] # Problems reading
+[datetime.datetime(2016,7,29,2,45,0)]
+[datetime.datetime(2016,8,17,9,45,0)]
+[datetime.datetime(2016,10,8,9,30,0)] # Unavailable from Eumetsat
+[datetime.datetime(2016,10,10,11,45,0)] # Problems reading
+[datetime.datetime(2016,10,11,11,0,0)] # Problems reading
+[datetime.datetime(2016,10,11,11,45,0)] # Problems reading
+[datetime.datetime(2016,10,12,11,45,0)] # Problems reading
+[datetime.datetime(2016,10,12,16,45,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # Supposedly available from EumetSat website, but no files in folder on more than one attempt to download
+[datetime.datetime(2016,10,12,22,15,0)] # Supposedly available from EumetSat website, but no files in folder on more than one attempt to download
+[datetime.datetime(2016,10,14,9,30,0)] # Problems reading
+[datetime.datetime(2016,10,15,12,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(8)]
+[datetime.datetime(2016,10,16,13,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(66)] # Weird horizontal lines on these files...
+[datetime.datetime(2017,2,26,12,15,0)] # Unavailable for download
+[datetime.datetime(2017,2,27,12,15,0)] # Unavailable for download
+[datetime.datetime(2017,2,28,12,15,0)]
+[datetime.datetime(2017,3,17,21,45,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2017,4,22,22,15,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2017,6,22,11,45,0)]
+[datetime.datetime(2017,9,22,11,30,0) + datetime.timedelta(seconds=60*15*n) for n in range(3)]
+[datetime.datetime(2017,10,10,11,45,0)]
+[datetime.datetime(2017,10,11,11,45,0)]
+[datetime.datetime(2017,10,12,11,45,0)]
+[datetime.datetime(2017,10,13,11,45,0)]
+[datetime.datetime(2017,11,7,7,0,0) + datetime.timedelta(seconds=60*15*n) for n in range(5)]
+[datetime.datetime(2018,3,6,12,15,0,0)]
+[datetime.datetime(2018,3,21,12,15,0,0)]
+[datetime.datetime(2018,5,6,20,0,0,0)]
+[datetime.datetime(2018,5,7,4,45,0,0)]
+[datetime.datetime(2018,5,7,12,0,0,0)]
+[datetime.datetime(2018,6,20,9,15,0) + datetime.timedelta(seconds=60*15*n) for n in range(3)]
+[datetime.datetime(2018,7,3,13,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2018,7,4,4,0,0)]
+[datetime.datetime(2018,7,4,4,45,0)]
+[datetime.datetime(2018,7,10,23,30,0)+datetime.timedelta(seconds=60*15*n) for n in range(16)]
+[datetime.datetime(2018, 9, 24, 12, 30)]
+[datetime.datetime(2018, 9, 27, 7, 30)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2018, 10, 10, 11, 45)]
+[datetime.datetime(2018, 10, 12, 11, 45)]
+[datetime.datetime(2018, 10, 13, 11, 45)]
+[datetime.datetime(2018, 10, 15, 11, 45)]
+[datetime.datetime(2018, 11, 19, 9, 45)]
+[datetime.datetime(2018, 12, 2, 15, 15)]
+[datetime.datetime(2018, 12, 6, 15, 0)]
+[datetime.datetime(2019, 1, 16, 10, 30)]
+[datetime.datetime(2019, 1, 22, 15, 30)+datetime.timedelta(seconds=60*15*n) for n in range(3)]
+[datetime.datetime(2019, 1, 29, 12, 0)+datetime.timedelta(seconds=60*15*n) for n in range(2)]
+[datetime.datetime(2019, 2, 14, 9, 0)]
+[datetime.datetime(2019, 3, 5, 12, 15)] # NaNs in file
+[datetime.datetime(2019, 3, 6, 12, 15)] # NaNs in file
+[datetime.datetime(2019, 3, 7, 12, 15)] # NaNs in file
+[datetime.datetime(2019, 3, 12, 13, 0)+datetime.timedelta(seconds=60*15*n) for n in range(2)] # NaNs in file
+[datetime.datetime(2019, 4, 12, 10, 15)] # Unavailable from EumetSat
+[datetime.datetime(2019, 5, 6, 15, 45)] # Unavailable from EumetSat
+[datetime.datetime(2019, 5, 14, 10, 15)] # Unavailable from EumetSat
+[datetime.datetime(2019, 5, 24, 14, 0)] # Unavailable from EumetSat
+[datetime.datetime(2019, 5, 28, 13, 15)] # Unavailable from EumetSat
+[datetime.datetime(2019, 5, 29, 2, 15)] # Unavailable from EumetSat
+[datetime.datetime(2019, 5, 29, 15, 15)] # Unavailable from EumetSat
+[datetime.datetime(2019, 8, 17, 7, 15)]
+[datetime.datetime(2019, 10, 8, 10, 30)]
+[datetime.datetime(2019, 10, 8, 10, 45)]
+[datetime.datetime(2019, 10, 16, 12, 15)]
+[datetime.datetime(2019, 11, 11, 14, 0)]
+[datetime.datetime(2019, 11, 11, 14, 15)]
+[datetime.datetime(2019, 11, 11, 14, 30)]
+[datetime.datetime(2019, 12, 1, 0, 0)]
+[datetime.datetime(2019, 12, 2, 15, 0)]
+[datetime.datetime(2019, 12, 17, 9, 45)]
+[datetime.datetime(2020, 5, 3, 8, 15)]
+[datetime.datetime(2020, 5, 3, 8, 45)]
+[datetime.datetime(2020, 5, 3, 9, 45)]
+[datetime.datetime(2020, 5, 3, 10, 45)]
+[datetime.datetime(2020, 5, 3, 11, 45)]
+[datetime.datetime(2020, 5, 3, 12, 45)]
+[datetime.datetime(2020, 5, 3, 13, 45)]
+[datetime.datetime(2020, 5, 3, 14, 45)]
+[datetime.datetime(2020, 5, 3, 15, 45)]
+[datetime.datetime(2020, 5, 3, 16, 45)]
+[datetime.datetime(2020, 5, 3, 17, 45)]
)# List of missing times - these are not available from the EumetSat website, or have multiple lines of missing data.
# Lookup from channel key (the number used by callers of read_seviri_channel,
# matching the value embedded in each channel name) to the channel identifier
# string that appears in SEVIRI HRIT file names.
file_dict = {
    0.6 : 'VIS006',
    0.8 : 'VIS008',
    1.6 : 'IR_016',
    3.9 : 'IR_039',
    6.2 : 'WV_062',
    7.3 : 'WV_073',
    8.7 : 'IR_087',
    9.7 : 'IR_097',
    10.8 : 'IR_108',
    12.0 : 'IR_120',
    13.4 : 'IR_134'
}
def read_seviri_channel(channel_list, time, subdomain=(), regrid=False, my_area=geometry.AreaDefinition('pan_africa', 'Pan-Africa on Equirectangular 0.1 degree grid used by GPM', 'pan_africa', {'proj' : 'eqc'}, 720, 730, (-2226389.816, -3896182.178, 5788613.521, 4230140.650)), interp_coeffs=()):
    '''Read SEVIRI data for given channels and time (start of scan).

    Includes functionality to subsample or regrid. Requires satpy.
    Assumes SEVIRI files are located in sev_data_dir1 set above, with
    directory structure sev_data_dir1/Year/YearMonthDay/Hour/

    Args:
        channel_list (list): list of channels to read, see file_dict for
                             possible values
        time (datetime): SEVIRI file date and time, every 00, 15, 30 or
                         45 minutes exactly, denoting the start of the scan.
        subdomain (tuple, optional): If not empty and regrid is False, then
                                     tuple values are (West boundary,
                                     South boundary, East boundary,
                                     North boundary). Defaults to empty tuple.
        regrid (bool, optional): If True, then data is regridded onto the grid
                                 defined by my_area. Defaults to False.
        my_area (AreaDefinition, optional): pyresample.geometry.AreaDefinition
                                            Only used if regrid=True.
                                            Defaults to a pan-Africa
                                            equirectangular ('eqc') 720x730
                                            grid (see default above).
        interp_coeffs (tuple, optional): Interpolation coefficients that may be
                                         used for bilinear interpolation onto
                                         the new grid. Facilitates reuse of the
                                         same coefficients when the regridding
                                         operation is repeated in multiple
                                         calls to read_seviri_channel.
                                         Defaults to empty tuple.

    Returns:
        data (dict): Dictionary containing the following entries:
            lons (ndarray, shape(nlat,nlon)): Array of longitude values
            lats (ndarray, shape(nlat,nlon)): Array of latitude values
            interp_coeffs (tuple): Interpolation coefficients, returned to
                                   speed up future regridding (populated when
                                   regrid is True and they were computed)
            channel (ndarray, shape(nlat,nlon)): One entry per channel in
                                                 channel_list, keyed by
                                                 str(channel)

    Raises:
        UnavailableFileError: if `time` is in the known-bad `unavailable_times`.
        MissingFileError: if fewer than the two required PRO/EPI files are found.
    '''
    ### 0 ### Initialise
    filenames = []
    # Spacecraft in order of preference: when more than one provides the same
    # slot, the most recent instrument wins.
    sat_names = ['MSG4', 'MSG3', 'MSG2', 'MSG1']
    sat_ind = -1
    ### 1 ### Check if files available
    if time in unavailable_times:
        raise UnavailableFileError("SEVIRI observations for "+time.strftime("%Y/%m/%d_%H%M")+" are not available")
    print('time=', time)
    ### 2 ### Sometimes have data from multiple instruments (e.g. 20160504_1045 has MSG3 and MSG1), this ensures most recent is prioritised.
    while ((len(filenames) == 0) & (sat_ind < len(sat_names)-1)):
        sat_ind += 1
        # PRO and EPI files are necessary in all scenarios.
        filenames=glob.glob(sev_data_dir1+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]+time.strftime("*EPI*%Y%m%d%H%M-*"))+ glob.glob(sev_data_dir1+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]+time.strftime("*PRO*%Y%m%d%H%M-*"))
        sev_dir = sev_data_dir1+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]
    ### 3 ### Try alternative directory for SEVIRI data if less than 2 files found before. (sev_data_dir2)
    if ((len(filenames) < 2)):
        sat_ind = -1
        # Same most-recent-instrument priority as above, but in the fallback archive.
        while ((len(filenames) == 0) & (sat_ind < len(sat_names)-1)):
            sat_ind += 1
            #print("A, filenames=", filenames)
            filenames=glob.glob(sev_data_dir2+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]+time.strftime("*EPI*%Y%m%d%H%M-*"))+ glob.glob(sev_data_dir2+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]+time.strftime("*PRO*%Y%m%d%H%M-*"))
            sev_dir = sev_data_dir2+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]
    # These times are present in sev_data_dir1, but corrupt, so force the
    # fallback archive for them.
    if ((time == datetime.datetime(2016,4,11,19,0,0))| (time == datetime.datetime(2018,4,23,7,15,0))):
        filenames=glob.glob(sev_data_dir2+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]+time.strftime("*EPI*%Y%m%d%H%M-*"))+ glob.glob(sev_data_dir2+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]+time.strftime("*PRO*%Y%m%d%H%M-*"))
        sev_dir = sev_data_dir2+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]
    if (len(filenames) < 2):
        #print("B, filenames=", filenames)
        #print('sev_data_dir2+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]+time.strftime("*EPI*%Y%m%d%H%M-*")=', sev_data_dir2+time.strftime("%Y/%Y%m%d/%H/*")+sat_names[sat_ind]+time.strftime("*EPI*%Y%m%d%H%M-*"))
        raise MissingFileError("SEVIRI observations for "+time.strftime("%Y/%m/%d_%H%M")+" are missing. Please check if they can be downloaded and if not, add to the list of unavailable times.")
    else:
        # Add the data files for each requested channel.
        for channel in channel_list:
            filenames=filenames + glob.glob(sev_dir+'*'+file_dict[channel]+time.strftime("*%Y%m%d%H%M-*"))
    #print("C, filenames=", filenames)
    scene = satpy.Scene(reader="seviri_l1b_hrit", filenames=filenames)
    data = {}
    scene.load(channel_list)
    if regrid != False:
        lons, lats = my_area.get_lonlats()
        # Compute the bilinear interpolation coefficients only when the caller
        # did not supply them, and hand them back for reuse in later calls.
        if len(interp_coeffs) == 0:
            interp_coeffs = bilinear.get_bil_info(scene[channel_list[0]].area, my_area, radius=50e3, nprocs=1)
            data.update({'interp_coeffs': interp_coeffs})
        for channel in channel_list:
            data.update({str(channel): bilinear.get_sample_from_bil_info(scene[channel].values.ravel(), interp_coeffs[0], interp_coeffs[1], interp_coeffs[2], interp_coeffs[3], output_shape=my_area.shape)})
    else:
        if len(subdomain) > 0:
            scene = scene.crop(ll_bbox=subdomain)
        lons, lats = scene[channel_list[0]].area.get_lonlats()
        lons = lons[:,::-1] # Need to invert y-axis to get longitudes increasing.
        lats = lats[:,::-1]
        for channel in channel_list:
            data.update({str(channel) : scene[channel].values[:,::-1]})
    data.update({'lons' : lons, 'lats' : lats, 'interp_coeffs' : interp_coeffs})
    # Compressed files are decompressed to TMPDIR by the reader. Now tidy up
    # so the temporary directory does not fill with decompressed segments.
    delete_list = glob.glob(my_tmpdir+'/'+time.strftime("*%Y%m%d%H%M-*"))
    for d in delete_list: os.remove(d)
    return data
class FileError(Exception):
    """Base class for the file-related exceptions below."""
class UnavailableFileError(FileError):
    """Raised when the file is known to be unavailable from EumetSat."""
class MissingFileError(FileError):
    """Raised when the file is missing locally; it may still need to be
    checked for availability from EumetSat.
    """
gpm_dir = '/badc/gpm/data/GPM-IMERG-v6/' # Change as appropriate
def read_gpm(timelist, lon_min=-20., lon_max=52., lat_min=-35., lat_max=38., varname='HQprecipitation'):
    '''Reads GPM IMERG data for specified times and lat-lon limits.

    Args:
        timelist (list): times to be read
        lon_min (float, optional): Longitude of Western boundary of region of interest. Defaults to -20.
        lon_max (float, optional): Longitude of Eastern boundary of region of interest. Defaults to 52.
        lat_min (float, optional): Latitude of Southern boundary of region of interest. Defaults to -35.
        lat_max (float, optional): Latitude of Northern boundary of region of interest. Defaults to 38.
        varname (string, optional): Name of IMERG variable to read. Defaults to HQprecipitation

    Returns:
        lon (ndarray, shape(nlon)): Array of longitude values
        lat (ndarray, shape(nlat)): Array of latitude values
        rain (ndarray, shape(ntimes, nlon, nlat)): Masked array of values for
            varname (negative fill values masked out)
    '''
    # The "NoIR" variables live in separate files; delegate to their reader.
    if ((varname == 'precipitationNoIRCal') or (varname == 'precipitationNoIRUncal')):
        lon, lat, rain = read_gpm_no_ir(timelist, lon_min=lon_min, lon_max=lon_max, lat_min=lat_min, lat_max=lat_max, varname=varname)
        return lon, lat, rain
    rain = []
    for i, time in enumerate(timelist):
        f = get_gpm_filename(time)
        # Use a context manager so each HDF5 file is closed promptly (the
        # previous version never closed the files, leaking handles).
        with h5py.File(f, 'r') as dataset:
            lon = dataset['Grid']['lon'][:]
            lat = dataset['Grid']['lat'][:]
            ind_lon = np.where((lon >= lon_min) & (lon <= lon_max))[0]
            ind_lat = np.where((lat >= lat_min) & (lat <= lat_max))[0]
            if dataset['Grid'][varname].ndim == 3:
                # First (time) axis has a single entry; slice the region of interest.
                rain += [dataset['Grid'][varname][0, ind_lon[0]:ind_lon[-1]+1, ind_lat[0]:ind_lat[-1]+1]]
            else:
                print(("dataset['Grid'][varname].ndim=", dataset['Grid'][varname].ndim))
                sys.exit()
    # Negative values are fill/missing data in IMERG, so mask them out.
    rain = np.ma.masked_array(np.array(rain), mask=(np.array(rain) < 0.0))
    return lon[ind_lon], lat[ind_lat], rain
def get_gpm_filename(time):
    '''Identify the GPM IMERG HDF file corresponding to the given time.'''
    pattern = gpm_dir + time.strftime('%Y/%j/*%Y%m%d-S%H%M*.HDF5')
    matches = glob.glob(pattern)
    # Exactly one file must match; otherwise report the pattern and bail out.
    if len(matches) != 1:
        print(("gpm_dir + time.strftime('%Y/%j/*%Y%m%d-S%H%M*.HDF5')=", pattern))
        print(("f=", matches))
        sys.exit()
    return matches[0]
|
print('''
Exercício 57 da aula 14 de Python
Curso do Guanabara
Day 23 Code Python - 22/05/2018
''')

# Accepted answers mapped to their full descriptions.
lista = {'M': 'MASCULINO', 'F': 'FEMININO'}

# Keep asking until one of the accepted options is typed.
while True:
    aux = input('Qual o sexo [M/F] ? ').strip().upper()
    if aux in lista:
        break
print(aux)
|
from distutils.core import setup
from Cython.Build import cythonize
import numpy

# Compile every Cython source (*.pyx) in this directory into an extension
# module; numpy.get_include() exposes the NumPy C headers to the compiler.
setup(
    name = 'MyProject',
    ext_modules = cythonize(["*.pyx"]),
    include_dirs=[numpy.get_include()]
)
|
""" Cisco_IOS_XR_ethernet_cfm_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class BandwidthNotificationState(Enum):
    """
    BandwidthNotificationState (Enum Class)

    Bandwidth notification state

    .. data:: ok = 1

        Link is not degraded

    .. data:: degraded = 2

        Link is in degraded state

    """

    # Integer codes mirror the YANG-derived type this module represents.
    ok = Enum.YLeaf(1, "ok")

    degraded = Enum.YLeaf(2, "degraded")
class CfmAisInterval(Enum):
    """
    CfmAisInterval (Enum Class)

    Cfm ais interval (AIS transmission interval)

    .. data:: Y_1s = 4

        1s

    .. data:: Y_1m = 6

        1m

    """

    # Integer codes mirror the YANG-derived type this module represents.
    Y_1s = Enum.YLeaf(4, "1s")

    Y_1m = Enum.YLeaf(6, "1m")
class CfmCcmInterval(Enum):
    """
    CfmCcmInterval (Enum Class)

    Cfm ccm interval (CCM transmission interval)

    .. data:: Y_3__DOT__3ms = 1

        3.3ms

    .. data:: Y_10ms = 2

        10ms

    .. data:: Y_100ms = 3

        100ms

    .. data:: Y_1s = 4

        1s

    .. data:: Y_10s = 5

        10s

    .. data:: Y_1m = 6

        1m

    .. data:: Y_10m = 7

        10m

    """

    # Integer codes mirror the YANG-derived type this module represents.
    Y_3__DOT__3ms = Enum.YLeaf(1, "3.3ms")

    Y_10ms = Enum.YLeaf(2, "10ms")

    Y_100ms = Enum.YLeaf(3, "100ms")

    Y_1s = Enum.YLeaf(4, "1s")

    Y_10s = Enum.YLeaf(5, "10s")

    Y_1m = Enum.YLeaf(6, "1m")

    Y_10m = Enum.YLeaf(7, "10m")
class CfmMepDir(Enum):
    """
    CfmMepDir (Enum Class)

    Cfm mep dir (MEP direction)

    .. data:: up = 0

        Up MEP

    .. data:: down = 1

        Down MEP

    """

    # Integer codes mirror the YANG-derived type this module represents.
    up = Enum.YLeaf(0, "up")

    down = Enum.YLeaf(1, "down")
|
#
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Functions for parsing the inheritance for overriding attributes.
Overriding attributes are defined and can be overridden further down the hierarchy.
There are two types - standard and merging. If an attribute is defined as a
merging attribute, only individual attribute elements override, otherwise
the entire attribute is overridden by any later definitions.
The hierarchy is determined as 'depth-first' in multiple inheritance.
eg. Look first for the attribute in the current target. If not found,
look for the attribute in the first target's parent, then in the parent of the parent and so on.
If not found, look for the property in the rest of the target's parents, relative to the
current inheritance level.
This means a target on a higher level could potentially override one on a lower level.
"""
from collections import deque
from functools import reduce
from typing import Dict, List, Any, Deque, Set
from mbed_targets._internal.targets_json_parsers.accumulating_attribute_parser import ALL_ACCUMULATING_ATTRIBUTES
# Attributes whose individual elements merge down the hierarchy rather than
# the whole attribute being replaced (see module docstring).
MERGING_ATTRIBUTES = ("config", "overrides")
# Attributes excluded from override resolution: accumulating attributes are
# handled by the accumulating-attribute parser, while "public" and "inherits"
# describe the hierarchy itself.
NON_OVERRIDING_ATTRIBUTES = ALL_ACCUMULATING_ATTRIBUTES + ("public", "inherits")
def get_overriding_attributes_for_target(all_targets_data: Dict[str, Any], target_name: str) -> Dict[str, Any]:
    """Return the overriding attributes for one target, resolved over its whole hierarchy.

    Args:
        all_targets_data: a dictionary representation of the contents of targets.json
        target_name: the name of the target to find the attributes of

    Returns:
        A dictionary containing all the overriding attributes for the chosen target
    """
    hierarchy = _targets_override_hierarchy(all_targets_data, target_name)
    return _determine_overridden_attributes(hierarchy)
def get_labels_for_target(all_targets_data: Dict[str, Any], target_name: str) -> Set[str]:
    """Collect the labels for a target: every board name in its inheritance tree.

    The returned set carries no ordering information about inheritance.

    Args:
        all_targets_data: a dictionary representation of the contents of targets.json
        target_name: the name of the target to find the labels for

    Returns:
        A set of names of boards that make up the inheritance tree for the target
    """
    hierarchy = _targets_override_hierarchy(all_targets_data, target_name)
    return _extract_target_labels(hierarchy, target_name)
def _targets_override_hierarchy(all_targets_data: Dict[str, Any], target_name: str) -> List[dict]:
"""List all ancestors of a target in order of overriding inheritance (depth-first).
Using a depth-first traverse of the inheritance tree, return a list of targets in the
order of inheritance, starting with the target itself and finishing with its highest ancestor.
Eg. An inheritance tree diagram for target "A" below
D E
| |
B C
|_____|
|
A
Would give us an inheritance order of [A, B, D, C, E]
Args:
all_targets_data: a dictionary representation of all the data in a targets.json file
target_name: the name of the target we want to calculate the attributes for
Returns:
A list of dicts representing each target in the hierarchy.
"""
targets_in_order: List[dict] = []
still_to_visit: Deque[dict] = deque()
still_to_visit.appendleft(all_targets_data[target_name])
while still_to_visit:
current_target = still_to_visit.popleft()
targets_in_order.append(current_target)
for parent_target in reversed(current_target.get("inherits", [])):
still_to_visit.appendleft(all_targets_data[parent_target])
return targets_in_order
def _determine_overridden_attributes(targets_in_order: List[dict]) -> Dict[str, Any]:
    """Resolve all overrideable attributes for a target from its ancestor list.

    Folds the attribute dictionaries of the whole hierarchy together so that
    definitions nearer the target win. Attributes named in MERGING_ATTRIBUTES
    are merged element-wise (only individual keys override) rather than being
    replaced wholesale. Accumulating attributes are stripped at the end - a
    separate parser is responsible for them.

    Args:
        targets_in_order: list of targets in order of inheritance, starting with the target up to its highest ancestor

    Returns:
        A dictionary containing all the overridden attributes for a target
    """
    combined = _reduce_right_list_of_dictionaries(targets_in_order)
    for attribute_name in MERGING_ATTRIBUTES:
        # Merge this one attribute key-by-key across the whole hierarchy.
        per_target_values = [ancestor.get(attribute_name, {}) for ancestor in targets_in_order]
        merged_elements = _reduce_right_list_of_dictionaries(per_target_values)
        if merged_elements:
            combined[attribute_name] = merged_elements
    return _remove_unwanted_attributes(combined)
def _reduce_right_list_of_dictionaries(list_of_dicts: List[dict]) -> Dict[str, Any]:
"""Starting from rightmost dict, merge dicts together, left dict overriding the right."""
return reduce(lambda x, y: {**x, **y}, reversed(list_of_dicts))
def _remove_unwanted_attributes(target_attributes: Dict[str, Any]) -> Dict[str, Any]:
    """Drop every non-overriding attribute (those in NON_OVERRIDING_ATTRIBUTES).

    Accumulating attributes inherit differently and are handled by their own
    parser; 'public' is not inherited; 'inherits' is only needed to build the
    hierarchy. The input dictionary is not mutated.

    Args:
        target_attributes: a dictionary of attributes for a target

    Returns:
        The target attributes with the accumulating attributes removed.
    """
    return {
        key: value
        for key, value in target_attributes.items()
        if key not in NON_OVERRIDING_ATTRIBUTES
    }
def _extract_target_labels(targets_in_order: List[dict], target_name: str) -> Set[str]:
"""Collect a set of all the board names from the inherits field in each target in the hierarchy.
Args:
targets_in_order: list of targets in order of inheritance, starting with the target up to its highest ancestor
target_name: the name of the target to find the labels for
Returns:
A set of names of boards that make up the inheritance tree for the target
"""
labels = {target_name}
for target in targets_in_order:
for parent in target.get("inherits", []):
labels.add(parent)
return labels
|
# Print 0, 1 and 2. range(3) is the idiomatic spelling: start=0 and step=1
# are the defaults, so spelling them out adds noise without changing behavior.
for i in range(3):
    print(i)
|
import datetime
import json
from prediction import predict
import psycopg2
from redis import Redis
from rows import DumpRow, PredictionRow
import time
def predict_thread(data, kind, cursor, connection):
    """Compute predictions of type `kind` for one station and insert them.

    Args:
        data: decoded JSON message; must contain "prediction_table",
              "rows_table", "station_id" and "ids".
        kind: name of the prediction model (e.g. "scikit_lasso").
        cursor: open psycopg2 cursor used for reads and the final insert.
        connection: psycopg2 connection, committed after a successful insert.
    """
    prediction_table, rows_table = data["prediction_table"], data["rows_table"]
    station_id = data["station_id"]
    dump_rows = DumpRow.fetch_all(cursor, rows_table, station_id)
    prediction_rows = PredictionRow.fetch_all(cursor, rows_table, data["ids"])
    predictions = predict(data, dump_rows, prediction_rows, kind)

    # One consistent created_at/updated_at for the whole batch (hoisted out of the loop).
    now = datetime.datetime.utcnow()
    placeholders = []
    params = []
    for prediction in predictions:
        # Truncate the prediction timestamp down to the hour.
        timestamp = datetime.datetime.utcfromtimestamp(prediction["timestamp"])
        timestamp = timestamp.replace(minute=0, second=0, microsecond=0)
        placeholders.append("(%s, %s, %s, %s, %s, %s)")
        # int(...) matches the original "%d" formatting of the value.
        params.extend([station_id, timestamp, int(prediction["value"]), kind, now, now])

    if not placeholders:
        # The previous version emitted "... VALUES " with no tuples here - invalid SQL.
        return

    # Values are bound by the driver instead of %-formatted into the SQL text,
    # fixing quoting/injection problems. Identifiers cannot be bound parameters;
    # prediction_table comes from the incoming message, so it must be trusted -
    # NOTE(review): validate table names upstream if the channel is not fully trusted.
    sql = "INSERT INTO %s (station_id, datetime, available_bikes, kind, created_at, updated_at) VALUES %s" % (
        prediction_table, ", ".join(placeholders))
    cursor.execute(sql, params)
    connection.commit()
if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statements below). Listens on a
    # Redis pub/sub channel and writes predictions into PostgreSQL.
    print "Connecting to PostgreSQL..."
    connection = psycopg2.connect("dbname=velib_development user=velib password=velib")
    cursor = connection.cursor()
    print "Connecting to Redis..."
    redis = Redis(host="localhost", port=6379, db=0)
    pubsub = redis.pubsub()
    pubsub.subscribe("prediction")
    print "Subscribed to 'prediction' channel"
    # Block forever on the channel; each message is a JSON payload describing
    # one station whose predictions should be recomputed.
    for item in pubsub.listen():
        if item["type"] != "message":
            # listen() also yields subscribe/unsubscribe events; skip those.
            continue
        data = json.loads(item["data"])
        print "Received data for station id: %s..." % str(data["station_id"])
        # Run every configured model kind for this station.
        for kind in ("scikit_lasso", "scikit_ridge"):
            predict_thread(data, kind, cursor, connection)
    # Only reached if the listen() loop ever terminates (e.g. connection closed).
    cursor.close()
    connection.close()
|
#!/usr/bin/env python
# coding: utf-8
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
import os
import sys
import logging
import argparse
import collections
from sear.lexicon import DictLexicon
logging.basicConfig(level=logging.INFO)

# CLI: -t/--test switches to the bundled test corpus, -s/--test_size picks
# which corpus, -i/--input is the lexicon directory.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-t", "--test", type=int, choices=(0, 1), default=0)
arg_parser.add_argument("-s", "--test_size", type=str, choices=("tiny", "medium", "large"), default="tiny")
arg_parser.add_argument("-i", "--input", type=str)
arguments = arg_parser.parse_args()

if arguments.test == 1:
    # Test mode: read from the pre-generated test output tree.
    input_path = os.path.join(
        arguments.input,
        "test_out",
        arguments.test_size,
        "sentence"
    )
else:
    input_path = arguments.input

logging.info("Input: %s" % input_path)
logging.info("Initializing lexicon.")
lexicon = DictLexicon(input_path)
lexicon.load()

# term_dict maps term -> (term_id, frequency); collect the frequencies.
# .items() replaces the Python-2-only .iteritems() and behaves identically
# on both interpreters, keeping the script portable.
counter = collections.Counter()
for term, term_id_and_freq in lexicon.term_dict.items():
    counter[term] = term_id_and_freq[1]

# Emit a CSV of terms sorted by descending frequency, numbered from 1.
# NOTE(review): terms containing commas are not escaped - confirm the
# lexicon guarantees comma-free terms, or switch to the csv module.
sys.stdout.write("i,term,freq\n")
for i, (term, freq) in enumerate(counter.most_common(), start=1):
    sys.stdout.write("%d,%s,%d\n" % (i, term, freq))
logging.info("[DONE]")
|
from flask import Flask
from werkzeug.exceptions import HTTPException
from . import blueprints, cors, database, schema
# Configure the app; settings are loaded from the project's config module.
app = Flask(__name__)
app.config.from_object("better_todos.config")

# Setup dependencies: each project module exposes an init(app) hook.
cors.init(app)
database.init(app)
schema.init(app)

# Register the blueprints (modules).
# blueprints.mapping maps a URL prefix to its blueprint object.
for prefix, blueprint in blueprints.mapping.items():
    app.register_blueprint(blueprint, url_prefix=prefix)
# Register any error handlers
@app.errorhandler(HTTPException)
def http_exception(error: HTTPException):
    """
    Serialize any HTTP exception as a JSON body.

    :param error: the HTTP exception that was raised
    :returns: a JSON response carrying the exception's status code
    """
    status = error.code
    body = {"code": status, "message": error.name.lower()}
    return body, status
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.