from math import floor
class StepperError(Exception):
pass
class Stepper():
sMicrostep = (None, 1, 2, 2, 4, 8, 16, 32) # this might be wrong, confusing from uCtrl label
sPulsePerRev = (None, 200, 400, 800, 1600, 3200, 6400)
sCurrent = (0.5, 1.0, 1.5, 2.0, 2.5, 2.8, 3.0, 3.5)
sPkCurrent = (0.7, 1.2, 1.7, 2.2, 2.7, 2.9, 3.2, 4.0)
gearboxRatio = 77
numSwitches = 6
def __init__(self, io, pinDir, pinPul, pinEna = None, maxAngleDeg = 70):
self.io = io
self.pinDir = pinDir
self.pinPul = pinPul
self.pinEna = pinEna
self.pulsePerRev = Stepper.sPulsePerRev[1]
self.anglePos = 0
self.maxAngleDeg = maxAngleDeg
def setupPin(self, pin):
'''
uses rpi IO to set pin to output
'''
self.io.setMode(pin)
def setup(self):
'''
        initializes the pul and dir pins and sets them low
'''
self.setupPin(self.pinDir)
self.setPin(self.pinDir, 0)
self.setupPin(self.pinPul)
self.setPin(self.pinPul, 0)
def setPin(self, pin, state):
'''
general way to set pin
'''
self.io.write(pin, state)
def setMaxAngleDeg(self, angle):
self.maxAngleDeg = angle
def getPin(self, pin):
return self.io.read(pin)
def toggleDir(self):
self.io.toggle(self.pinDir)
def setPulsePerRev(self, ppr):
if ppr not in Stepper.sPulsePerRev:
raise(StepperError("{} not a valid pulse per revolution".format(ppr)))
else:
self.pulsePerRev = ppr
def calcPulses(self, degree):
stepAngle = 360/(self.pulsePerRev*Stepper.gearboxRatio)
pulses = degree/stepAngle
return abs(floor(pulses))
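    # Worked example for the formula above (using the defaults in this class,
    # pulsePerRev = 200 and gearboxRatio = 77): each pulse moves the output
    # shaft 360 / (200 * 77) ≈ 0.0234 degrees, so a 36-degree move works out
    # to 36 * 200 * 77 / 360 = 1540 pulses.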
def reset(self):
'''
returns stepper to original position
'''
self.rotate(self.anglePos*-1)
def store(self):
f = open("stepper.txt", 'w')
f.write(str(self.anglePos))
f.close()
def rotate(self, degree):
'''
        treats the rotation angle as relative and rotates the stepper motor by
        the specified amount, unless the move would exceed the configured bounds
'''
# check bounds
if abs(self.anglePos + degree) > abs(self.maxAngleDeg):
print(f"Exceeds rotation bounds of {self.maxAngleDeg} degrees. No rotation")
return
# check direction
# need to double check direction mapping
if degree < 0:
bDir = 1
else:
bDir = 0
# get number of pulses to drive
pulses = self.calcPulses(degree)
# ok now set direction and pulse somehow
print("pulses: ", pulses, " direction: ", bDir)
self.setPin(self.pinDir, bDir)
if pulses > 8000:
ramp = self.io.calcRamp(pulses)
print(ramp)
self.io.generateRamp(self.pinPul, ramp)
else:
self.io.sendPulses(self.pinPul, pulses, 800)
# store rotation
self.anglePos += degree
if __name__ == "__main__" and __package__ is None:
# testing
#from os import sys, path
#sys.path.append(path.dirname(path.abspath(__file__)))
import time
try:
from .rpi_interface import IO
except Exception:
from rpi_interface import IO
# Set GPIO17/PIN11 for DIR control
DIR = 17
# Set GPIO27/PIN13 for PUL control
PUL = 27
gpio = IO()
stepper = Stepper(gpio, pinDir=DIR, pinPul=PUL)
stepper.setup()
#stepper.gearboxRatio = 1
stepper.setPulsePerRev(200)
wait = input()
start = time.time()
stepper.rotate(360)
print("elapsed time: ",time.time()-start)
wait = input()
stepper.rotate(-36)
wait = input()
stepper.rotate(36)
|
# -*- coding: utf-8 -*-
import time
from concurrent.futures import as_completed
from common.db import read_session_scope, write_session_scope
from config.config_loader import logger, global_config
from mall_spider.common.enums import RiskType, PickleFileType
from mall_spider.dao.stream_risk_dao import get_stream_risk_dao
from mall_spider.job.smorf import Smorf
from mall_spider.model.cmm_sys_stream_risk import CmmSysStreamRisk
from mall_spider.spiders.actions.action_service import ActionService
from mall_spider.spiders.actions.cookie_service import get_cookie_service
from mall_spider.spiders.actions.executor_service import ExecutorService
from mall_spider.spiders.actions.proxy_service import get_proxy_service
from mall_spider.spiders.spider_qt5 import spider_qt5_bootstrap
class TaobaoListPageJob(ActionService, Smorf):
def __init__(self, pool_size):
super().__init__()
self._counter = 0
self._executor = ExecutorService(pool_size)
self._proxy_service = get_proxy_service()
self._cookie_service = get_cookie_service()
s_accounts = global_config.s_accounts
self._account_counter = dict()
for item in s_accounts:
self._account_counter[item['username']] = 0
self._fail_account_counter = dict()
for item in s_accounts:
self._fail_account_counter[item['username']] = 0
def execute(self, num):
cycle_login_num = 0
tasks = self.execute_taobao_integrate_list_actions()
i = 0
future_tasks = {}
with read_session_scope() as session:
_stream_risk_dao = get_stream_risk_dao(session=session)
rsts = _stream_risk_dao.base_query.all()
risk_usernames = set(item.raw_data for item in rsts)
s_accounts = global_config.s_accounts
for task in tasks:
account = s_accounts[self._counter % len(s_accounts)]
proxy = self._proxy_service.get_static_proxy(account['username'])
# raw_data = task.raw_data
# account = raw_data['account']
self._counter += 1
i += 1
if account['username'] in risk_usernames:
continue
if i < num:
future_tasks[
self._executor.submit(self._execute_taobao_integrate_list_actions, task, account, proxy)] = task
for future in as_completed(future_tasks):
try:
account, flag, force = future.result()
if flag:
if force:
with write_session_scope() as session:
_stream_risk_dao = get_stream_risk_dao(session=session)
self._risk(stream_risk_dao=_stream_risk_dao, account=account)
# self._login(account=account, force=True if cycle_login_num == 0 else False)
cycle_login_num += 1
else:
self._fail_account_counter[account['username']] += 1
if self._fail_account_counter[account['username']] > 2:
self._cookie_service.remove(account=account)
with write_session_scope() as session:
_stream_risk_dao = get_stream_risk_dao(session=session)
self._risk(stream_risk_dao=_stream_risk_dao, account=account)
# self._login(account=account, force=True if cycle_login_num == 0 else False)
cycle_login_num += 1
else:
url = 'https://s.m.taobao.com/h5?q=Flyco%2BFR5218&search=%E6%8F%90%E4%BA%A4&tab=all'
# url = 'https://s.m.taobao.com/h5?q=Flyco%2BFR5218&search=%E6%8F%90%E4%BA%A4&tab=all'
proxy = self._proxy_service.get_origin_static_proxy(account['username'])
cookies = self._cookie_service.load(account=account, type_=PickleFileType.origin_cookie)
time.sleep(5)
cookies, origin_cookies = spider_qt5_bootstrap(url=url, account=account, risk=False,
proxy=proxy, cookies=cookies)
self._cookie_service.dump(cookies=cookies, account=account)
self._cookie_service.dump(cookies=origin_cookies, account=account,
type_=PickleFileType.origin_cookie)
self._account_counter[account['username']] = 0
else:
self._fail_account_counter[account['username']] = 0
self._account_counter[account['username']] += 1
if self._account_counter[account['username']] >= 2:
url = 'https://s.m.taobao.com/h5?q=Flyco%2BFR5218&search=%E6%8F%90%E4%BA%A4&tab=all'
# url = 'https://s.m.taobao.com/h5?q=Flyco%2BFR5218&search=%E6%8F%90%E4%BA%A4&tab=all'
proxy = self._proxy_service.get_origin_static_proxy(account['username'])
cookies = self._cookie_service.load(account=account, type_=PickleFileType.origin_cookie)
time.sleep(5)
cookies, origin_cookies = spider_qt5_bootstrap(url=url, account=account, risk=False,
proxy=proxy, cookies=cookies)
self._cookie_service.dump(cookies=cookies, account=account)
self._cookie_service.dump(cookies=origin_cookies, account=account,
type_=PickleFileType.origin_cookie)
self._account_counter[account['username']] = 0
except Exception as e:
logger.error(e)
def _risk(self, stream_risk_dao, account):
entity = stream_risk_dao.query_one(_filter=[CmmSysStreamRisk.type == int(RiskType.taobao_search),
CmmSysStreamRisk.raw_data == account['username']])
if not entity:
entity = CmmSysStreamRisk()
entity.raw_data = account['username']
entity.type = int(RiskType.taobao_search)
stream_risk_dao.insert_entity(entity=entity)
def init(self):
super().init()
def init_argparse(self, parser):
super().init_argparse(parser)
def process(self):
# return super().process()
self.execute(2)
time.sleep(10)
if __name__ == "__main__":
s = TaobaoListPageJob(10)
logger.info("start to execute taobao_list_page job")
s.run()
# s.process()
logger.error("exit taobao_list_page job")
|
import time
import logging
from instabot.api import api
from instachatbot.nodes import Node, MenuNode
from instachatbot.state import Conversation
from instachatbot.storage import Storage
class InstagramChatBot:
def __init__(self, menu: MenuNode, storage: Storage = None, trigger=None):
self.logger = logging.getLogger('InstagramChatBot')
self._api = api.API()
self.menu_node = menu
self._last_message_timestamp = {}
self.conversation = Conversation(menu, storage)
self.user_id = None
self.trigger = trigger
def login(self, username, password, proxy=None):
self._api.login(username, password, proxy=proxy)
self.user_id = self._api.user_id
def start(self, polling_interval=1):
start_timestamp = time.time() * 1000000
while True:
time.sleep(polling_interval)
# approve pending threads
self._api.get_pending_inbox()
for thread in self._api.last_json['inbox']['threads']:
self._api.approve_pending_thread(thread['thread_id'])
# process messages
self._api.get_inbox_v2()
for message in self.parse_messages(
self._api.last_json, start_timestamp):
self.logger.debug('Got message from %s: %s',
message['from'], message['text'])
context = {
'bot': self
}
self.handle_message(message, context)
def stop(self):
self._api.logout()
def parse_messages(self, body, start_timestamp):
threads = body['inbox']['threads']
for thread in threads:
if thread.get('is_group'):
continue
thread_id = thread['thread_id']
last_seen_timestamp = thread.get(
'last_seen_at', {}).get(
str(self.user_id), {}).get('timestamp', 0)
if last_seen_timestamp:
last_seen_timestamp = int(last_seen_timestamp)
last_seen_timestamp = max(
last_seen_timestamp,
self._last_message_timestamp.get(thread_id, 0))
items = thread.get('items')
users = {user['pk']: user['username'] for user in thread['users']}
users[body['viewer']['pk']] = body['viewer']['username']
for item in items:
if start_timestamp > item['timestamp']:
continue
if last_seen_timestamp >= item['timestamp']:
continue
self._last_message_timestamp[thread_id] = item['timestamp']
yield {
'id': str(item['item_id']),
'date': item['timestamp'],
'type': item['item_type'],
'text': item.get('text'),
'from': {
'id': str(item['user_id']),
'username': users.get(item['user_id'])
},
'chat': {
'id': str(thread_id),
'title': thread['thread_title'],
'type': thread['thread_type'],
}
}
def handle_message(self, message, context):
chat_id = message['chat']['id']
state = self.conversation.get_state(chat_id) or {}
node: Node = state.get('node') or self.menu_node
if not state:
# start menu only if trigger message is sent
if self.trigger and message.get('text') != self.trigger:
return
jump = node.handle(message, state, context)
self.conversation.save_state(chat_id, state)
if jump:
self.handle_message(message, context)
# show root menu again only if trigger is not required
if not state and not self.trigger:
self.handle_message(message, context)
def get_user_id_from_username(self, username):
self._api.search_username(username)
if "user" in self._api.last_json:
return str(self._api.last_json["user"]["pk"])
else:
return None
def send_direct_message(self, user_id, text):
logging.debug('Sending message to %s: %s', user_id, text)
self._api.send_direct_item(item_type='text', users=[user_id],
text=text)
def send_direct_photo(self, user_id, image_path):
logging.debug('Sending photo to %s: %s', user_id, image_path)
self._api.send_direct_item(item_type='photo', users=[user_id],
filepath=image_path)
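# Illustrative usage sketch. Only the InstagramChatBot calls below come from
# the class above; how the MenuNode tree is built depends on instachatbot.nodes
# and is assumed here.
#
#   menu = MenuNode(...)  # build the conversation tree (structure assumed)
#   bot = InstagramChatBot(menu=menu, trigger='/start')
#   bot.login('my_username', 'my_password')
#   bot.start(polling_interval=2)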
|
import numpy as np
def nextthing():
var = np.random.rand(1)
if var<0.3:
print('Attack human!')
elif var<0.7:
print('Be cute.')
else:
print('Make weird noises.')
|
import os
from pathlib import Path
from django.contrib import messages
from django.contrib import admin
from django.contrib.admin import display # was introduced in Django 3.2
from django.conf import settings
from .models import Account
from .models import Brand
from .models import DataSource
from .models import DataSourceDetail
# Register your models here.
class BrandInline(admin.TabularInline):
model = Brand
extra = 2
def save_model(self, request, obj, form, change):
print('ddd')
# https://show-me-the-money.tistory.com/entry/Django-Admin%EC%97%90%EC%84%9C-Form-%EC%A0%80%EC%9E%A5-%EC%BB%A4%EC%8A%A4%ED%84%B0%EB%A7%88%EC%9D%B4%EC%A7%95%ED%95%98%EA%B8%B0
class AccountAdmin(admin.ModelAdmin):
fieldsets = [ # choose editable attr
(None, {'fields': ['s_acct_title']}),
# ('Date information', {'fields': ['date_reg'], 'classes': ['collapse']}),
]
inlines = [BrandInline]
list_display = ('s_acct_title', 'date_reg')
list_filter = ['date_reg']
search_fields = ['s_acct_title']
def save_model(self, request, obj, form, change):
# https://devnauts.tistory.com/197
obj.user = request.user
# self.message_user(request, 'dsafasdf', level=messages.ERROR)
# # You can also use warning, debug, info and success in place of error
super().save_model(request, obj, form, change)
# print(obj.pk)
s_acct_root_abs_path = os.path.join(settings.SV_STORAGE_ROOT, str(obj.pk))
if not os.path.isdir(s_acct_root_abs_path):
os.makedirs(s_acct_root_abs_path)
def save_formset(self, request, form, formset, change):
# https://jay-ji.tistory.com/32
formset.save()
n_acct_pk = None
n_brand_pk = None
if formset.is_valid():
for form1 in formset:
if 'sv_acct' in form1.cleaned_data.keys():
n_acct_pk = form1.cleaned_data['sv_acct'].pk
if 's_brand_title' in form1.cleaned_data.keys():
if not form1.cleaned_data['DELETE']:
try:
o_brand_appended = Brand.objects.get(s_brand_title=form1.cleaned_data['s_brand_title'])
except Brand.DoesNotExist:
o_brand_appended = None
                        n_brand_pk = o_brand_appended.pk if o_brand_appended else None
if n_acct_pk and n_brand_pk:
s_brand_root_abs_path = os.path.join(settings.SV_STORAGE_ROOT, str(n_acct_pk), str(n_brand_pk))
if not os.path.isdir(s_brand_root_abs_path):
os.makedirs(s_brand_root_abs_path)
def delete_model(self, request, obj):
obj.user = request.user
print('deleted')
super().delete_model(request, obj)
class DataSourceInline(admin.TabularInline):
# https://stackoverflow.com/questions/63916655/how-can-i-access-attributes-of-a-model-in-admin-tabularinline-that-is-at-the-end
model = DataSource
extra = 2
readonly_fields = ['date_reg', ]
class BrandAdmin(admin.ModelAdmin):
# fieldsets = [ # choose editable attr
# (None, {'fields': ['s_brand_title']}),
# # ('Date information', {'fields': ['date_reg'], 'classes': ['collapse']}),
# ]
# readonly_fields = ['s_brand_title', ]
inlines = [DataSourceInline]
list_display = ('sv_acct', 's_brand_title', 'date_reg')
list_filter = ['date_reg']
search_fields = ['sv_acct__s_acct_title', 's_brand_title']
def save_model(self, request, obj, form, change):
obj.user = request.user
# s_base_dir = Path(__file__).resolve().parent.parent
# print(form.cleaned_data)
super().save_model(request, obj, form, change)
# print(obj.pk)
s_acct_root_abs_path = os.path.join(settings.SV_STORAGE_ROOT, str(obj.pk))
if not os.path.isdir(s_acct_root_abs_path):
os.makedirs(s_acct_root_abs_path)
def save_formset(self, request, form, formset, change):
# https://jay-ji.tistory.com/32
formset.save()
if formset.is_valid():
n_acct_pk = None
n_brand_pk = None
n_data_source_pk = None
# print(formset.cleaned_data)
for form1 in formset:
# print(form1.cleaned_data)
if 'sv_brand' in form1.cleaned_data.keys():
n_acct_pk = form1.cleaned_data['sv_brand'].sv_acct_id
n_brand_pk = form1.cleaned_data['sv_brand'].pk
if not form1.cleaned_data['DELETE']:
try:
o_data_source_appended = DataSource.objects.get(sv_brand=n_brand_pk, n_data_source=form1.cleaned_data['n_data_source'])
                        except DataSource.DoesNotExist:
o_data_source_appended = None
                        n_data_source_pk = o_data_source_appended.pk if o_data_source_appended else None
if n_acct_pk and n_brand_pk and n_data_source_pk and n_data_source_pk != 0:
s_data_source_root_abs_path = os.path.join(settings.SV_STORAGE_ROOT, str(n_acct_pk),
str(n_brand_pk), str(o_data_source_appended))
if not os.path.isdir(s_data_source_root_abs_path):
os.makedirs(s_data_source_root_abs_path)
def delete_model(self, request, obj):
obj.user = request.user
print('deleted')
super().delete_model(request, obj)
# Register your models here.
class DataSourceIdInline(admin.TabularInline):
model = DataSourceDetail
extra = 1
# readonly_fields = ['s_data_source_id', ]
class DataSourceAdmin(admin.ModelAdmin):
# fieldsets = [ # choose editable attr
# (None, {'fields': ['s_brand_title']}),
# # ('Date information', {'fields': ['date_reg'], 'classes': ['collapse']}),
# ]
readonly_fields = ['n_data_source', ]
inlines = [DataSourceIdInline]
list_display = ('get_account', 'sv_brand', 'n_data_source', 'date_reg')
list_filter = ['date_reg']
search_fields = ['sv_brand__sv_acct__s_acct_title', 'sv_brand__s_brand_title', 'n_data_source']
# https://stackoverflow.com/questions/163823/can-list-display-in-a-django-modeladmin-display-attributes-of-foreignkey-field
    @display(ordering='sv_brand__sv_acct', description='Account name')
def get_account(self, obj):
return obj.sv_brand.sv_acct
def save_model(self, request, obj, form, change):
obj.user = request.user
# print(request.POST)
# print(obj)
s_base_dir = Path(__file__).resolve().parent.parent
# print(s_base_dir)
super().save_model(request, obj, form, change)
def save_formset(self, request, form, formset, change):
# https://jay-ji.tistory.com/32
if formset.is_valid():
n_acct_id = None
n_brand_pk = None
n_data_source_id = None
# print(formset.cleaned_data)
for form1 in formset:
# print(form1.cleaned_data)
if 'sv_data_source' in form1.cleaned_data.keys():
# print(form1.cleaned_data['sv_data_source'].sv_brand_id)
n_brand_pk = form1.cleaned_data['sv_data_source'].sv_brand_id # 48
# print(n_brand_pk)
if not form1.cleaned_data['DELETE']:
s_data_source_serial_id = form1.cleaned_data['s_data_source_serial']
# print(form1.cleaned_data['sv_data_source'].validate_source_id(s_data_source_serial_id))
if form1.cleaned_data['sv_data_source'].validate_source_id(s_data_source_serial_id):
try:
o_brand = Brand.objects.get(pk=n_brand_pk)
except Brand.DoesNotExist:
o_brand = None
                            n_acct_id = o_brand.sv_acct_id if o_brand else None
n_data_source_id = form1.cleaned_data['sv_data_source']
if n_acct_id and n_brand_pk and n_data_source_id and n_data_source_id != 0 \
and len(s_data_source_serial_id):
s_data_source_id_abs_path = os.path.join(settings.SV_STORAGE_ROOT, str(n_acct_id),
str(n_brand_pk), str(n_data_source_id),
s_data_source_serial_id)
if not os.path.isdir(s_data_source_id_abs_path):
os.makedirs(s_data_source_id_abs_path)
else:
                            self.message_user(request, 'The data source serial number does not match the required format. Please delete it.', level=messages.ERROR)
formset.save()
def delete_model(self, request, obj):
obj.user = request.user
print('deleted')
super().delete_model(request, obj)
class DataSourceDetailAdmin(admin.ModelAdmin):
# fieldsets = [ # choose editable attr
# (None, {'fields': ['s_brand_title']}),
# # ('Date information', {'fields': ['date_reg'], 'classes': ['collapse']}),
# ]
readonly_fields = ['sv_data_source', ]
list_display = ('get_account', 'get_brand', 'sv_data_source', 's_data_source_serial', )
list_filter = ['date_reg']
search_fields = ['sv_data_source__sv_brand__sv_acct__s_acct_title', 'sv_data_source__sv_brand__s_brand_title',
'sv_data_source__n_data_source', 's_data_source_serial']
# https://stackoverflow.com/questions/163823/can-list-display-in-a-django-modeladmin-display-attributes-of-foreignkey-field
    @display(ordering='sv_brand__sv_acct', description='Account name')
def get_account(self, obj):
return obj.sv_data_source.sv_brand.sv_acct
    @display(ordering='sv_data_source__sv_brand', description='Brand name')
def get_brand(self, obj):
return obj.sv_data_source.sv_brand
admin.site.register(Account, AccountAdmin)
admin.site.register(Brand, BrandAdmin)
admin.site.register(DataSource, DataSourceAdmin)
admin.site.register(DataSourceDetail, DataSourceDetailAdmin)
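# Directory layout produced by the save hooks above (illustrative; all paths
# are rooted at settings.SV_STORAGE_ROOT):
#
#   SV_STORAGE_ROOT/<account_pk>/
#   SV_STORAGE_ROOT/<account_pk>/<brand_pk>/
#   SV_STORAGE_ROOT/<account_pk>/<brand_pk>/<data_source>/
#   SV_STORAGE_ROOT/<account_pk>/<brand_pk>/<data_source>/<data_source_serial>/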
|
from userver.object.asserts import Assertions
from userver.object.device import FieldDevice
from utils.errors import PatchError
from wtforms import Form, StringField, validators
from wtforms.validators import StopValidation
from userver.object.const import ClassType
from binascii import unhexlify
from .validators import eui_validator, appskey_validator, nwkskey_validator, addr_validator, appkey_validator, class_type_validator, fcnt_validator, bool_validator, addr_available
class ABPDev(Form):
name = StringField("Name", validators=[validators.Optional(strip_whitespace=True)])
dev_eui = StringField("DevEUI", validators=[validators.InputRequired('Dev EUI is Required'), eui_validator])
app_eui = StringField("AppEUI", validators=[validators.InputRequired('APP EUI is Required'), eui_validator])
addr = StringField("DevAddr", validators=[validators.Optional(strip_whitespace=True), addr_validator], filters=[lambda x: x or None])
nwkskey = StringField("NWKSKEY", validators=[validators.InputRequired('Nwkskey is Required'), nwkskey_validator, ])
appskey = StringField("AppSKey", validators=[validators.Optional(strip_whitespace=True), appskey_validator, ], filters=[lambda x: x or None])
class OTAADev(Form):
name = StringField("Name", validators=[validators.Optional(strip_whitespace=True)])
dev_eui = StringField("DevEUI", validators=[validators.InputRequired('Dev EUI is Required'), eui_validator])
app_eui = StringField("AppEUI", validators=[validators.InputRequired('APP EUI is Required'), eui_validator])
appkey = StringField("AppKey", validators=[validators.Optional(strip_whitespace=True), appkey_validator, ], filters=[lambda x: x or None])
class Field:
def __init__(self, name='', validators=(), nullable=False):
self.name = name
self.validators = validators
self.nullable=nullable
def validate(self):
if self.nullable is True and self.data is None:
return True
else:
for validator in iter(self.validators):
validator(None, self)
class PatchDeviceForm:
name = Field(name='name')
addr = Field(name='addr', validators=[addr_validator, addr_available])
nwkskey = Field(name='nwkskey', validators=[nwkskey_validator, ])
appskey = Field(name='appskey', validators=[appskey_validator, ])
dev_class = Field(name='dev_class', validators=[class_type_validator, ])
fcnt_up = Field(name='fcnt_up', validators=[fcnt_validator, ])
fcnt_down = Field(name='fcnt_down', validators=[fcnt_validator, ])
que_down = Field(name='que_down')
check_fcnt = Field(name='check_fcnt', validators=[bool_validator, ])
adr = Field(name='adr', validators=[bool_validator, ])
appkey = Field(name='appkey', validators=[appkey_validator,], nullable=True)
def __init__(self, kwargs):
self.fields = []
for name, value in kwargs.items():
try:
field = getattr(self, name)
if isinstance(field, Field):
field.data = value
self.fields.append(field)
else:
raise PatchError('Application', name)
except AttributeError:
raise PatchError('Application', name)
def validator(self):
self.errors = {}
for field in self.fields:
try:
field.validate()
except StopValidation as error:
field.errors = [str(error), ]
self.errors[field.name] = field.errors
if len(self.errors) == 0:
return True
else:
return False
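# Minimal usage sketch for PatchDeviceForm (field values are illustrative):
#
#   form = PatchDeviceForm({'name': 'node-1', 'adr': True, 'fcnt_up': 0})
#   if form.validator():
#       pass  # apply the patch to the device
#   else:
#       print(form.errors)  # {field_name: [error, ...]} for failed validators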
# class PatchDevice:
# __fields = (FieldDevice.name, FieldDevice.addr, FieldDevice.nwkskey,
# FieldDevice.appskey, FieldDevice.dev_class, FieldDevice.fcnt_up,
# FieldDevice.fcnt_down, 'que_down', FieldDevice.check_fcnt, FieldDevice.adr)
# @classmethod
# def patch(cls, device, kwargs):
# for name, value in kwargs.items():
# if name == 'que_down':
# device.que_down.clear()
# elif name in cls.__fields:
# if name == FieldDevice.dev_class:
# value = ClassType(value)
# elif name == FieldDevice.addr:
# Assertions.s_addr(value)
# value = unhexlify(value)
# elif name == FieldDevice.appskey:
# Assertions.s_appskey(value)
# value = unhexlify(value)
# elif name == FieldDevice.nwkskey:
# Assertions.s_nwkskey(value)
# value = unhexlify(value)
# setattr(device, name, value)
# elif name == 'appkey':
# if value is None:
# device.join_device = None
# else:
# device.join_device.appkey = unhexlify(value)
# else:
# raise PatchError('Application', name)
# device.update()
|
import fnmatch
import re
from pathlib import Path
from typing import Iterable, List
class PathList:
def __init__(self, root: Path, *paths):
self.root = root.resolve()
self.paths = []
self.events = []
if paths:
self.extend(paths)
def glob(self, pattern: str):
self.paths.extend(self.root.glob(pattern))
def rglob(self, pattern: str):
self.paths.extend(self.root.rglob(pattern))
    def rfilter(self, pattern: str):
        # rebuild the list instead of removing entries while iterating over it
        regex = re.compile(fnmatch.translate(pattern))
        self.paths = [path for path in self.paths if not regex.match(str(path))]
    def append(self, obj) -> None:
        if isinstance(obj, (str, Path)):
            path = Path(obj)
            if path in self.paths:
                return
            self.paths.append(path)
elif hasattr(obj, 'event'):
if obj in self.events:
return
self.events.append(obj)
else:
assert False
def extend(self, paths: Iterable[Path]):
if isinstance(paths, PathList):
for p in paths:
self.append(paths.root / p)
else:
for p in paths:
self.append(p)
def absolute(self) -> List[Path]:
return [self.root / path.as_posix() for path in self]
def __len__(self):
return self.paths.__len__()
def __getitem__(self, index) -> Path:
return self.paths.__getitem__(index)
    def __setitem__(self, index, path: Path):
        self.paths.__setitem__(index, Path(path))
def __delitem__(self, index):
self.paths.__delitem__(index)
def __iter__(self) -> Iterable[Path]:
return self.paths.__iter__()
def __reversed__(self) -> Iterable[Path]:
return self.paths.__reversed__()
def __contains__(self, index) -> bool:
return self.paths.__contains__(index)
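# Usage sketch (root and patterns are illustrative):
#
#   paths = PathList(Path('.'))
#   paths.rglob('*.py')       # collect every Python file under the root
#   paths.rfilter('*test*')   # then drop anything whose path matches *test*
#   print(paths.absolute())   # paths resolved against the root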
|
# coding=utf-8
# Given a list of positive integers L, e.g. L = [2, 8, 3, 50], determine the
# parity of the last non-zero digit of the product of all numbers in the list;
# output 1 if it is odd and 0 if it is even. For this sample the output is 0.
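# Worked example: 2 * 8 * 3 * 50 = 2400; stripping the trailing zeros leaves
# 24, whose last digit 4 is even, so the program prints 0.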
L = [2,8,3,50]
res = 1
for item in L:
res *= item
while not res % 10:
    res //= 10
print(1 if res % 2 else 0)
|
"""
Util module
This module contains miscellaneous classes and constants that are used by the game engine
"""
import pygame
import math
SERVER = 0
CLIENT = 1
COMBINED = 2
with open('config') as f:
try:
configuration = {line.strip().split('=')[0] : line.strip().split('=')[1] for line in f}
DEFAULT_PORT = int(configuration.get('defaultport', 6658))
MAX_PLAYERS = int(configuration.get('maxplayers', 100))
FPS = int(configuration.get('maxfps', 60))
except IndexError:
raise SyntaxError('[ERROR] Invalid config file. Configuration cannot be loaded.')
except ValueError:
raise ValueError('[ERROR] Invalid values for settings in config file.')
#DEFAULT_PORT = 6658
#MAX_PLAYERS = 100
#FPS = 60
DISPLAY_FLAGS = pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.RESIZABLE
def calcChecksum(data):
"""
Calculate a checksum
"""
checksum = 0
for a in range(len(data)):
checksum += data[a]
return checksum.to_bytes(3, 'big')
def calcDistance(ent1, ent2):
"""
Calculate the distance between two entities
"""
dimensionDelta = (ent1.dimension-ent2.dimension) * 1000
deltaPos = [ent1.pos[a]-ent2.pos[a] for a in (0, 1)]
return (deltaPos[0]**2 + deltaPos[1]**2)**0.5 + dimensionDelta
def calcDirection(pos1, pos2):
"""
Calculate a direction between two positions
pos1 is current position
pos2 is previous position
"""
theta = math.atan2(pos1[0]-pos2[0], pos1[1]-pos2[1])/math.pi
return theta
class ArgumentHandler:
"""
An object to store and handle the command line arguments passed into the game at runtime
"""
def __init__(self, arguments):
self.results = {}
self.args = arguments
self.handleArgs()
def handleArgs(self):
"""
Process the arguments passed in, and set the results dictionary accordingly
"""
# Iterate the arguments and handle them
i = 0
while i < len(self.args):
arg = self.args[i]
# Handle the mod toggle argument
if arg == '--disableMods':
self.results['loadCustomMods'] = False
# Handle the runtime type argument, defaulting to server if invalid
elif arg == '--mode' and i != len(self.args)-1:
self.results['runtimeType'] = {'SERVER' : SERVER,
'CLIENT' : CLIENT,
'COMBINED' : COMBINED
}.get(self.args[i+1], SERVER)
del self.args[i+1]
# Handle the address and port arguments
elif arg == '--address' and i != len(self.args)-1:
self.results['address'] = self.args[i+1]
del self.args[i+1]
elif arg == "--seed" and i != len(self.args)-1:
try:
x = float(self.args[i+1])
except ValueError:
x = 0
self.results['seed'] = (4*x)/(x**2+1)
del self.args[i+1]
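                # The transform (4*x)/(x**2 + 1) bounds the seed to [-2, 2] for
                # any finite input, e.g. x=1 maps to 2.0 and x=10 to 40/101 ≈ 0.396.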
# Handle the AI argument
elif arg == '--enableSpecialAI':
self.results['specialAI'] = True
# Print a warning message if an unknown argument is given
else:
print('[WARNING] Unknown argument: {}'.format(arg))
del self.args[i]
def getRuntimeType(self):
"""
Return the type of game being run, either client, server, or combined
"""
return self.results.get('runtimeType', COMBINED)
def getRunSpecialAI(self):
"""
Return whether or not to run the neural network ai, as opposed to the normal ai
"""
return self.results.get('specialAI', False)
def getShouldLoadCustomMods(self):
"""
Return whether to load custom mods, beyond the default game
"""
return self.results.get('loadCustomMods', True)
def getSeed(self):
"""
Return the world generation seed
"""
return self.results.get('seed', 0)
def getConnectingAddress(self):
"""
Return the address that this client is going to connect to
"""
return self.results.get('address', '')
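# Usage sketch (grounded in the ArgumentHandler API above; values illustrative):
#
#   import sys
#   handler = ArgumentHandler(sys.argv[1:])
#   if handler.getRuntimeType() == CLIENT:
#       print('Connecting to', handler.getConnectingAddress() or 'localhost')
#   print('World seed:', handler.getSeed())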
|
"""
This module contains the structures related to areas of interest.
"""
from typing import NamedTuple, Optional, Union
class AreaOfInterest(NamedTuple):
"""
.. versionadded:: 2.3
This is the area of interest for:
- Transformations
- Querying for CRS data.
"""
#: The west bound in degrees of the area of interest.
west_lon_degree: float
#: The south bound in degrees of the area of interest.
south_lat_degree: float
#: The east bound in degrees of the area of interest.
east_lon_degree: float
#: The north bound in degrees of the area of interest.
north_lat_degree: float
class AreaOfUse(NamedTuple):
"""
.. versionadded:: 2.0
Area of Use for CRS, CoordinateOperation, or a Transformer.
"""
#: West bound of area of use.
west: float
#: South bound of area of use.
south: float
#: East bound of area of use.
east: float
#: North bound of area of use.
north: float
#: Name of area of use.
name: Optional[str] = None
@property
def bounds(self):
return self.west, self.south, self.east, self.north
def __str__(self):
return f"- name: {self.name}\n" f"- bounds: {self.bounds}"
class BBox:
"""
Bounding box to check if data intersects/contains other
bounding boxes.
.. versionadded:: 3.0
"""
def __init__(self, west: float, south: float, east: float, north: float):
self.west = west
self.south = south
self.east = east
self.north = north
def intersects(self, other: Union["BBox", AreaOfUse]) -> bool:
"""
Parameters
----------
other: BBox
The other BBox to use to check.
Returns
-------
bool:
True if this BBox intersects the other bbox.
"""
return (
self.west < other.east
and other.west < self.east
and self.south < other.north
and other.south < self.north
)
def contains(self, other: Union["BBox", AreaOfUse]) -> bool:
"""
Parameters
----------
other: Union["BBox", AreaOfUse]
The other BBox to use to check.
Returns
-------
bool:
True if this BBox contains the other bbox.
"""
return (
other.west >= self.west
and other.east <= self.east
and other.south >= self.south
and other.north <= self.north
)
def __repr__(self) -> str:
return (
f"BBox(west={self.west},south={self.south},"
f"east={self.east},north={self.north})"
)
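# Minimal usage sketch (coordinates are illustrative):
#
#   box = BBox(west=0.0, south=0.0, east=10.0, north=10.0)
#   box.contains(BBox(1.0, 1.0, 5.0, 5.0))        # True
#   box.intersects(BBox(9.0, 9.0, 20.0, 20.0))    # True
#   box.intersects(BBox(20.0, 20.0, 30.0, 30.0))  # False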
|
"""LimitlessLED Version 6 Bridge
"""
import types
import math
import logging
import time
from mookfist_lled_controller.colors import color_from_rgb
from mookfist_lled_controller.bridge import BaseBridge, BaseGroup, Command
import six
import binascii
GROUPS = (0, 1, 2, 3, 4, 'all')
def format_hex(i):
if i < 16:
i = hex(i).replace('0x', '0x0')
else:
i = hex(i)
return i
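# e.g. format_hex(5) -> '0x05' and format_hex(26) -> '0x1a', so single-digit
# values are padded to two hex digits in the session-ID debug output below.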
class CustomCommand(Command):
"""A ver6 Command class."""
def __init__(self, size):
super(CustomCommand, self).__init__(size)
self._set_preamble()
def _set_preamble(self):
self[0] = 0x80
self[1] = 0x00
self[2] = 0x00
self[3] = 0x00
self[4] = 0x11
self[7] = 0x00
self[8] = 0x02
self[9] = 0x00
def checksum(self):
"""Calculate the checksum value of command"""
return sum(bytearray(self._cmd[10:21])) & 0xff
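        # e.g. if the bytes at positions 10..20 sum to 0x133, the masked
        # checksum value is 0x133 & 0xff = 0x33.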
class Bridge(BaseBridge):
"""Bridge API
Valid groups are 1 through 4. You can provide a list of groups
or use the word 'all' to send the command to all groups.
>>> bridge = Bridge('192.168.1.100')
>>> bridge.color(100, [1,2,4])
>>> bridge.brightness(50, 'all')
    >>> bridge.off(3)
Attributes:
ip: Bridge IP or hostname
port: Port number of the bridge
pause: Number of milliseconds to wait before sending a command
repeat: Number of times to resend a command
timeout: Socket timeout
group_class: Custom group class
"""
def __init__(self, ip, port=5987, *args, **kwargs):
BaseBridge.__init__(self, ip, port, *args, **kwargs)
self._group_cache = {}
self._Group = kwargs.get('group_class', Group)
self.logger = logging.getLogger('mlledctrl.bridge6')
self._wb1 = None
self._wb2 = None
self._cmd_counter = 0x01
self._last_set_group = None
def _init_group(self, group=1):
g = self._get_group(group)
return g
def _get_session_ids(self):
self.logger.debug('Getting session IDs')
cmd = Command(27)
cmd[0] = 0x20
cmd[1] = 0x00
cmd[2] = 0x00
cmd[3] = 0x00
cmd[4] = 0x16
cmd[5] = 0x02
cmd[6] = 0x62
cmd[7] = 0x3a
cmd[8] = 0xd5
cmd[9] = 0xed
cmd[10] = 0xa3
cmd[11] = 0x01
cmd[12] = 0xae
cmd[13] = 0x08
cmd[14] = 0x2d
cmd[15] = 0x46
cmd[16] = 0x61
cmd[17] = 0x41
cmd[18] = 0xa7
cmd[19] = 0xf6
cmd[20] = 0xdc
cmd[21] = 0xaf
cmd[22] = 0xf3
cmd[23] = 0xf7
cmd[24] = 0x00
cmd[25] = 0x00
cmd[26] = 0x1e
db = self._send_raw(cmd)
if db:
self.logger.debug('Response: %s' % [binascii.hexlify(db[i:i+1]) for i in range(0, len(db), 1)])
self._wb1 = db[19]
self._wb2 = db[20]
self.logger.debug('Session ID: %s %s' % (format_hex(self._wb1), format_hex(self._wb2)))
self._confirm_init()
def _confirm_init(self):
cmd = Command(23)
cmd[0] = 0x80
cmd[1] = 0x00
cmd[2] = 0x00
cmd[3] = 0x00
cmd[4] = 0x11
cmd[5] = self._wb1
cmd[6] = self._wb2
cmd[7] = 0x00
cmd[8] = self._cmd_counter
cmd[9] = 0x00
cmd[10] = 0x33
cmd[11] = 0x00
cmd[12] = 0x00
cmd[13] = 0x00
cmd[14] = 0x00
cmd[15] = 0x00
cmd[16] = 0x00
cmd[17] = 0x00
cmd[18] = 0x00
cmd[19] = 0x00
cmd[20] = 0x00
cmd[21] = 0x00
cmd[22] = 0x33
self.logger.debug('Confirming initialization')
data = self._send_raw(cmd)
def _get_group(self, group):
if group not in self._group_cache:
self._group_cache[group] = self._Group(group)
return self._group_cache[group]
def _send_raw(self, cmd):
self.logger.debug('Sending command: %s' % cmd.message_str())
self._sock.sendto(cmd.message(), (self.ip, self.port))
data = bytearray(self._sock.recv(1024))
self.logger.debug('Response: %s' % [binascii.hexlify(data[i:i+1]) for i in range(0, len(data), 1)])
self._cmd_counter = (self._cmd_counter + 1) % 255
time.sleep(self.pause)
return data
def _send(self, cmd, group=1):
if type(cmd) == CustomCommand:
cmds = [cmd,]
else:
cmds = cmd
for cmd in cmds:
if self._wb1 == None or self._wb2 == None:
self._get_session_ids()
for x in range(0, self.repeat):
cmd[5] = self._wb1
cmd[6] = self._wb2
cmd[7] = 0x00
cmd[8] = self._cmd_counter
cmd[9] = 0x00
cmd[21] = cmd.checksum()
self._send_raw(cmd)
class Group(BaseGroup):
"""Represents a group of lights"""
def __init__(self, group, bulbtype=0x07):
if group == 'all':
self.group = 0
else:
self.group = int(group)
self.bulbtype = bulbtype
self._command_counter = 0
def _prepare_cmd(self):
cmd = CustomCommand(22)
cmd[10] = 0x31
cmd[11] = 0x00
cmd[12] = 0x00
cmd[13] = self.bulbtype
cmd[16] = 0x00
cmd[17] = 0x00
cmd[18] = 0x00
cmd[19] = self.group
cmd[20] = 0x00
return cmd
def on(self):
cmd = self._prepare_cmd()
cmd[14] = 0x03
cmd[15] = 0x01
return cmd
def off(self):
cmd = self._prepare_cmd()
cmd[14] = 0x03
cmd[15] = 0x02
return cmd
def color_rgb(self, r, g, b):
if r == 255 and b == 255 and g == 255:
cmd = (self.white(), self.brightness(100))
elif r == 0 and b == 0 and g == 0:
cmd = self.off()
elif r == b and b == g and g == r:
brightness = int(math.ceil((r / 255.0) * 100.0))
cmd = (self.white(), self.brightness(brightness))
else:
color = color_from_rgb(r, b, g, 0.35/3.0)
cmd = self.color(int(color))
return cmd
def color(self, color):
cmd = self._prepare_cmd()
cmd[14] = 0x01
cmd[15] = color
cmd[16] = color
cmd[17] = color
cmd[18] = color
return cmd
def white(self):
cmd = self._prepare_cmd()
cmd[14] = 0x03
cmd[15] = 0x05
return cmd
def brightness(self, brightness):
cmd = self._prepare_cmd()
cmd[14] = 0x02
cmd[15] = brightness
return cmd
|
"""
source: https://stanford.edu/~shervine/blog/pytorch-how-to-generate-data-parallel
"""
import os
import time
import cv2
import numpy as np
import shutil
import sys
from numpy.core.fromnumeric import argmax
import torch
import torch.optim as optim
import torchvision
from dataset.BDD100k import BDDDataset
from model import SCNN
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from utils.lr_scheduler import PolyLR
#from utils.transforms import *
import matplotlib.pyplot as plt
from tqdm import tqdm #progress bar
#import pdb pdb.set_trace() for debugging
# Directory settings
working_dir = "C:/Users/ynfuc/Documents/Masterarbeit/.vscode/BDD100k_implements/SCNN_Pytorch"
bdd100k_train_img_path = working_dir + "/dataset/images/train/"
bdd100k_train_dl_path = working_dir + "/dataset/drivable_area/labels_dl/train/"
bdd100k_val_img_path = working_dir + "/dataset/images/val/"
bdd100k_val_dl_path = working_dir + "/dataset/drivable_area/labels_dl/val/"
exp_dir = working_dir + "/experiments/exp2/drivable/"
exp_name = "t001"
# CUDA for PyTorch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print("Use device: ", device)
torch.backends.cudnn.benchmark = True
#Data loader parameters
params = {"batch_size": 20, "shuffle": True, "num_workers": 4, "pin_memory": True}
max_epoch = 100
resize_shape = tuple([512, 288])
optim_set = {"lr": 0.01, "momentum": 0.9, "weight_decay": 1e-4, "nesterov": True}
lr_set = {"warmup": 200, "max_iter": 15000, "min_lrs": 1e-05}
# Define training dataset and data loader
train_bdd100k = BDDDataset(image_path=bdd100k_train_img_path, drivable_path = bdd100k_train_dl_path)
train_bdd100k_dataset_loader = DataLoader(dataset=train_bdd100k, **params)
# Define validation dataset and data loader
val_bdd100k = BDDDataset(image_path=bdd100k_val_img_path, drivable_path = bdd100k_val_dl_path)
val_bdd100k_dataset_loader = DataLoader(dataset=val_bdd100k, **params)
#Declare model & optimizers
net = SCNN(resize_shape, pretrained=True)
net = net.to(device)
#torch.distributed.init_process_group("gloo", rank=rank, world_size=world_size)
#torch.cuda.set_device()
#net = torch.nn.parallel.DistributedDataParallel(net)
#net = torch.nn.DataParallel(net)
#
#net.eval()
tensorboard = SummaryWriter(exp_dir + "tb/")
optimizer = optim.SGD(net.parameters(), **optim_set)
lr_scheduler = PolyLR(optimizer, 0.9, **lr_set)
best_val_loss = 1000
#@profile
def train(epoch):
print("Train Epoch: {}".format(epoch))
net.train()
train_loss = 0
train_loss_seg = 0
##train_loss_exist = 0
epoch_accuracy = 0
progressbar = tqdm(range(len(train_bdd100k_dataset_loader)))
#Training loop
for batch_idx, sample in enumerate(train_bdd100k_dataset_loader):
# move to GPU
img = sample['image'].to(device)
segLabel = sample['label'].to(device)
#null gradient, get model output
optimizer.zero_grad()
seg_pred, exist_pred, loss_seg, loss_exist = net(img, segLabel) # loss
loss_seg = loss_seg.sum()
loss_seg.requres_grad = True
#loss_exist = loss_exist.sum()
#loss = loss.sum()
#loss.requres_grad = True
#backprop, grad, learning rate update
loss_seg.backward()
optimizer.step()
lr_scheduler.step()
iter_idx = epoch * len(train_bdd100k_dataset_loader) + batch_idx
#train_loss = loss.item()
train_loss_seg = loss_seg.item()
#train_loss_exist = loss_exist.item()
#Calculate accuracy
predicted = torch.argmax(seg_pred.data, dim=1) #returns sec arg of torch.max
correct_train = predicted.eq(segLabel.data).sum().item()
accuracy = 100 * correct_train / segLabel.numel()
#Save epoch accuracy in tensorboard
epoch_accuracy +=accuracy
if batch_idx >= (len(train_bdd100k_dataset_loader)-1):
tensorboard.add_scalar("accuracy", epoch_accuracy, iter_idx)
progressbar.set_description("batch loss: {:.3f}".format(loss_seg.item()))
progressbar.update(1)
lr = optimizer.param_groups[0]['lr']
tensorboard.add_scalar("train_loss", train_loss, iter_idx)
tensorboard.add_scalar("learning_rate", lr, iter_idx)
"""
print("img size: ", img.size(0), "label size: ", segLabel.size(0))
print("img size: ", type(img.size(0)), "label size: ", type(segLabel.size(0)))
print("same: ", img.size(0)==segLabel.size(0), "diff: ", img.size(0)!=segLabel.size(0))
"""
#tensorboard.add_graph(net, input_to_model=img, verbose=False)
progressbar.close()
tensorboard.flush()
#Save model & settings in exp_name.pth
if epoch % 1 == 0:
save_dict = {
"epoch": epoch,
"net": net.module.state_dict() if isinstance(net, torch.nn.DataParallel) else net.state_dict(),
"optim": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"best_val_loss": best_val_loss
}
save_name = os.path.join(exp_dir, exp_name + '.pth')
torch.save(save_dict, save_name)
print("model is saved: {}".format(save_name))
print("------------------------\n")
"""
#average trainloss calc + print every 100 batches
train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
if batch_idx % 100 == 0:
print('Epoch %d, Batch %d loss: %.6f' %(epoch, batch_idx + 1, train_loss))
"""
def val(epoch):
global best_val_loss
net.eval()
print("Val Epoch: {}".format(epoch))
net.eval()
val_loss = 0
val_loss_seg = 0
#val_loss_exist = 0 #CBE_loss not available for BDD100k
progressbar = tqdm(range(len(val_bdd100k_dataset_loader)))
#Validation
with torch.set_grad_enabled(False):
total_train = 0
correct_train = 0
epoch_accuracy = 0
for batch_idx, sample in enumerate(val_bdd100k_dataset_loader):
#Transfer to GPU
img = sample['image'].to(device)
segLabel = sample['label'].to(device)
#exist = sample['exist'].cuda()
#local_batch, local_labels = local_batch.to(device), local_labels.to(device)
seg_pred, exist_pred, loss_seg, loss_exist, loss = net(img, segLabel)
loss_seg = loss_seg.sum()
#loss_exist = loss_exist.sum()
loss = loss.sum()
predicted = torch.argmax(seg_pred.data, dim=1) #returns sec arg of torch.max
#print(total_train, predicted.shape, segLabel.shape)
correct_train = predicted.eq(segLabel.data).sum().item()
accuracy = 100 * correct_train / segLabel.numel()
            predict = predicted.eq(segLabel)  # True/False agreement per pixel
np.set_printoptions(threshold=sys.maxsize)
#print("Variante1: {:.3f}".format(accuracy))
epoch_accuracy +=accuracy
            if batch_idx >= (len(val_bdd100k_dataset_loader)-1):
tensorboard.add_scalar("val_accuracy", epoch_accuracy, epoch)
"""
https://www.kaggle.com/devilsknight/malaria-detection-with-pytorch
# convert output probabilities to predicted class
pred = output.data.max(1, keepdim=True)[1]
# compare predictions to true label
correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
total += data.size(0)
"""
"""
#prediction plot
#print("Label: ",segLabel[0,110:190,210:290].numpy())
#print("predic: ",predicted[0,110:190,210:290].numpy())
#print("Compare: ",predict[0,110:190,210:290].numpy())
print (seg_pred.shape)
f = plt.figure()
f.add_subplot(2,2,1)
plt.imshow(img[0].permute(1,2,0))
f.add_subplot(2,2,2)
plt.imshow(segLabel[0])
f.add_subplot(2,2,3)
plt.imshow(predicted[0])
f.add_subplot(2,2,4)
plt.imshow(predict[0].detach().cpu())
plt.show(block=True)
#plt.pause(5)
"""
if batch_idx ==0:
#val_images = [img[0].permute(1,2,0), segLabel[0], predicted[0], predict[0].detach().cpu()]
tensorboard.add_image("Image: ", img[0], global_step=epoch, dataformats='CHW')
"""
tensorboard.add_image("Image_gt: ", segLabel[0], global_step=epoch, dataformats='HW')
tensorboard.add_image("Image_predicted: ", predicted[0], global_step=epoch, dataformats='HW')
tensorboard.add_image("Image_compare: ", predict[0].detach().cpu(), global_step=epoch, dataformats='HW')
"""
img_grid = torchvision.utils.make_grid([segLabel[0], predicted[0], predict[0].detach().cpu()])
tensorboard.add_image("Val_Images: ", img_grid, global_step=epoch, dataformats='CHW')
# visualize validation every 5 frame, 50 frames in all
#gap_num = 25
#if batch_idx%gap_num == 0 and batch_idx < 50 * gap_num:
origin_imgs = []
#seg_pred = seg_pred.detach().cpu().numpy()
if batch_idx ==0:
#np.set_printoptions(threshold=sys.maxsize)
#print(net)
#print(segLabel[0,180:230,450:512].numpy())
#print(seg_pred[0,0,180:230,450:512].numpy())
#plt.imshow(predict[0].detach().cpu())
#plt.pause(5)
"""
#plt.subplot(1, 6, 1)
plt.imshow(segLabel[0])
#plt.pause(5)
for i in range (0, 3):
plt.figure()
print(seg_pred.shape)
plt.imshow(torch.exp(seg_pred[0, i, :, :]).detach().numpy())
plt.pause(5)
plt.close()
"""
#exist_pred = exist_pred.detach().cpu().numpy()
val_loss += loss.item()
val_loss_seg += loss_seg.item()
#val_loss_exist += loss_exist.item()
progressbar.set_description("batch loss: {:.3f}".format(loss.item()))
progressbar.update(1)
progressbar.close()
iter_idx = (epoch + 1) * len(train_bdd100k_dataset_loader) # keep align with training process iter_idx
tensorboard.add_scalar("val_loss", val_loss, iter_idx)
tensorboard.add_scalar("val_loss_seg", val_loss_seg, iter_idx)
#tensorboard.scalar_summary("val_loss_exist", val_loss_exist, iter_idx)
tensorboard.flush()
print("------------------------\n")
if val_loss < best_val_loss:
best_val_loss = val_loss
save_name = os.path.join(exp_dir, exp_name + '.pth')
copy_name = os.path.join(exp_dir, exp_name + '_best.pth')
shutil.copyfile(save_name, copy_name)
#Model computions
def main():
global best_val_loss
resume = False
if resume:
save_dict = torch.load(os.path.join(exp_dir, exp_name + '.pth'))
if isinstance(net, torch.nn.DataParallel):
net.module.load_state_dict(save_dict['net'])
else:
net.load_state_dict(save_dict['net'])
optimizer.load_state_dict(save_dict['optim'])
lr_scheduler.load_state_dict(save_dict['lr_scheduler'])
start_epoch = save_dict['epoch'] + 1
best_val_loss = save_dict.get("best_val_loss", 1e6)
else:
start_epoch = 0
for epoch in range (start_epoch, max_epoch):
train(epoch)
#val(epoch)
if epoch % 1 == 0:
print("\nValidation For Experiment: ", exp_dir)
print(time.strftime('%H:%M:%S', time.localtime()))
val(epoch)
if __name__ == "__main__":
main()
"""
probs = torch.log_softmax(seg_pred, dim = 1)
_, tags = torch.max(probs, dim = 1)
corrects = torch.eq(tags,segLabel).int()
acc = corrects.sum()/corrects.numel()
acc = acc * 100
print("Variante2: ",float(acc))
"""
#for images, labels in train_bdd100k_dataset_loader:
#Feed the data to the model
|
import base64
import json
import pytest
from unittest import mock
from actions import user_actions
def test_protect_resources():
"""Verify exception when acting on protected resources."""
with pytest.raises(SystemExit):
user_actions.protect_resources("admin")
with pytest.raises(SystemExit):
user_actions.protect_resources("kubelet-X")
def test_user_list():
"""Verify user data is parsed correctly from our secrets."""
user = secret_id = "admin"
test_data = {
"items": [
{
"metadata": {
"name": secret_id,
},
"data": {
"username": base64.b64encode(user.encode("utf-8")).decode("utf-8"),
},
}
]
}
secrets = json.dumps(test_data).encode("utf-8")
# we expect a {username: secret_id} dict
with mock.patch(
"actions.user_actions.layer.kubernetes_common.kubectl", return_value=secrets
):
secret_data = user_actions.user_list()
assert user in secret_data.keys()
assert secret_id in secret_data.values()
@mock.patch("actions.user_actions.os.chmod")
@mock.patch("actions.user_actions.layer.kubernetes_common")
@mock.patch("actions.user_actions.layer.kubernetes_master")
@mock.patch("actions.user_actions.action_get")
def test_user_create(mock_get, mock_master, mock_common, mock_chmod):
"""Verify expected calls are made when creating a user."""
user = secret_id = "testuser"
test_data = {user: secret_id}
# Ensure failure when user exists
mock_get.return_value = user
with mock.patch("actions.user_actions.user_list", return_value=test_data):
user_actions.user_create()
assert user_actions.action_fail.called
# Ensure failure when user name is invalid
mock_get.return_value = "FunnyBu;sness"
with mock.patch("actions.user_actions.user_list", return_value=test_data):
user_actions.user_create()
assert user_actions.action_fail.called
# Ensure calls/args when we have a new user
user = "newuser"
password = "password"
token = "{}::{}".format(user, password)
mock_get.return_value = user
mock_master.token_generator.return_value = password
mock_master.get_api_endpoint.return_value = [1, 1]
with mock.patch("actions.user_actions.user_list", return_value=test_data):
user_actions.user_create()
args, kwargs = mock_master.create_secret.call_args
assert token in args
args, kwargs = mock_common.create_kubeconfig.call_args
assert token in kwargs["token"]
@mock.patch("actions.user_actions.layer.kubernetes_master")
@mock.patch("actions.user_actions.action_get")
def test_user_delete(mock_get, mock_master):
"""Verify expected calls are made when deleting a user."""
user = secret_id = "testuser"
test_data = {user: secret_id}
# Ensure failure when user does not exist
mock_get.return_value = "missinguser"
with mock.patch("actions.user_actions.user_list", return_value=test_data):
user_actions.user_delete()
assert user_actions.action_fail.called
# Ensure calls/args when we have a valid user
mock_get.return_value = user
with mock.patch("actions.user_actions.user_list", return_value=test_data):
user_actions.user_delete()
args, kwargs = mock_master.delete_secret.call_args
assert secret_id in args
|
# Copyright (c) 2017 NEC Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import signal
import fixtures
import mock
from neutron.common import utils
from oslo_db import exception
from oslo_log import log as logging
from oslo_service.tests import test_service
from oslo_utils import uuidutils
from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.db import db
from networking_odl.db import models
from networking_odl.journal import cleanup
from networking_odl.journal import dependency_validations
from networking_odl.journal import full_sync
from networking_odl.journal import journal
from networking_odl.journal import periodic_task
from networking_odl.journal import recovery
from networking_odl.journal import worker
from networking_odl.tests import base
from networking_odl.tests.unit import base_v2
from networking_odl.tests.unit.db import test_db
PROCESS_RUNNING_STATUSES = ('S', 'R', 'D')
class JournalPeriodicProcessorTest(base_v2.OpenDaylightConfigBase,
test_service.ServiceTestBase):
def setUp(self):
super(JournalPeriodicProcessorTest, self).setUp()
self.periodic_task_fixture = self.useFixture(
base.OpenDaylightPeriodicTaskFixture())
self.cfg.config(sync_timeout=0.1, group='ml2_odl')
def _create_periodic_processor(self):
periodic_processor = worker.JournalPeriodicProcessor()
self.addCleanup(periodic_processor.stop)
return periodic_processor
def _get_pid_status(self, pid):
"""Allows to query a system process based on the PID
It will use `ps` to query the pid, it's state and the command.
:param pid: An integer with the Process ID number
:returns: A tuple of strings with the command and the running status
in a single char as defined in the manpage PS(1) under
PROCESS STATE CODES.
"""
with os.popen('ps ax -o pid,state,cmd') as f:
# Skip ps header
f.readline()
processes = (l.strip().split()[:3] for l in f)
return next(((c, s) for p, s, c in processes if int(p) == pid),
(None, None))
def _kill_process(self, pid):
if self._get_pid_status(pid)[1] in PROCESS_RUNNING_STATUSES:
os.kill(pid, signal.SIGKILL)
def mock_object_with_ipc(self, target, attribute, pre_hook=None):
patcher = mock.patch.object(target, attribute, autospec=True)
c2p_read = self.create_ipc_for_mock(patcher, pre_hook)
return c2p_read
def create_ipc_for_mock(self, patcher, pre_hook=None):
# NOTE(mpeterson): The following pipe is being used because this is
        # testing something across processes and we need a value on
        # the side of the test process to know it succeeded with the
        # operation. A pipe provides a way for two processes to communicate.
# The was_called method will be called by the worker process while
# the test process will read the result on c2p_read.
c2p_read, c2p_write = os.pipe()
def close_pipe_end(fd):
try:
os.close(fd)
except OSError:
print('failed closing: %s' % fd)
# First we want to close the write, to unlock any running read()
self.addCleanup(close_pipe_end, c2p_read)
self.addCleanup(close_pipe_end, c2p_write)
mock_ = patcher.start()
self.addCleanup(patcher.stop)
def was_called(*args, **kwargs):
# OSError is caught because start is called twice on the worker
# and the second time the pipe is already closed.
try:
os.close(c2p_read)
try:
if pre_hook:
pre_hook(*args, **kwargs)
os.write(c2p_write, b'1')
except Exception:
# This is done so any read on the pipe is unblocked.
os.write(c2p_write, b'0')
finally:
os.close(c2p_write)
except OSError:
pass
mock_.side_effect = was_called
return c2p_read
def assert_ipc_mock_called(self, c2p_read):
# If it timeouts on the read then it means the function was
# not called.
called = int(os.read(c2p_read, 1))
self.assertEqual(called, 1,
'The IPC mock was called but during the '
'execution an exception was raised')
@mock.patch.object(journal.OpenDaylightJournalThread, 'set_sync_event')
def test_processing(self, mock_journal):
periodic_processor = self._create_periodic_processor()
periodic_processor.start()
utils.wait_until_true(lambda: mock_journal.call_count > 1, 5, 0.1)
@mock.patch.object(journal.OpenDaylightJournalThread, 'start')
@mock.patch.object(journal.OpenDaylightJournalThread, 'stop')
def test_stops_journal_sync_thread(self, mock_stop, mock_start):
periodic_processor = self._create_periodic_processor()
periodic_processor.start()
periodic_processor.stop()
mock_stop.assert_called_once()
mock_start.assert_called_once()
def test_allow_multiple_starts_gracefully(self):
periodic_processor = self._create_periodic_processor()
periodic_processor.start()
periodic_processor.stop()
try:
periodic_processor.start()
except RuntimeError:
self.fail('Calling a start() after a stop() should be allowed')
def test_multiple_starts_without_stop_throws_exception(self):
periodic_processor = self._create_periodic_processor()
periodic_processor.start()
self.assertRaises(RuntimeError, periodic_processor.start)
def test_call_stop_without_calling_start(self):
periodic_processor = self._create_periodic_processor()
try:
periodic_processor.stop()
except AttributeError:
self.fail('start() was not called before calling stop()')
def assert_process_running(self, pid):
cmd, state = self._get_pid_status(pid)
self.assertIn(state, PROCESS_RUNNING_STATUSES)
return cmd
def _create_periodic_processor_ipc_fork(self, target, pre_hook=None):
self._setup_mocks_for_periodic_task()
real_start = worker.JournalPeriodicProcessor.start
pipe_start = self.mock_object_with_ipc(worker.JournalPeriodicProcessor,
'start', real_start)
c2p_read = self.mock_object_with_ipc(worker.JournalPeriodicProcessor,
target, pre_hook)
pid = self._spawn_service(
service_maker=lambda: worker.JournalPeriodicProcessor())
self.addCleanup(self._kill_process, pid)
# Allow the process to spawn and signal handling to be registered
self.assert_ipc_mock_called(pipe_start)
return pid, c2p_read
@mock.patch.object(periodic_task.PeriodicTask, 'execute_ops',
new=mock.Mock())
@mock.patch.object(journal.OpenDaylightJournalThread,
'sync_pending_entries', new=mock.Mock())
def test_handle_sighup_gracefully(self):
real_reset = worker.JournalPeriodicProcessor.reset
pid, c2p_read = self._create_periodic_processor_ipc_fork('reset',
real_reset)
cmd = self.assert_process_running(pid)
os.kill(pid, signal.SIGHUP)
self.assert_ipc_mock_called(c2p_read)
new_cmd = self.assert_process_running(pid)
self.assertEqual(cmd, new_cmd)
def _setup_mocks_for_periodic_task(self, executed_recently=False):
mock_db_module = mock.MagicMock(spec=db)
mock_db_module.was_periodic_task_executed_recently.return_value = \
executed_recently
mock_db = mock.patch('networking_odl.journal.periodic_task.db',
mock_db_module)
mock_db.start()
self.addCleanup(mock_db.stop)
@mock.patch.object(cleanup, 'delete_completed_rows')
@mock.patch.object(cleanup, 'cleanup_processing_rows')
@mock.patch.object(full_sync, 'full_sync')
@mock.patch.object(recovery, 'journal_recovery')
# ^^ The above mocks represent the required calling order starting from
# top. Use decorators *only* to specify the stack order.
def test_maintenance_task_correctly_registered(self, *stack_order):
calls = []
for item in reversed(stack_order):
calls.append(mock.call(item))
with mock.patch.object(
periodic_task.PeriodicTask,
'register_operation') as register_operation_mock:
periodic_processor = self._create_periodic_processor()
periodic_processor._start_maintenance_task()
register_operation_mock.assert_has_calls(calls)
def test_maintenance_task_started(self):
self.periodic_task_fixture.task_start_mock.stop()
mock_start = self.periodic_task_fixture.task_start_mock.start()
periodic_processor = self._create_periodic_processor()
periodic_processor.start()
periodic_processor._maintenance_task = mock.MagicMock()
mock_start.assert_called_once()
@mock.patch.object(periodic_task.PeriodicTask, 'execute_ops',
new=mock.Mock())
def test_reset_called_on_sighup(self):
pid, c2p_read = self._create_periodic_processor_ipc_fork('reset')
self.assert_process_running(pid)
os.kill(pid, signal.SIGHUP)
self.assert_ipc_mock_called(c2p_read)
@mock.patch.object(periodic_task.PeriodicTask, 'execute_ops')
def test_reset_fires_maintenance_task(self, execute_mock):
periodic_processor = self._create_periodic_processor()
periodic_processor._start_maintenance_task()
execute_mock.reset_mock()
periodic_processor.reset()
execute_mock.assert_has_calls([mock.call(forced=True)])
def test_reset_succeeds_when_maintenance_task_not_setup(self):
periodic_processor = self._create_periodic_processor()
# NOTE(mpeterson): This tests that if calling reset without setting up
# the maintenance task then it would not raise an exception and just
# proceed as usual.
periodic_processor.reset()
@mock.patch.object(periodic_task.PeriodicTask, 'execute_ops')
def test_start_fires_maintenance_task(self, execute_mock):
periodic_processor = self._create_periodic_processor()
periodic_processor.start()
execute_mock.assert_has_calls([mock.call(forced=True)])
def test_creates_pidfile(self):
periodic_processor = self._create_periodic_processor()
periodic_processor._create_pidfile()
pidfile = str(periodic_processor.pidfile)
self.assertTrue(os.path.isfile(pidfile))
with open(pidfile) as f:
pid = int(f.readline())
self.assertEqual(pid, os.getpid())
# NOTE(mpeterson): to avoid showing an expected exception while
# running the next assert
with mock.patch('neutron.agent.linux.daemon.LOG', autospec=True):
self.assertRaises(
SystemExit,
worker.JournalPeriodicProcessor()._create_pidfile
)
@mock.patch.object(worker.JournalPeriodicProcessor, '_create_pidfile')
@mock.patch.object(worker.JournalPeriodicProcessor, '_delete_pidfile')
def test_pidfile_handling_on_start_stop(self, mock_delete, mock_create):
periodic_processor = self._create_periodic_processor()
periodic_processor.start()
periodic_processor.stop()
mock_create.assert_called_once()
mock_delete.assert_called_once()
def test_deletes_pidfile(self):
atexit_mock = self.journal_thread_fixture.remock_atexit()
periodic_processor = self._create_periodic_processor()
periodic_processor.start()
pidfile = str(periodic_processor.pidfile)
self.assertTrue(os.path.isfile(pidfile))
periodic_processor._delete_pidfile()
self.assertFalse(os.path.isfile(pidfile))
atexit_mock.assert_called_once_with(periodic_processor._delete_pidfile)
def test_atexit_delete_pidfile_registered_only_once(self):
atexit_mock = self.journal_thread_fixture.remock_atexit()
periodic_processor = self._create_periodic_processor()
for _ in range(0, 2):
periodic_processor.start()
periodic_processor.stop()
atexit_mock.assert_called_once()
class OpenDaylightJournalThreadTest(base_v2.OpenDaylightTestCase):
def setUp(self):
super(OpenDaylightJournalThreadTest, self).setUp()
self.journal = journal.OpenDaylightJournalThread()
self.addCleanup(self.cleanup)
@staticmethod
def cleanup():
journal.MAKE_URL.clear()
def test_json_data(self):
object_type = 'testobject'
data = 'testdata'
row = models.OpenDaylightJournal(object_type=object_type,
object_uuid=uuidutils.generate_uuid(),
operation=odl_const.ODL_CREATE,
data=data)
self.assertEqual("%ss" % object_type, self.journal._json_data(row)[1])
def test_json_data_customized_url(self):
object_type = 'randomtestobject'
data = 'testdata'
journal.register_url_builder(object_type, lambda row: row.object_type)
row = models.OpenDaylightJournal(object_type=object_type,
object_uuid=uuidutils.generate_uuid(),
operation=odl_const.ODL_CREATE,
data=data)
url_param = self.journal._json_data(row)
self.assertEqual(object_type, url_param[1])
def test_entry_reset_retries_exceptions(self):
with mock.patch.object(db, 'update_db_row_state') as m:
self._test_retry_exceptions(journal.entry_reset, m)
@test_db.in_session
@mock.patch.object(client.OpenDaylightRestClient, 'sendjson',
mock.Mock(side_effect=Exception))
def test__sync_entry_update_state_by_retry_count_on_exception(self):
entry = db.create_pending_row(self.db_context, *self.UPDATE_ROW)
self.journal._max_retry_count = 1
self.assertEqual(entry.retry_count, 0)
self.journal._sync_entry(self.db_context, entry)
self.assertEqual(entry.retry_count, 1)
self.assertEqual(entry.state, odl_const.PENDING)
self.journal._sync_entry(self.db_context, entry)
self.assertEqual(entry.retry_count, 1)
self.assertEqual(entry.state, odl_const.FAILED)
def _test__sync_entry_logs(self, log_type):
entry = db.create_pending_row(self.db_context, *self.UPDATE_ROW)
logger = self.useFixture(fixtures.FakeLogger())
self.journal._sync_entry(self.db_context, entry)
self.assertIn(log_type, logger.output)
def test__sync_entry_logs_processing(self):
self._test__sync_entry_logs(journal.LOG_PROCESSING)
def test__sync_entry_logs_completed(self):
self._test__sync_entry_logs(journal.LOG_COMPLETED)
@mock.patch.object(client.OpenDaylightRestClient, 'sendjson',
mock.Mock(side_effect=Exception))
def test__sync_entry_logs_failed(self):
self._test__sync_entry_logs(journal.LOG_ERROR_PROCESSING)
@mock.patch.object(journal.OpenDaylightJournalThread,
'sync_pending_entries')
def test_terminate_journal_thread_correctly(self, mock_journal):
self.journal_thread_fixture.journal_thread_mock.stop()
self.addCleanup(self.journal_thread_fixture.journal_thread_mock.start)
journal_thread = journal.OpenDaylightJournalThread(start_thread=True)
journal_thread.stop(5)
self.assertTrue(not journal_thread._odl_sync_thread.is_alive())
mock_journal.assert_called_once()
@mock.patch.object(journal.OpenDaylightJournalThread,
'sync_pending_entries')
def test_allow_multiple_starts_gracefully(self, mock_journal):
self.journal_thread_fixture.journal_thread_mock.stop()
self.addCleanup(self.journal_thread_fixture.journal_thread_mock.start)
journal_thread = journal.OpenDaylightJournalThread(start_thread=False)
self.addCleanup(journal_thread.stop)
journal_thread.start()
try:
journal_thread.start()
except RuntimeError:
self.fail('OpenDaylightJournalThread started twice')
def _raise_DBReferenceError(*args, **kwargs):
args = [mock.Mock(unsafe=True)] * 4
e = exception.DBReferenceError(*args)
raise e
class JournalTest(base_v2.OpenDaylightTestCase):
@mock.patch.object(dependency_validations, 'calculate')
@mock.patch.object(journal.db, 'create_pending_row',
side_effect=_raise_DBReferenceError)
def test_record_triggers_retry_on_reference_error(self, mock_create_row,
mock_calculate):
args = [mock.Mock(unsafe=True)] * 5
self.assertRaises(exception.RetryRequest, journal.record, *args)
def test_entry_complete_retries_exceptions(self):
with mock.patch.object(db, 'update_db_row_state') as m:
self._test_retry_exceptions(journal.entry_complete, m)
@test_db.in_session
def _test_entry_complete(self, retention, expected_length):
self.cfg.config(completed_rows_retention=retention, group='ml2_odl')
db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
entry = db.get_all_db_rows(self.db_context)[-1]
journal.entry_complete(self.db_context, entry)
rows = db.get_all_db_rows(self.db_context)
self.assertEqual(expected_length, len(rows))
self.assertTrue(
all(row.state == odl_const.COMPLETED for row in rows))
def test_entry_complete_no_retention(self):
self._test_entry_complete(0, 0)
def test_entry_complete_with_retention(self):
self._test_entry_complete(1, 1)
def test_entry_complete_with_indefinite_retention(self):
self._test_entry_complete(-1, 1)
@test_db.in_session
def test_entry_complete_with_retention_deletes_dependencies(self):
self.cfg.config(completed_rows_retention=1, group='ml2_odl')
db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
entry = db.get_all_db_rows(self.db_context)[-1]
db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW,
depending_on=[entry])
dependant = db.get_all_db_rows(self.db_context)[-1]
journal.entry_complete(self.db_context, entry)
rows = db.get_all_db_rows(self.db_context)
self.assertIn(entry, rows)
self.assertEqual([], entry.dependencies)
self.assertEqual([], dependant.depending_on)
def test_entry_reset_retries_exceptions(self):
with mock.patch.object(db, 'update_db_row_state') as m:
self._test_retry_exceptions(journal.entry_reset, m)
@test_db.in_session
def test_entry_reset(self):
db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
entry = db.get_all_db_rows(self.db_context)[-1]
entry.state = odl_const.PROCESSING
self.db_context.session.merge(entry)
self.db_context.session.flush()
entry = db.get_all_db_rows(self.db_context)[-1]
self.assertEqual(entry.state, odl_const.PROCESSING)
journal.entry_reset(self.db_context, entry)
rows = db.get_all_db_rows(self.db_context)
self.assertEqual(2, len(rows))
self.assertTrue(all(row.state == odl_const.PENDING for row in rows))
def test_entry_set_retry_count_retries_exceptions(self):
with mock.patch.object(db, 'update_pending_db_row_retry') as m:
self._test_retry_exceptions(
journal.entry_update_state_by_retry_count, m)
@test_db.in_session
def test_entry_set_retry_count(self):
db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
entry_baseline = db.get_all_db_rows(self.db_context)[-1]
db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
entry_target = db.get_all_db_rows(self.db_context)[-1]
self.assertEqual(entry_target.retry_count, 0)
self.assertEqual(entry_target.retry_count, entry_baseline.retry_count)
self.assertEqual(entry_target.state, entry_baseline.state)
journal.entry_update_state_by_retry_count(
self.db_context, entry_target, 1)
self.assertEqual(entry_target.retry_count, 1)
self.assertEqual(entry_target.state, odl_const.PENDING)
journal.entry_update_state_by_retry_count(
self.db_context, entry_target, 1)
self.assertEqual(entry_target.retry_count, 1)
self.assertEqual(entry_target.state, odl_const.FAILED)
self.assertNotEqual(entry_target.state, entry_baseline.state)
self.assertNotEqual(entry_target.retry_count,
entry_baseline.retry_count)
def test_record_logs_recording(self):
logger = self.useFixture(fixtures.FakeLogger())
journal.record(self.db_context, *self.UPDATE_ROW)
for arg in self.UPDATE_ROW[0:3]:
self.assertIn(arg, logger.output)
def test_record_logs_dependencies(self):
entry = db.create_pending_row(self.db_context, *self.UPDATE_ROW)
logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
journal.record(self.db_context, *self.UPDATE_ROW)
self.assertIn(str(entry.seqnum), logger.output)
|
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from pycoin.key.BIP32Node import BIP32Node
from pycoin.cmds.ku import get_entropy
from pycoin.networks.default import get_current_netcode
from first_page import Ui_first
from ..encoding.encoding import get_chain_secret_pair
import sys
from second_page import Ui_second
from PyQt5.QtWidgets import QTreeWidgetItem
from .keychain import KeyChain
from .widgets import PubKeyTreeWidgetItem, PrivKeyTreeWidgetItem
class App(object):
def __init__(self):
self.app = QtWidgets.QApplication(sys.argv)
self.first = QtWidgets.QDialog()
self.second = QtWidgets.QDialog()
self.ui = Ui_first()
self.ui.setupUi(self.first)
self.ui.gen_key_button.clicked.connect(self.gen_seed)
self.ui.secret_exponent.textChanged.connect(self.detect_text)
self.ui.next_button.clicked.connect(self.next)
self.first.show()
self.netcode = get_current_netcode()
self.privkeychain = KeyChain()
self.pubkeychain = KeyChain()
def run(self):
sys.exit(self.app.exec_())
def retranslateUi(self, first):
_translate = QtCore.QCoreApplication.translate
self.first.setWindowTitle(_translate("first", "Welcome"))
self.gen_seed_button.setText(_translate("first", "Generate Seed "))
self.next_button.setText(_translate("first", "Next"))
def next(self):
self.first.done(0)
self.start_second()
def start_second(self):
self.ui = Ui_second()
self.ui.setupUi(self.second)
master_node = BIP32Node(self.netcode, self.chain_code,
secret_exponent=int(self.secret_exponent,16))
master_pub_node = master_node.public_copy()
privkey_tree_item = PrivKeyTreeWidgetItem(key_type='Private Key',
level='m', key=master_node)
pubkey_tree_item = PubKeyTreeWidgetItem(key_type='Public Key',
level='M', key=master_pub_node)
self.privkeychain.append(privkey_tree_item)
self.pubkeychain.append(pubkey_tree_item)
self.ui.treeWidget.setContextMenuPolicy(Qt.CustomContextMenu)
self.ui.treeWidget.customContextMenuRequested.connect(self.show_context_menu)
self.ui.treeWidget.addTopLevelItems([privkey_tree_item, pubkey_tree_item])
self.second.show()
def show_context_menu(self, pos):
menu = QtWidgets.QMenu()
menu.addAction("Rename", self.rename_key)
if self.pubkeychain.isSelected():
menu.addAction("Create PubKey", self.derive_pub_from_pub)
elif self.privkeychain.isSelected():
menu.addAction("Create PubKey", self.derive_pub_from_priv)
menu.addAction("Create PrivKey", self.derive_priv)
action = menu.exec_(self.ui.treeWidget.mapToGlobal(pos))
def rename_key(self):
item = self.privkeychain.get_selected_key()
item.setText(0,'Hi')
def derive_pub_from_priv(self, index=1):
parent_level = self.privkeychain.get_selected_level()
parent_key = self.privkeychain.get_selected_key().key
self._derive_pub(parent_key, parent_level, self.privkeychain)
def derive_pub_from_pub(self, index=1):
parent_level = self.pubkeychain.get_selected_level()
parent_key = self.pubkeychain.get_selected_key().key
self._derive_pub(parent_key, parent_level, self.pubkeychain)
def _derive_pub(self, parent_key, parent_level, keychain, index=2):
child_key_type = 'Public Key'
child_key = parent_key.subkey(as_private=False)
if parent_level[0] not in ['N', 'M']:
child_key_label = 'N({})'.format(parent_level) + '/{}'.format(index)
else:
child_key_label = parent_level + '/{}'.format(index)
child_tree_item = PubKeyTreeWidgetItem(child_key_type, child_key_label,
child_key)
self.pubkeychain.append(child_tree_item)
keychain.get_selected_key().addChild(child_tree_item)
def derive_priv(self, index=2):
parent_level = self.privkeychain.get_selected_level()
parent_key = self.privkeychain.get_selected_key().key
child_key_type = 'Private Key'
child_key_label = parent_level + '/{}'.format(index)
child_key = parent_key.subkey(as_private=True)
child_tree_item = PrivKeyTreeWidgetItem(child_key_type,
child_key_label, child_key)
self.privkeychain.append(child_tree_item)
self.privkeychain.get_selected_key().addChild(child_tree_item)
def gen_seed(self):
self.chain_code, self.secret_exponent = get_chain_secret_pair()
self.ui.secret_exponent.setText(self.secret_exponent)
self.ui.chain_code.setText(self.chain_code)
def detect_text(self, text):
if len(text) > 0:
self.ui.next_button.setEnabled(True)
else:
self.ui.next_button.setEnabled(False)
app = App()
app.run()
|
"""
.dat export base handler
Overview
===============================================================================
+----------+------------------------------------------------------------------+
| Path | PyPoE/cli/exporter/dat/handler.py |
+----------+------------------------------------------------------------------+
| Version | 1.0.0a0 |
+----------+------------------------------------------------------------------+
| Revision | $Id: e174a100d8076d0d726202a95b85507e5dbf6c58 $ |
+----------+------------------------------------------------------------------+
| Author | Omega_K2 |
+----------+------------------------------------------------------------------+
Description
===============================================================================
.dat export base handler
Agreement
===============================================================================
See PyPoE/LICENSE
"""
# =============================================================================
# Imports
# =============================================================================
# Python
# 3rd-party
from tqdm import tqdm
# self
from PyPoE.poe.constants import VERSION
from PyPoE.poe.file import dat
from PyPoE.cli.core import console, Msg
from PyPoE.cli.exporter import config
from PyPoE.cli.exporter.util import get_content_ggpk, get_content_ggpk_path
# =============================================================================
# Globals
# =============================================================================
__all__ = []
# =============================================================================
# Classes
# =============================================================================
class DatExportHandler(object):
def add_default_arguments(self, parser):
"""
:param parser:
:type parser: argparse.ArgumentParser
:return:
"""
parser.set_defaults(func=self.handle)
parser.add_argument(
'--files', '--file',
help='.dat files to export',
nargs='*',
)
def handle(self, args):
ver = config.get_option('version')
if ver != VERSION.STABLE:
console('Loading specification for %s' % ver)
dat.reload_default_spec(version=ver)
spec = dat._default_spec
if args.files is None:
args.files = list(spec)
else:
files = set()
for file_name in args.files:
if file_name in spec:
files.add(file_name)
elif not file_name.endswith('.dat'):
file_name += '.dat'
if file_name not in spec:
console('.dat file "%s" is not in specification. Removing.' % file_name, msg=Msg.error)
else:
files.add(file_name)
files = list(files)
files.sort()
args.files = files
def _read_dat_files(self, args, prefix=''):
path = get_content_ggpk_path()
console(prefix + 'Reading "%s"...' % path)
ggpk = get_content_ggpk(path)
console(prefix + 'Reading .dat files')
dat_files = {}
ggpk_data = ggpk['Data']
remove = []
for name in tqdm(args.files):
try:
node = ggpk_data[name]
except FileNotFoundError:
console('Skipping "%s" (missing)' % name, msg=Msg.warning)
remove.append(name)
continue
df = dat.DatFile(name)
df.read(file_path_or_raw=node.record.extract(), use_dat_value=False)
dat_files[name] = df
for file_name in remove:
args.files.remove(file_name)
return dat_files
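# A minimal sketch of how a concrete exporter might combine these helpers
# (illustrative only, not part of PyPoE; the class name and the output step
# are assumptions):
#
#   class CSVExportHandler(DatExportHandler):
#       def handle(self, args):
#           super().handle(args)  # normalises args.files against the spec
#           dat_files = self._read_dat_files(args)
#           for name, dat_file in dat_files.items():
#               write_csv(name, dat_file)  # hypothetical output helper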
# =============================================================================
# Functions
# =============================================================================
|
'''
Calculates the 12C(p,gamma) cross section and compares it to the Vogl data.
"Free" parameters:
* ANC (1/2-)
* level energy (1/2+)
* partial width (1/2+, elastic)
* partial width (1/2+, capture)
'''
import os
import sys
from multiprocessing import Pool
import emcee
import numpy as np
from scipy import stats
import model
########################################
# We'll set up the sampler and get it started.
nd = model.azr.config.nd
nw = 2*nd # number of walkers = 2 * number of sampled parameters
# Pick a point (theta) in parameter space around which we'll start each walker.
theta0 = [2.1, 2.37, 33600, -0.6325, 1, 1]
# theta0 = np.array(model.azr.config.get_input_values())
# theta0 = np.array([[pi.rvs() for pi in model.priors] for _ in range(nw)])
# Each walker needs its own starting position. We'll take normally distributed
# random values centered at theta0.
p0 = np.zeros((nw, nd))
mask = np.array([0.01, 0.0001, 0.01, 0.01, 0.01, 0.01])
for i in range(nw):
mu = theta0
sig = np.abs(theta0) * mask # per-parameter relative widths set by mask above
p0[i, :] = stats.norm(mu, sig).rvs()
# We'll store the chain in test_mcmc.h5. (See emcee Backends documentation.)
backend = emcee.backends.HDFBackend('chain.h5')
backend.reset(nw, nd)
nsteps = 10 # How many steps should each walker take?
nthin = 1 # How often should the walker save a step?
nprocs = 4 # How many Python processes do you want to allocate?
# AZURE2 and emcee are both parallelized. We'll restrict AZURE2 to 1 thread to
# simplify things.
os.environ['OMP_NUM_THREADS'] = '1'
# emcee allows the user to specify the way the ensemble generates proposals.
moves = [(emcee.moves.DESnookerMove(), 0.8), (emcee.moves.DEMove(), 0.2)]
with Pool(processes=nprocs) as pool:
sampler = emcee.EnsembleSampler(nw, nd, model.lnP, moves=moves, pool=pool,
backend=backend)
state = sampler.run_mcmc(p0, nsteps, thin_by=nthin, progress=True, tune=True)
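# A hedged usage sketch (not executed here): once the run finishes, the samples
# stored in chain.h5 can be read back with emcee's HDF backend reader, e.g.
#   reader = emcee.backends.HDFBackend('chain.h5', read_only=True)
#   samples = reader.get_chain(flat=True)
# The file name matches the backend configured above.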
|
from fnmatch import fnmatch
def test_fixture(regression_file_path):
assert fnmatch(str(regression_file_path.relative), "[01].xmf")
assert fnmatch(
str(regression_file_path.absolute),
"*/test_regression_file_path_fixture0/references/case/[01].xmf",
)
|
import numpy as np
from HungarianAlgorithm.model import hungarian
from HungarianAlgorithm.matrix_generator import gen_matrix_given_permutation, gen_matrix
if __name__ == '__main__':
"""
Some tests ready to go, to see utilities and limitations.
"""
p = [0, 2, 3, 5, 1, 4]
a = gen_matrix_given_permutation(10, 6, p)
resp = hungarian(a)
print('Cost matrix:')
print(a)
print('Solution:')
print(resp)
a = np.array([[0, 9, 7, 8, 1, 9],
[5, 8, 0, 4, 2, 6],
[5, 9, 5, 0, 3, 7],
[7, 6, 8, 2, 2, 0],
[9, 0, 7, 4, 2, 8],
[2, 1, 4, 6, 0, 7]])
resp = hungarian(a, max_num_percolation=50)
print('Cost matrix:')
print(a)
print('Solution:')
print(resp)
a = np.array([[1, 1, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 0, 1, 1],
[0, 0, 0, 0, 0]])
resp = hungarian(a, max_num_percolation=500)
print('Cost matrix:')
print(a)
print('Solution:')
print(resp)
a = gen_matrix(60, 15)
resp = hungarian(a, max_num_percolation=5000)
print('Cost matrix:')
print(a)
print('Solution:')
print(resp)
# counterexample: a matrix with too many zeros.
# The sparse-matrix handling still has bugs, so the code may fail here.
a = gen_matrix(6, 15)
print('\n\n --- Particular case where the algorithm fails --- ')
print('\n For this matrix we expect the algorithm to raise an issue '
'(too sparse with respect to the max num percolation):')
print('\nCost matrix:')
print(a)
resp = hungarian(a, max_num_percolation=10000)
print('Solution:')
print(resp)
|
from tests.graph_case import GraphTestCase
class TestPlanner(GraphTestCase):
@classmethod
def setUpClass(cls):
super(TestPlanner, cls).setUpClass()
@classmethod
def tearDownClass(cls):
pass
def test2_list_my_plans(self):
my_plans = self.client.me.planner.plans.get().execute_query()
self.assertIsNotNone(my_plans.resource_path)
|
"""
Spiking Neural Networks from Scratch
(original title: ゼロから学ぶスパイキングニューラルネットワーク)
Copyright (c) 2020 HiroshiARAKI. All Rights Reserved.
"""
import numpy as np
import matplotlib.pyplot as plt
class HodgkinHuxley:
def __init__(self, time, dt, rest=-65., Cm=1.0, gNa=120., gK=36., gl=0.3, ENa=50., EK=-77., El=-54.387):
"""
Initialize Neuron parameters
:param time: experimental time
:param dt: time step
:param rest: resting potential
:param Cm: membrane capacity
:param gNa: Na+ channel conductance
:param gK: K+ channel conductance
:param gl: other (Cl) channel conductance
:param ENa: Na+ equilibrium potential
:param EK: K+ equilibrium potential
:param El: other (Cl) equilibrium potentials
"""
self.time = time
self.dt = dt
self.rest = rest
self.Cm = Cm
self.gNa = gNa
self.gK = gK
self.gl = gl
self.ENa = ENa
self.EK = EK
self.El = El
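# calc() below integrates the standard Hodgkin-Huxley membrane equation
#   Cm * dV/dt = I - gK * n**4 * (V - EK) - gNa * m**3 * h * (V - ENa) - gl * (V - El)
# with forward-Euler steps of size dt; each gate x in {n, m, h} follows
#   dx/dt = alpha_x(V) * (1 - x) - beta_x(V) * x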
def calc(self, i):
""" compute membrane potential """
# initialize parameters
v = self.rest
n = 0.32
m = 0.05
h = 0.6
v_monitor = []
n_monitor = []
m_monitor = []
h_monitor = []
time = int(self.time / self.dt)
# update time
for t in range(time):
# calc channel gating kinetics
n += self.dn(v, n)
m += self.dm(v, m)
h += self.dh(v, h)
# calc tiny membrane potential
dv = (i[t] -
self.gK * n**4 * (v - self.EK) - # K+ current
self.gNa * m**3 * h * (v - self.ENa) - # Na+ current
self.gl * (v - self.El)) / self.Cm # other current
# calc new membrane potential
v += dv * self.dt
# record
v_monitor.append(v)
n_monitor.append(n)
m_monitor.append(m)
h_monitor.append(h)
return v_monitor, n_monitor, m_monitor, h_monitor
def dn(self, v, n):
return (self.alpha_n(v) * (1 - n) - self.beta_n(v) * n) * self.dt
def dm(self, v, m):
return (self.alpha_m(v) * (1 - m) - self.beta_m(v) * m) * self.dt
def dh(self, v, h):
return (self.alpha_h(v) * (1 - h) - self.beta_h(v) * h) * self.dt
def alpha_n(self, v):
return 0.01 * (10 - (v - self.rest)) / (np.exp((10 - (v - self.rest))/10) - 1)
def alpha_m(self, v):
return 0.1 * (25 - (v - self.rest)) / (np.exp((25 - (v - self.rest))/10) - 1)
def alpha_h(self, v):
return 0.07 * np.exp(-(v - self.rest) / 20)
def beta_n(self, v):
return 0.125 * np.exp(-(v - self.rest) / 80)
def beta_m(self, v):
return 4 * np.exp(-(v - self.rest) / 18)
def beta_h(self, v):
return 1 / (np.exp((30 - (v - self.rest))/10) + 1)
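# The alpha/beta rate functions above are the classic Hodgkin-Huxley fits, written
# in terms of the displacement from the resting potential (v - rest), in mV.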
if __name__ == '__main__':
# init experimental time and time-step
time = 300 # experiment (observation) time [ms]
dt = 2**-4 # time resolution (the HH model does not integrate well unless the step is fairly small)
# Hodgkin-Huxley Neuron
neuron = HodgkinHuxley(time, dt)
# input data (a rough square wave plus noise, synthesized for convenience)
input_data = np.sin(0.5 * np.arange(0, time, dt))
input_data = np.where(input_data > 0, 20, 0) + 10 * np.random.rand(int(time/dt))
input_data_2 = np.cos(0.4 * np.arange(0, time, dt) + 0.5)
input_data_2 = np.where(input_data_2 > 0, 10, 0)
input_data += input_data_2
# compute the membrane potential and the gating variables
v, n, m, h = neuron.calc(input_data)
# plot
plt.figure(figsize=(12, 6))
x = np.arange(0, time, dt)
plt.subplot(3, 1, 1)
plt.plot(x, input_data)
plt.ylabel('I [μA/cm2]')
plt.subplot(3, 1, 2)
plt.plot(x, v)
plt.ylabel('V [mV]')
plt.subplot(3, 1, 3)
plt.plot(x, n, label='n')
plt.plot(x, m, label='m')
plt.plot(x, h, label='h')
plt.xlabel('time [ms]')
plt.ylabel('Conductance param')
plt.legend()
plt.show()
|
"""
All visualization performed by mdsea is contained in this directory.
Different visualization platforms are developed in different files.
"""
|
"""Extract signatures from an image."""
import cv2
import matplotlib.pyplot as plt
from skimage import measure, morphology
from skimage.measure import regionprops
def extractor(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1] # ensure binary
# connected component analysis with the scikit-image framework
blobs = img > img.mean()
blobs_labels = measure.label(blobs, background=1)
the_biggest_component = 0
total_area = 0
counter = 0
average = 0.0
for region in regionprops(blobs_labels):
if region.area > 10:
total_area = total_area + region.area
counter = counter + 1
# print region.area # (for debugging)
# take regions with large enough areas
if region.area >= 250:
if region.area > the_biggest_component:
the_biggest_component = region.area
average = (total_area/counter)
# a4_constant is used as a threshold value to remove connected components
# smaller than a4_constant in A4-size scanned documents
a4_constant = ((average / 84.0) * 250.0) + 100
# remove the connected components smaller than a4_constant
b = morphology.remove_small_objects(blobs_labels, a4_constant)
# save the pre-version, i.e. the image labelled with colors
# according to its connected components
plt.imsave('pre_version.png', b)
# read the pre-version
img = cv2.imread('pre_version.png', 0)
# ensure binary
result = cv2.threshold(img, 30, 255, cv2.THRESH_BINARY_INV)[1]
return result
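# A minimal usage sketch (not part of the original module); 'document.png' and
# 'signature_mask.png' are placeholder file names:
#   source = cv2.imread('document.png')
#   mask = extractor(source)
#   cv2.imwrite('signature_mask.png', mask)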
|
users = {}
messages = []
queues = []
|
import requests
from bs4 import BeautifulSoup
import time
#BTC_URL = 'https://ru.investing.com/crypto/bitcoin/btc-usd'
#convert = soup.findAll("span", {"class": "arial_26 inlineblock pid-1058142-last"})
#ETH_URL = 'https://ru.investing.com/crypto/ethereum/eth-usd'
#convert = soup.findAll("span", {"class": "arial_26 inlineblock pid-945629-last"})
class Currency:
def __init__(self):
self.current_converted_price = float(self.get_currency_price().replace(",", "."))
XRP_URL = 'https://ru.investing.com/crypto/xrp/xrp-usd'
HEADER = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
current_converted_price = 0
difference = 0.00001
def get_currency_price(self):
full_page = requests.get(self.XRP_URL, headers=self.HEADER)
soup = BeautifulSoup(full_page.content, 'html.parser')
convert = soup.findAll("span", {"class": "arial_26 inlineblock pid-1118146-last"})
return convert[0].text
def get_result(self):
return True
def check_currency(self):
currency = float(self.get_currency_price().replace(",", "."))
if currency >= self.current_converted_price + self.difference:
self.total = "Курс вырос на 10 центов: " + str(currency)
print(self.total)
self.get_result()
elif currency <= self.current_converted_price - self.difference:
self.total = "Курс упал на 10 центов: " + str(currency)
print(self.total)
self.get_result()
print(currency)
time.sleep(10)
def get_answer(self):
return self.total
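# A hedged usage sketch (not part of the original module): poll the price in a loop,
# e.g.
#   currency = Currency()
#   while True:
#       currency.check_currency()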
|
from functools import reduce
import wx
from vistas.core.observers.camera import CameraObservable
from vistas.core.preferences import Preferences
from vistas.ui.controllers.project import ProjectChangedEvent
from vistas.ui.controls.viewer_panel import ViewerPanel
from vistas.ui.events import EVT_CAMERA_MODE_CHANGED, EVT_CAMERA_SYNC, CameraSyncEvent
class ViewerContainerPanel(wx.Panel):
"""
A container panel that provides access to all active viewer panels and handles adding, removing and resizing
window rows and columns. Also provides access for synchronizing ViewerPanels when mouse events occur.
"""
class Row:
def __init__(self):
self.viewers = []
self.num_viewers = 0
self.prev_row = None
def __init__(self, parent, id):
super().__init__(parent, id)
self.num_viewers = 0
self.wireframe = False
self.selection_view = False
self.rows = []
self.num_columns = Preferences.app().get('viewer_itemsperrow', 2)
self.AddViewer()
# Events
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(EVT_CAMERA_MODE_CHANGED, self.OnCameraModeChanged)
self.Bind(EVT_CAMERA_SYNC, self.OnCameraSyncEvent)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
def OnDestroy(self, event):
self.Unbind(EVT_CAMERA_MODE_CHANGED)
def AddViewer(self, new_viewer=None):
# Add new row if necessary
if self.num_viewers % self.num_columns == 0:
self.AddRow()
last_row = self.rows[-1]
# Create new viewer
if new_viewer is None:
new_viewer = ViewerPanel(self, wx.ID_ANY)
new_viewer.HideResizeAreas()
new_viewer.ResetNeighbors()
index = last_row.num_viewers
last_row.viewers[index] = new_viewer
last_row.num_viewers += 1
self.num_viewers += 1
# Size proportions for the new viewer
new_viewer.width = 1 / last_row.num_viewers
new_viewer.height = 1 / len(self.rows)
for viewer in last_row.viewers[:index]:
viewer.width *= index * (1 / last_row.num_viewers)
# Set neighbors
if last_row.num_viewers > 1:
new_viewer.SetNeighbor(last_row.viewers[index - 1], ViewerPanel.WEST)
last_row.viewers[index - 1].SetNeighbor(new_viewer, ViewerPanel.EAST)
if last_row.prev_row is not None and last_row.prev_row.num_viewers >= last_row.num_viewers:
for viewer in last_row.prev_row.viewers:
new_viewer.SetNeighbor(viewer, ViewerPanel.NORTH)
viewer.SetNeighbor(new_viewer, ViewerPanel.SOUTH)
self.UpdateViewerSizes()
observable = CameraObservable.get()
if observable.is_sync:
self.SyncAllCameras(False, False)
self.SyncAllCameras(True, True)
new_viewer.ResetCameraInteractor()
def RemoveViewer(self, viewer=None):
# Can't remove the last viewer
if self.num_viewers < 2:
return
if viewer is None:
row = self.rows[-1]
viewer = row.viewers[row.num_viewers - 1]
for row in self.rows:
if viewer in row.viewers:
index = row.viewers.index(viewer)
viewer = row.viewers[index]
row.viewers[index] = None
viewer.legend_window.Destroy()
viewer.Destroy()
self.num_viewers -= 1
self.Rebuild()
return
def RefreshAllViewers(self):
for row in self.rows:
for viewer in row.viewers[:row.num_viewers]:
viewer.gl_canvas.Refresh()
def UpdateViewerSizes(self):
for row in self.rows:
for viewer in row.viewers[:row.num_viewers]:
x = 0
y = 0
neighbor = viewer.GetNeighbor(ViewerPanel.WEST)
if neighbor:
x = neighbor.GetPosition().x + neighbor.GetSize().GetWidth()
neighbor = viewer.GetNeighbor(ViewerPanel.NORTH)
if neighbor:
y = neighbor.GetPosition().y + neighbor.GetSize().GetHeight()
viewer.SetSize(
x, y, self.GetSize().GetWidth() * viewer.width,
self.GetSize().GetHeight() * viewer.height
)
viewer.gl_canvas.camera_controls.reposition()
def OnSize(self, event):
self.UpdateViewerSizes()
def Rebuild(self):
rows = self.rows
self.rows = []
self.num_viewers = 0
for row in rows:
for viewer in (x for x in row.viewers if x is not None):
self.AddViewer(viewer)
def AddRow(self):
new_row = self.Row()
new_row.viewers = list(None for _ in range(self.num_columns))
if self.rows:
new_row.prev_row = self.rows[-1]
for row in self.rows:
for viewer in row.viewers[:row.num_viewers]:
viewer.height *= len(self.rows) * (1 / (len(self.rows) + 1))
self.rows.append(new_row)
def ProjectChanged(self, event):
if event.change == ProjectChangedEvent.PROJECT_RESET:
while self.num_viewers > 1:
self.RemoveViewer()
self.GetMainViewerPanel().RefreshScenes()
self.GetMainViewerPanel().UpdateLegend()
self.GetMainViewerPanel().UpdateOverlay()
else:
for row in self.rows:
for i in range(row.num_viewers):
row.viewers[i].ProjectChanged(event)
def GetMainViewerPanel(self):
return self.rows[0].viewers[0]
def GetAllViewerPanels(self):
return reduce(lambda x, y: x + y, (row.viewers[:row.num_viewers] for row in self.rows))
def ToggleWireframe(self):
self.wireframe = not self.wireframe
for viewer in self.GetAllViewerPanels():
viewer.camera.wireframe = self.wireframe
viewer.camera.scene.render_bounding_boxes = self.wireframe
viewer.Refresh()
def ToggleSelectionView(self):
self.selection_view = not self.selection_view
for viewer in self.GetAllViewerPanels():
viewer.camera.selection_view = self.selection_view
viewer.Refresh()
def OnCameraModeChanged(self, event):
if CameraObservable.get().is_sync:
self.SyncAllCameras(True, False)
def OnCameraSyncEvent(self, event: CameraSyncEvent):
if CameraObservable.get().is_sync:
canvas = event.GetEventObject()
for panel in self.GetAllViewerPanels():
if canvas is not panel.gl_canvas:
interactor = panel.gl_canvas.camera_interactor
interactor.sync(event.interactor)
def SyncAllCameras(self, do_sync, save_state):
observable = CameraObservable.get()
if do_sync:
interactor = self.GetMainViewerPanel().gl_canvas.camera_interactor
observable.sync_camera(interactor, save_state)
for panel in self.GetAllViewerPanels():
if panel is not self.GetMainViewerPanel():
panel.gl_canvas.camera_controls.hide()
else:
main_panel_interactor = observable.global_interactor
observable.unsync_camera()
if main_panel_interactor is not None:
self.GetMainViewerPanel().gl_canvas.camera_interactor = main_panel_interactor
for panel in self.GetAllViewerPanels():
if panel is not None and panel is not self.GetMainViewerPanel():
panel.gl_canvas.camera_controls.show()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class RedHat(models.Model):
PROTOCOL_CHOICES = (
#(0,''),
('rest', 'REST API'),
('soap', 'SOAP'),
('telnet', 'Telnet'),
('ssh', 'SSH'),
)
hostname = models.CharField(max_length=127)
ip = models.GenericIPAddressField()
protocol = models.CharField(max_length=15, choices=PROTOCOL_CHOICES)
timestamp = models.DateTimeField(auto_now_add=True)
class Junos(models.Model):
PROTOCOL_CHOICES = (
#(0,''),
('netconf', 'Netconf'),
('telnet', 'Telnet'),
('ssh', 'SSH'),
)
hostname = models.CharField(max_length=127)
ip = models.GenericIPAddressField()
protocol = models.CharField(max_length=15, choices=PROTOCOL_CHOICES)
timestamp = models.DateTimeField(auto_now_add=True)
class Junose(models.Model):
PROTOCOL_CHOICES = (
#(0,''),
('netconf', 'Netconf'),
('telnet', 'Telnet'),
('ssh', 'SSH'),
)
hostname = models.CharField(max_length=127)
ip = models.GenericIPAddressField()
protocol = models.CharField(max_length=15, choices=PROTOCOL_CHOICES)
timestamp = models.DateTimeField(auto_now_add=True)
"""
PROTOCOL_CHOICES = (
(0,''),
(1,'REST API'),
(2,'SOAP'),
(3,'Telnet'),
(4,'TL1'),
(5,'SSH'),
)
SYSTEM_CHOICES = (
(0,''),
(1,'Nokia AMS5520'),
(2,'Arris AXSVision'),
(3,'Calix CMS'),
(4,'Junos'),
(5,'Junose'),
(6,'RedHat'),
(7,'Tellabs Panorama'),
)
class Cmds(models.Model):
cmdprotocol = models.CharField(max_length=25, choices=CMDPROTOCOL_CHOICES, default=0)
cmdsystem = models.CharField(max_length=25, choices=CMDSYSTEM_CHOICES, default=0)
cmdset = models.CharField(max_length=25, default='')
cmdline = models.CharField(max_length=255, default='')
timestamp = models.DateTimeField(auto_now_add=True)
"""
|
#!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<!()' syntax commands where they are evaluated
more than once from different directories.
"""
import TestGyp
test = TestGyp.TestGyp()
# This tests GYP's cache of commands, ensuring that the directory a command is
# run from is part of its cache key. Parallelism may lead to multiple cache
# lookups failing, resulting in the command being run multiple times by
# chance, not by GYP's logic. Turn off parallelism to ensure that the logic is
# being tested.
test.run_gyp('repeated_multidir/main.gyp', '--no-parallel')
test.pass_test()
|
#!/usr/bin/env python3
import time
import datetime
import dateutil.parser
from difflib import SequenceMatcher
import os
import requests
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as exp_c
from selenium.webdriver.common.by import By
from selenium.common.exceptions import (NoSuchElementException, TimeoutException, StaleElementReferenceException, ElementClickInterceptedException)
from selenium.webdriver.remote.webelement import WebElement
import pymongo
from pymongo import MongoClient
from pymongo.collection import Collection
from pymongo.errors import WriteError
from session_manager import SessionManager
from domain.whatsapp_message import WhatsAppMessage
from alert_manager import AlertManager
import whatsapp.selenium_methods as selenium_methods
SELENIUM_TIMEOUT = 60
MESSAGE_LIMIT = 10
binary = FirefoxBinary('/usr/bin/firefox-developer-edition')
webdriver.DesiredCapabilities.FIREFOX["unexpectedAlertBehaviour"] = "accept"
class WhatsAppProcess:
def __init__(self) -> None:
super().__init__()
self._initialize_database()
self.alert_manager = AlertManager()
self.session = SessionManager
self.driver = self.session.get_existing_driver_session()
# self.driver = webdriver.Firefox(firefox_binary=binary)
# self.driver.get('https://web.whatsapp.com/')
self.wait = WebDriverWait(self.driver, SELENIUM_TIMEOUT)
self.processed_conversation_ids = []
self.previous_conversation_content = None
self.processed_contacts = []
def _initialize_database(self):
self.client = MongoClient('mongodb', 27017)
self.db = self.client.whatsapp_cli
# MongoDB scales horizontally, so: single messages collection containing all messages from all conversations
self.messages_collection: Collection = self.db.messages
self.messages_collection.create_index([("uid", pymongo.DESCENDING), ("timestamp", pymongo.DESCENDING),
("content", pymongo.DESCENDING)], unique=True)
def process_new_users(self):
# Generate processed_contacts list
self._for_each_conversation(self._process_contact)
# Send processed_contacts to API and get list of new users
new_user_contacts = self._get_new_user_contacts(self.processed_contacts)
# self.processed_contacts = list(filter(None.__ne__, self.processed_contacts))
print('Checking for new users')
if not new_user_contacts:
return
for number in new_user_contacts:
if selenium_methods.try_search_for_contact(self.driver, number):
# Wait until messages panel is visible
messages_panel = self.wait.until(lambda _: self.driver.find_element_by_xpath("//div[@class='_9tCEa']"))
# Dynamically load the number of messages defined by MESSAGE_LIMIT
messages_panel = self._load_messages_panel(messages_panel)
# Extract username from message launch sequence
username = self._process_new_user(number, messages_panel)
if username:
print('New user %s ~ %s' % (username, number))
# If in a test environment, don't create a new user. Environment variables are
# strings, so compare against string values (defaults to debug mode).
if os.environ.get('ENABLE_DEBUG', 'True').lower() in ('1', 'true', 'yes'):
new_uid = "UID_DEBUG"
else:
new_uid = self._create_new_user(contact_number=number, username=username)
if new_uid:
print('Successfully created user: %s' % new_uid)
else:
print('Failed to create user: %s' % number)
continue
else:
print('No valid launch sequence for %s' % number)
continue
# Once contact has been successfully processed, remove from list
new_user_contacts.remove(number)
else:
print('Couldn\'t find contact')
# Notify, via Slack, of any new users who couldn't be processed
self.alert_manager.slack_alert('Unable to add new users: %s' % new_user_contacts)
def _get_new_user_contacts(self, all_contacts):
new_and_existing = self._check_for_new_users(all_contacts)
new_user_contacts = list(filter(lambda x: True if not new_and_existing.get(x, False) else False, all_contacts))
for number in new_user_contacts:
print('New user: %s' % number)
return new_user_contacts
def _check_for_new_users(self, contact_numbers):
data = list(filter(lambda x: x is not None, map(lambda y: selenium_methods.clean_contact_number(y), contact_numbers)))
headers = {
'Content-Type': 'application/json',
'Authorization': 'REMOVED',
'Origin': 'https://myREMOVED.com'
}
req = requests.post("REMOVED:3000/user_recon", json=data, headers=headers)
if req.status_code == 200:
return req.json()
else:
print(req.status_code, req.reason)
return None
def _get_uid_from_number(self, contact_number):
data = {
'fullMobileNum': contact_number
}
headers = {
'Content-Type': 'application/json',
'Authorization': 'REMOVED',
'Origin': 'https://myREMOVED.com'
}
endpoint = "REMOVED:3000/user_uid_from_full_mobile_num"
req = requests.post(endpoint, json=data, headers=headers)
if req.status_code == 200:
return req.json()['userUID']
else:
print(req.status_code, req.reason)
return None
def _create_new_user(self, contact_number, username=None):
contact_number = selenium_methods.clean_contact_number(contact_number)
data = {
'fullMobileNum': contact_number,
'username': username
}
headers = {
'Content-Type': 'application/json',
'Authorization': 'REMOVED',
'Origin': 'REMOVED'
}
endpoint = "REMOVED/users/create_user_full_num"
req = requests.post(endpoint, json=data, headers=headers)
if req.status_code == 200:
return req.json()['userUID']
else:
print(req.status_code, req.reason)
return None
def process_messages(self):
self._for_each_conversation(self._process_conversation)
def _for_each_conversation(self, func):
print('Ensuring connection okay')
# self.session.wait_until_connection_okay()
print('Fetching conversations panel')
conversations_panel: WebElement = self._get_conversations_panel()
print('Fetching conversations')
def conversations(): return conversations_panel.find_elements_by_class_name('_2wP_Y')
print('Processing conversations')
self._side_pane_scroll_to(0)
while True:
def sorted_conversations(): return sorted(conversations(), key=lambda el: self._get_element_y_position(el))
conversations_copy = sorted_conversations().copy()
# print(list(map(lambda conv: '%s ~ %s' % (conv.id, conv.text), conversations_copy)))
for index in range(len(conversations_copy)):
func(conversations_copy[index])
scroll_progress = self._side_pane_scroll_top()
scroll_max = self._side_pane_scroll_top_max()
if scroll_progress == scroll_max:
break
last_processed_conversation = conversations_copy[-1]
self._scroll_into_view(last_processed_conversation, True)
time.sleep(0.1)
progress = round((scroll_progress/scroll_max)*100)
print('Progress: %s' % progress)
def _process_contact(self, conversation: WebElement):
# Fetch contact name/number element - if contact saved, would appear as name
contact_name_number = self.wait.until(lambda _: conversation.find_element_by_xpath(
".//span[@class='_1wjpf']")).get_attribute('title')
if contact_name_number not in self.processed_contacts:
cleaned = selenium_methods.clean_contact_number(contact_name_number)
if cleaned:
self.processed_contacts.append(cleaned)
return contact_name_number
def _process_conversation(self, conversation: WebElement):
print('\nProcessing conversation...')
uid = None
try:
# Assuming the user is not saved as a contact, 'contact_id' will return the number
contact_id = self.wait.until(lambda _: conversation.find_element_by_xpath(".//span[@class='_1wjpf']")) \
.get_attribute('title')
contact_id = selenium_methods.clean_contact_number(contact_id)
if not contact_id:
print('Invalid contact ID')
return False
# Try get uid from local database, otherwise perform network call,
# if this fails then user needs to be created first
uid = self.get_uid_from_number_db(contact_id)
if not uid:
uid = self._get_uid_from_number(contact_id)
if not uid:
print('User needs to be created')
last_message_content = self.wait.until(lambda _: conversation.find_element_by_xpath(".//span[@class='_2_LEW']")) \
.get_attribute('title')
last_message = WhatsAppMessage(uid=uid, timestamp=None, sender_name=None, sender_number=contact_id, content=last_message_content)
del last_message.timestamp
del last_message.sender_name
if self.messages_in_sync(last_message):
print('Messages in sync')
return True
print('Processing conversation %s: ID - %s' % (conversation.id, contact_id))
except NoSuchElementException:
print('No such element')
return False
messages_panel = self.load_conversation_messages_panel(conversation)
self.previous_conversation_content = messages_panel
if uid:
# messages = self._extract_and_save_messages(messages_panel)
# print('Saving messages to database')
# for message in messages:
# message.uid = uid
# self.save_message(message)
# print('Message: %s' % message.__dict__)
return True
else:
username = self._process_new_user(contact_id, messages_panel)
if username:
print('New user %s ~ %s' % (username, contact_id))
new_uid = self._create_new_user(contact_number=contact_id, username=username)
if new_uid:
print('Successfully created user: %s' % new_uid)
else:
print('Failed to create user: %s' % contact_id)
else:
print('No valid launch sequence for %s' % contact_id)
return False
def load_conversation_messages_panel(self, conversation):
self.wait.until(lambda _: conversation and conversation.is_displayed() and conversation.is_enabled())
while True:
try:
conversation.click()
break
except ElementClickInterceptedException:
time.sleep(0.1)
continue
# If moving from active conversation, wait for content to refresh after click
# while True:
# self.wait.until(lambda _: conversation and conversation.is_displayed() and conversation.is_enabled())
# conversation.click()
# try:
# conversation.click()
# break
# except ElementClickInterceptedException:
# self._scroll_into_view(conversation, False)
# continue
if self.previous_conversation_content:
self.wait.until(exp_c.staleness_of(self.previous_conversation_content))
messages_panel = self.wait.until(lambda _: conversation.find_element_by_xpath("//div[@class='_9tCEa']"))
# self.wait.until(
# lambda _: 'loading' not in messages_panel.find_element_by_class_name('_3dGYA').get_attribute('title'))
# Scroll through all messages until MESSAGE_LIMIT messages are scraped, or we reach the top
return self._load_messages_panel(messages_panel)
def _load_messages_panel(self, messages_panel):
# Scroll through all messages until MESSAGE_LIMIT messages are scraped, or we reach the top
try:
while len(messages_panel.find_elements_by_class_name('vW7d1')) < MESSAGE_LIMIT:
try:
load_spinner = WebDriverWait(self.driver, 2) \
.until(lambda _: self.driver.find_element_by_xpath("//div[@class='_3dGYA']"))
self._scroll_into_view(load_spinner, True)
except (TimeoutException, StaleElementReferenceException):
break
self.wait.until(lambda _: not self._messages_are_loading())
except NoSuchElementException:
pass
return messages_panel
def _scroll_into_view(self, web_element, align_top: bool):
return self.driver.execute_script('return arguments[0].scrollIntoView(%s);' % ('true' if align_top else 'false'), web_element)
def _get_element_y_position(self, web_element):
return self.driver.execute_script('return arguments[0].getBoundingClientRect().top;', web_element)
def _scroll_top(self, web_element):
return self.driver.execute_script('return arguments[0].scrollTop;', web_element)
def _scroll_top_max(self, web_element):
return self.driver.execute_script('return arguments[0].scrollTopMax;', web_element)
def _side_pane_scroll_top(self):
side_pane = self.driver.find_element_by_id('pane-side')
return self._scroll_top(side_pane)
def _side_pane_scroll_top_max(self):
side_pane = self.driver.find_element_by_id('pane-side')
return self._scroll_top_max(side_pane)
def _side_pane_scroll_by(self, pixels):
side_pane = self.driver.find_element_by_id('pane-side')
return self.driver.execute_script('return arguments[0].scrollBy(0, %d);' % pixels, side_pane)
def _side_pane_scroll_to(self, pixels):
side_pane = self.driver.find_element_by_id('pane-side')
return self.driver.execute_script('return arguments[0].scrollTo(0, %d);' % pixels, side_pane)
def _messages_are_loading(self):
try:
def load_spinner(): return self.driver.find_element_by_xpath("//div[@class='_3dGYA']")
if load_spinner():
return 'loading' in load_spinner().get_attribute('title')
else:
return False
except NoSuchElementException:
return False
def _get_conversations_panel(self):
conversations_panel = None
try:
conversations_panel = self.wait.until(
exp_c.visibility_of_element_located((By.XPATH, "//div[@class='RLfQR']")))
except TimeoutException:
pass
return conversations_panel
def _find_conversation_by_id(self, contact_id):
conversations_panel: WebElement = self._get_conversations_panel()
try:
selector = ".//span[@class='_1wjpf'][@title='%s']/ancestor::div[@class='_2wP_Y']" % contact_id
return conversations_panel.find_element_by_xpath(selector)
except NoSuchElementException:
return None
def _extract_and_save_messages(self, messages_panel):
messages: [WhatsAppMessage] = []
def append_message(message): messages.append(message)
self._for_each_message(messages_panel, append_message)
return messages
def _process_new_user(self, contact_id, messages_panel):
# Initialize variables
messages: [WhatsAppMessage] = []
username = None
# Iterate through user's messages and append to list
def append_message(msg): messages.append(msg)
self._for_each_message(messages_panel, append_message)
# Remove any messages not from sender i.e. from Wattie itself
messages = list(filter(lambda x: selenium_methods.clean_contact_number(x.sender_number)
== selenium_methods.clean_contact_number(contact_id), messages))
# For each message, try find launch phrase and process
for message in messages:
# Split message content into words
word_list = message.content.split()
# Strip whitespace
word_list = [word.strip() for word in word_list]
# Find launch words
launch_words = list(filter(lambda word: self.similar('launch', word.lower(), 0.75)
or self.similar('start', word.lower(), 0.75), word_list))
if not launch_words or not message.content.strip().startswith(launch_words[0]):
continue
# Remove launch words
word_list = [word for word in word_list if word not in launch_words]
# Remaining words should be name and surname
if not word_list:
continue
name, *surname = word_list
username = " ".join(word_list)
# Remove non-alpha characters
username = "".join([c for c in username if c.isalpha() or c.isspace()])
if username:
return username
return username
def similar(self, a, b, threshold):
return SequenceMatcher(None, a, b).ratio() >= threshold
def _for_each_message(self, messages_panel, func):
message_elements: [WebElement] = lambda: messages_panel \
.find_elements_by_xpath(".//div[@class='vW7d1'][position() <= %d]" % MESSAGE_LIMIT)
number_elements = len(message_elements())
for index in range(number_elements):
try:
details_el: WebElement = message_elements()[index] \
.find_element_by_xpath(".//div[@class='Tkt2p']/div[1]")
except NoSuchElementException:
try:
details_el: WebElement = message_elements()[index] \
.find_element_by_xpath(".//div[@class='Tkt2p']/div[2]")
except NoSuchElementException:
continue
details = details_el.get_attribute('data-pre-plain-text')
if details:
time_string: str = details[details.find('[')+1:details.find(']')]
sender_id: str = details.replace('[%s]' % time_string, '', 1).strip().replace(':', '', 1)
else:
continue
# content: str = self.wait.until(lambda x: message_elements()[index].find_element_by_xpath(
# ".//span[@class='selectable-text invisible-space copyable-text']")).text
try:
content: str = message_elements()[index].find_element_by_xpath(
".//span[@class='selectable-text invisible-space copyable-text']").text
except NoSuchElementException:
continue
message = self.create_message('', time_string=time_string, sender_name=None,
sender_number=sender_id, content=content)
func(message)
def create_message(self, uid, time_string, sender_name, sender_number, content):
# Time string format: [18:44, 7/8/2018]
# See http://strftime.org/
# Timestamp can change, so using dateutil instead
# timestamp = datetime.datetime.strptime(time_string, "%H:%M, %m/%d/%Y").replace(tzinfo=datetime.timezone.utc)
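# e.g. dateutil.parser.parse('18:44, 7/8/2018') yields 2018-07-08 18:44 (dateutil
# defaults to month-first for ambiguous dates); tzinfo is then pinned to UTC below.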
timestamp = dateutil.parser.parse(time_string).replace(tzinfo=datetime.timezone.utc)
msg = WhatsAppMessage(uid, timestamp, sender_name, sender_number, content)
return msg
def save_message(self, msg: WhatsAppMessage):
# message_json = json.dumps(msg.__dict__, indent=4, sort_keys=True, default=str)
# message_json = dumps(msg.__dict__)
# Insert object into messages_collection and log database id
try:
schedule_id = self.messages_collection.insert_one(msg.__dict__).inserted_id
print('Message inserted in database with ID ' + str(schedule_id))
return schedule_id
except WriteError:
print('Duplicate message exists in database')
return None
def delete_message(self, msg_id):
print('Deleting message from database')
self.messages_collection.delete_many({"_id": msg_id})
def purge_all_messages(self):
print('Deleting all messages from database')
self.messages_collection.delete_many({})
def get_uid_from_number_db(self, contact_number):
message = self.messages_collection.find_one({'sender_number': contact_number})
if message:
return message['uid']
else:
return None
def messages_in_sync(self, last_message: WhatsAppMessage):
last_message_dict = last_message.__dict__
if self.messages_collection.find_one(last_message_dict):
return True
else:
return False
|
import numpy as np
from pyhlm.model import WeakLimitHDPHLM, WeakLimitHDPHLMPython
from pyhlm.internals.hlm_states import WeakLimitHDPHLMStates
from pyhlm.word_model import LetterHSMM, LetterHSMMPython
import pyhsmm
from tqdm import trange
import warnings
warnings.filterwarnings('ignore')
import time
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from util.config_parser import ConfigParser_with_eval
from pathlib import Path
#%% parse arguments
hypparams_model = "hypparams/model.config"
hypparams_letter_duration = "hypparams/letter_duration.config"
hypparams_letter_hsmm = "hypparams/letter_hsmm.config"
hypparams_letter_observation = "hypparams/letter_observation.config"
hypparams_pyhlm = "hypparams/pyhlm.config"
hypparams_word_length = "hypparams/word_length.config"
hypparams_superstate = "hypparams/superstate.config"
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--model", default=hypparams_model, help="hyper parameters of model")
parser.add_argument("--letter_duration", default=hypparams_letter_duration, help="hyper parameters of letter duration")
parser.add_argument("--letter_hsmm", default=hypparams_letter_hsmm, help="hyper parameters of letter HSMM")
parser.add_argument("--letter_observation", default=hypparams_letter_observation, help="hyper parameters of letter observation")
parser.add_argument("--pyhlm", default=hypparams_pyhlm, help="hyper parameters of pyhlm")
parser.add_argument("--word_length", default=hypparams_word_length, help="hyper parameters of word length")
parser.add_argument("--superstate", default=hypparams_superstate, help="hyper parameters of superstate")
args = parser.parse_args()
hypparams_model = args.model
hypparams_letter_duration = args.letter_duration
hypparams_letter_hsmm = args.letter_hsmm
hypparams_letter_observation = args.letter_observation
hypparams_pyhlm = args.pyhlm
hypparams_word_length = args.word_length
hypparams_superstate = args.superstate
#%%
def load_config(filename):
cp = ConfigParser_with_eval()
cp.read(filename)
return cp
#%%
def load_datas():
data = []
names = np.loadtxt("files.txt", dtype=str)
for name in names:
data.append(np.loadtxt("DATA/" + name + ".txt"))
return data
def unpack_durations(dur):
unpacked = np.zeros(dur.sum())
d = np.cumsum(dur)
unpacked[d-1] = 1.0
return unpacked
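# Example (illustrative): unpack_durations(np.array([2, 3])) -> [0., 1., 0., 0., 1.],
# i.e. a 1 at the last frame of each duration segment.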
def save_stateseq(model):
# Save sampled states sequences.
names = np.loadtxt("files.txt", dtype=str)
for i, s in enumerate(model.states_list):
with open("results/" + names[i] + "_s.txt", "a") as f:
np.savetxt(f, s.stateseq, fmt="%d")
with open("results/" + names[i] + "_l.txt", "a") as f:
np.savetxt(f, s.letter_stateseq, fmt="%d")
with open("results/" + names[i] + "_d.txt", "a") as f:
np.savetxt(f, unpack_durations(s.durations_censored), fmt="%d")
def save_params_as_text(itr_idx, model):
with open("parameters/ITR_{0:04d}.txt".format(itr_idx), "w") as f:
f.write(str(model.params))
def save_params_as_file(iter_idx, model):
params = model.params
root_dir = Path("parameters/ITR_{0:04d}".format(iter_idx))
root_dir.mkdir(exist_ok=True)
save_json(root_dir, params)
def save_json(root_dir, json_obj):
for keyname, subjson in json_obj.items():
type_of_subjson = type(subjson)
if type_of_subjson == dict:
dir = root_dir / keyname
dir.mkdir(exist_ok=True)
save_json(dir, json_obj[keyname])
else:
savefile = root_dir / f"{keyname}.txt"
if type_of_subjson == np.ndarray:
if subjson.dtype in [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64]:
np.savetxt(savefile, subjson, fmt="%d")
else:
np.savetxt(savefile, subjson)
else:
savefile.write_text(str(subjson))
def save_params_as_npz(iter_idx, model):
params = model.params
flatten_params = flatten_json(params)
# flatten_params = copy_flatten_json(flatten_params)
np.savez(f"parameters/ITR_{iter_idx:04d}.npz", **flatten_params)
def flatten_json(json_obj, keyname_prefix=None, dict_obj=None):
if dict_obj is None:
dict_obj = {}
if keyname_prefix is None:
keyname_prefix = ""
for keyname, subjson in json_obj.items():
if type(subjson) == dict:
prefix = f"{keyname_prefix}{keyname}/"
flatten_json(subjson, keyname_prefix=prefix, dict_obj=dict_obj)
else:
dict_obj[f"{keyname_prefix}{keyname}"] = subjson
return dict_obj
def unflatten_json(flatten_json_obj):
dict_obj = {}
for keyname, value in flatten_json_obj.items():
current_dict = dict_obj
splitted_keyname = keyname.split("/")
for key in splitted_keyname[:-1]:
if key not in current_dict:
current_dict[key] = {}
current_dict = current_dict[key]
current_dict[splitted_keyname[-1]] = value
return dict_obj
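# Illustrative round-trip (hypothetical data, not taken from this script):
# flatten_json({"a": {"b": np.arange(3)}}) gives {"a/b": array([0, 1, 2])}, which
# np.savez can store directly; unflatten_json inverts the mapping, e.g.
# unflatten_json({"a/b": 1, "a/c": 2}) == {"a": {"b": 1, "c": 2}}.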
def copy_flatten_json(json_obj):
new_json = {}
for keyname, subjson in json_obj.items():
type_of_subjson = type(subjson)
if type_of_subjson in [int, float, complex, bool]:
new_json[keyname] = subjson
elif type_of_subjson in [list, tuple]:
new_json[keyname] = subjson[:]
elif type_of_subjson == np.ndarray:
new_json[keyname] = subjson.copy()
else:
raise NotImplementedError(f"type :{type_of_subjson} can not copy. Plz implement here!")
return new_json
def save_loglikelihood(model):
with open("summary_files/log_likelihood.txt", "a") as f:
f.write(str(model.log_likelihood()) + "\n")
def save_resample_times(resample_time):
with open("summary_files/resample_times.txt", "a") as f:
f.write(str(resample_time) + "\n")
#%%
Path("results").mkdir(exist_ok=True)
Path("parameters").mkdir(exist_ok=True)
Path("summary_files").mkdir(exist_ok=True)
#%% config parse
config_parser = load_config(hypparams_model)
section = config_parser["model"]
thread_num = section["thread_num"]
pretrain_iter = section["pretrain_iter"]
train_iter = section["train_iter"]
word_num = section["word_num"]
letter_num = section["letter_num"]
observation_dim = section["observation_dim"]
hlm_hypparams = load_config(hypparams_pyhlm)["pyhlm"]
config_parser = load_config(hypparams_letter_observation)
obs_hypparams = [config_parser[f"{i+1}_th"] for i in range(letter_num)]
config_parser = load_config(hypparams_letter_duration)
dur_hypparams = [config_parser[f"{i+1}_th"] for i in range(letter_num)]
len_hypparams = load_config(hypparams_word_length)["word_length"]
letter_hsmm_hypparams = load_config(hypparams_letter_hsmm)["letter_hsmm"]
superstate_config = load_config(hypparams_superstate)
#%% make instance of distributions and model
letter_obs_distns = [pyhsmm.distributions.Gaussian(**hypparam) for hypparam in obs_hypparams]
letter_dur_distns = [pyhsmm.distributions.PoissonDuration(**hypparam) for hypparam in dur_hypparams]
dur_distns = [pyhsmm.distributions.PoissonDuration(lmbda=20) for _ in range(word_num)]
length_distn = pyhsmm.distributions.PoissonDuration(**len_hypparams)
letter_hsmm = LetterHSMM(**letter_hsmm_hypparams, obs_distns=letter_obs_distns, dur_distns=letter_dur_distns)
model = WeakLimitHDPHLM(**hlm_hypparams, letter_hsmm=letter_hsmm, dur_distns=dur_distns, length_distn=length_distn)
#%%
files = np.loadtxt("files.txt", dtype=str)
datas = load_datas()
#%% Pre training.
for data in datas:
letter_hsmm.add_data(data, **superstate_config["DEFAULT"])
for t in trange(pretrain_iter):
letter_hsmm.resample_model(num_procs=thread_num)
letter_hsmm.states_list = []
#%%
print("Add datas...")
for name, data in zip(files, datas):
model.add_data(data, **superstate_config[name], generate=False)
model.resample_states(num_procs=thread_num)
# # or
# for name, data in zip(files, datas):
# model.add_data(data, **superstate_config[name], initialize_from_prior=False)
print("Done!")
#%% Save init params
# save_params_as_text(0, model)
# save_params_as_file(0, model)
save_params_as_npz(0, model)
save_loglikelihood(model)
#%%
for t in trange(train_iter):
st = time.time()
model.resample_model(num_procs=thread_num)
resample_model_time = time.time() - st
save_stateseq(model)
save_loglikelihood(model)
# save_params_as_text(t+1, model)
# save_params_as_file(t+1, model)
save_params_as_npz(t+1, model)
save_resample_times(resample_model_time)
print(model.word_list)
print(model.word_counts())
print(f"log_likelihood:{model.log_likelihood()}")
print(f"resample_model:{resample_model_time}")
|
"""
2 3
2 3 ok
2 3 ok
3 1
3 1 errados
3 1 errados
5 5
5 5 ok
5 5 ok
5 4
5 4 errados
5 4 ok
5 4
5 4 errados
5 4 ok
9 9
9 9 ok
9 9 ok
0 0
0 0 errados
0 0 ok
0 1
0 1 ok
0 1 ok
1 0
1 0 errados
1 0 ok
89 25
89 25 errados
89 25 errados
4 5
4 5 ok
4 5 ok
10 12
10 12 errados
10 12 errados
10 11
10 11 ok
10 11 ok
11 10
11 10 errados
11 10 ok
"""
x, y = input().split()
x, y = int(x), int(y)
if y == 0:
    print("{} {} errados".format(x, y))
elif x == y or y == x + 1:  # ok when the values match or y is exactly one more than x
    print("{} {} ok".format(x, y))
else:
print("{} {} errados".format(x, y))
|
from django import template
from ..apps.basket import my_middleware
register = template.Library()
@register.simple_tag
def request_made():
my_middleware.my_list.append("end of request")
return my_middleware.my_list[0:-1]
@register.simple_tag
def request_made_len():
return len(my_middleware.my_list[0:-1])
|
# Copyright Indra Soluciones Tecnologías de la Información, S.L.U.
# 2013-2019 SPAIN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
if os.path.abspath(os.path.dirname(__file__)) not in sys.path:
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from microservice.controller import ModelController
if __name__ == '__main__':
ModelController.run(debug=True, host="0.0.0.0", port=5000)
|
import datetime
import dgl
import errno
import numpy as np
import os
import pickle
import random
import torch
import nni
from dgl.data.utils import download, get_download_dir, _get_dgl_url
from pprint import pprint
from scipy import sparse
from scipy import io as sio
def set_random_seed(seed=0):
"""Set random seed.
Parameters
----------
seed : int
Random seed to use
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
def mkdir_p(path, log=True):
"""Create a directory for the specified path.
Parameters
----------
path : str
Path name
log : bool
Whether to print result for directory creation
"""
try:
os.makedirs(path)
if log:
print('Created directory {}'.format(path))
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path) and log:
print('Directory {} already exists.'.format(path))
else:
raise
def get_date_postfix():
"""Get a date based postfix for directory name.
Returns
-------
post_fix : str
"""
dt = datetime.datetime.now()
post_fix = '{}_{:02d}-{:02d}-{:02d}'.format(
dt.date(), dt.hour, dt.minute, dt.second)
return post_fix
def setup_log_dir(args, sampling=False):
"""Name and create directory for logging.
Parameters
----------
args : dict
        Configuration
    sampling : bool
        Whether we are using sampling based training
    Returns
    -------
    log_dir : str
        Path for logging directory
    """
date_postfix = get_date_postfix()
log_dir = os.path.join(
args['log_dir'],
'{}_{}'.format(args['dataset'], date_postfix))
if sampling:
log_dir = log_dir + '_sampling'
mkdir_p(log_dir)
return log_dir
# The configuration below is from the paper.
# RECEIVED_PARAMS = nni.get_next_parameter()
# lr = RECEIVED_PARAMS['learning_rate']
# dr = RECEIVED_PARAMS['dropout_rate']
lr = 0.003
dr = 0.6
default_configure = {
'lr': lr, # Learning rate
'num_heads': [2], # Number of attention heads for node-level attention
'hidden_units': 32,
'dropout': dr,
'weight_decay': 0.001,
'num_epochs': 10,
'patience': 20
}
sampling_configure = {
'batch_size': 20
}
def setup(args):
args.update(default_configure)
set_random_seed(args['seed'])
args['dataset'] = 'dblp'
args['device'] = 'cuda:0' if torch.cuda.is_available() else 'cpu'
args['log_dir'] = setup_log_dir(args)
return args
def setup_for_sampling(args):
args.update(default_configure)
args.update(sampling_configure)
set_random_seed()
args['device'] = 'cuda:0' if torch.cuda.is_available() else 'cpu'
args['log_dir'] = setup_log_dir(args, sampling=True)
return args
def get_binary_mask(total_size, indices):
mask = torch.zeros(total_size)
mask[indices] = 1
return mask.byte()
def load_dblp(remove_self_loop):
NODE_FEATURE = 'Dataprocessing/dblp.v12/data/node_feature.pkl'
EDGES = 'Dataprocessing/dblp.v12/data/edges.pkl'
with open(NODE_FEATURE,'rb') as f:
node_features = pickle.load(f)
with open(EDGES,'rb') as f:
edges = pickle.load(f)
# A_ap,A_pa,A_ao,A_oa
a_vs_p = edges[0]
p_vs_a = edges[1]
a_vs_o = edges[2]
o_vs_a = edges[3]
hg = dgl.heterograph({
('author', 'ap', 'paper'): a_vs_p.nonzero(),
('paper', 'pa', 'author'): p_vs_a.nonzero(),
('author', 'ao', 'org'): a_vs_o.nonzero(),
('org', 'oa', 'author'): o_vs_a.nonzero()
})
features = torch.tensor(node_features, dtype=torch.float32)
num_classes = 64
return hg, features, num_classes
def load_data(dataset, remove_self_loop=False):
if dataset == 'dblp':
return load_dblp(remove_self_loop)
else:
        raise NotImplementedError('Unsupported dataset {}'.format(dataset))
class EarlyStopping(object):
def __init__(self, patience=10):
dt = datetime.datetime.now()
self.filename = 'early_stop_{}_{:02d}-{:02d}-{:02d}.pth'.format(
dt.date(), dt.hour, dt.minute, dt.second)
self.patience = patience
self.counter = 0
self.best_acc = None
self.best_loss = None
self.early_stop = False
def step(self, loss, acc, model):
if self.best_loss is None:
self.best_acc = acc
self.best_loss = loss
self.save_checkpoint(model)
elif (loss > self.best_loss) and (acc < self.best_acc):
self.counter += 1
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
if (loss <= self.best_loss) and (acc >= self.best_acc):
self.save_checkpoint(model)
self.best_loss = np.min((loss, self.best_loss))
self.best_acc = np.max((acc, self.best_acc))
self.counter = 0
return self.early_stop
def save_checkpoint(self, model):
"""Saves model when validation loss decreases."""
torch.save(model.state_dict(), self.filename)
def load_checkpoint(self, model):
"""Load the latest checkpoint."""
model.load_state_dict(torch.load(self.filename))
|
import environ
import socket
ROOT_DIR = environ.Path(__file__) - 3 # (safe_relay_service/config/settings/base.py - 3 = safe-relay-service/)
APPS_DIR = ROOT_DIR.path('safe_relay_service')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
DOT_ENV_FILE = env('DJANGO_DOT_ENV_FILE', default=None)
if READ_DOT_ENV_FILE or DOT_ENV_FILE:
DOT_ENV_FILE = DOT_ENV_FILE or '.env'
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path(DOT_ENV_FILE)))
# GENERAL
DEBUG = env.bool('DJANGO_DEBUG', False)
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
# DATABASES
psql_url = 'psql://' + env('POSTGRES_USER') + ':' + env('POSTGRES_PASSWORD') + '@' + env('POSTGRES_HOST') + ':' + env('POSTGRES_PORT') + '/' + env('POSTGRES_DATABASE_RELAYER')
DATABASES = {
'default': env.db('RELAYER_DATABASE', default=psql_url),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# URLS
ROOT_URLCONF = 'config.urls'
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'corsheaders',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'drf_yasg',
]
LOCAL_APPS = [
'safe_relay_service.relay.apps.RelayConfig',
'safe_relay_service.tokens.apps.TokensConfig',
'safe_relay_service.gas_station.apps.GasStationConfig',
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
STATIC_ROOT = '/usr/share/nginx/html/relayer'
STATIC_URL = '/static/'
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
MEDIA_ROOT = str(APPS_DIR('media'))
MEDIA_URL = '/media/'
# TEMPLATES
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
'debug': DEBUG,
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# FIXTURES
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# ADMIN
ADMIN_URL = r'^admin/'
ADMINS = [
("""Circles""", 'webmaster@joincircles.net'),
]
MANAGERS = ADMINS
# Celery
INSTALLED_APPS += [
'safe_relay_service.taskapp.celery.CeleryConfig',
'django_celery_beat',
]
CELERY_BROKER_URL = env('CELERY_BROKER_URL', default='django://')
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
CELERY_ACCEPT_CONTENT = ['json']
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
# Django REST Framework
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.AllowAny',),
'DEFAULT_RENDERER_CLASSES': (
'djangorestframework_camel_case.render.CamelCaseJSONRenderer',
),
'DEFAULT_PARSER_CLASSES': (
'djangorestframework_camel_case.parser.CamelCaseJSONParser',
),
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning',
'EXCEPTION_HANDLER': 'safe_relay_service.relay.views.custom_exception_handler',
}
# LOGGING
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'ignore_check_url': {
'()': 'safe_relay_service.relay.utils.IgnoreCheckUrl'
}
},
'formatters': {
'verbose': {
'format': '%(asctime)s [%(levelname)s] [%(processName)s] %(message)s',
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG else 'INFO',
},
'celery.worker.strategy': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG else 'INFO',
},
'django.request': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': True
},
'django.server': { # Gunicorn uses `gunicorn.access`
'level': 'INFO',
'handlers': ['console'],
'propagate': True,
'filters': ['ignore_check_url'],
},
}
}
REDIS_URL = env('RELAYER_REDIS_URL', default='redis://redis:6379/0')
# ETHEREUM
ETH_HASH_PREFIX = env('ETH_HASH_PREFIX', default='ETH')
ETHEREUM_NODE_URL = env('ETHEREUM_NODE_ENDPOINT', default=None)
ETHEREUM_TRACING_NODE_URL = env('ETHEREUM_TRACING_NODE_URL', default=ETHEREUM_NODE_URL)
GAS_STATION_NUMBER_BLOCKS = env('GAS_STATION_NUMBER_BLOCKS', default=300)
# SAFE
SAFE_FUNDER_PRIVATE_KEY = env('SAFE_FUNDER_PRIVATE_KEY', default=None)
# Maximum ether (not wei) for a single transaction (security limit)
SAFE_FUNDER_MAX_ETH = env.float('SAFE_FUNDER_MAX_ETH', default=0.1)
SAFE_FUNDING_CONFIRMATIONS = env.int('SAFE_FUNDING_CONFIRMATIONS', default=3) # Set to at least 3
# Master Copy Address of Safe Contract
SAFE_CONTRACT_ADDRESS = env('SAFE_ADDRESS', default='0x' + '0' * 39 + '1')
SAFE_V1_0_0_CONTRACT_ADDRESS = env('SAFE_ADDRESS', default='0x' + '0' * 39 + '1')
SAFE_V0_0_1_CONTRACT_ADDRESS = env('SAFE_ADDRESS', default='0x' + '0' * 39 + '1')
SAFE_VALID_CONTRACT_ADDRESSES = set(env.list('SAFE_VALID_CONTRACT_ADDRESSES',
default=['0x' + '0' * 39 + '1'])
) | {SAFE_CONTRACT_ADDRESS,
SAFE_V1_0_0_CONTRACT_ADDRESS,
SAFE_V0_0_1_CONTRACT_ADDRESS}
SAFE_PROXY_FACTORY_ADDRESS = env('PROXY_FACTORY_ADDRESS', default='0x' + '0' * 39 + '2')
SAFE_PROXY_FACTORY_V1_0_0_ADDRESS = env('PROXY_FACTORY_ADDRESS', default='0x' + '0' * 39 + '2')
SAFE_DEFAULT_CALLBACK_HANDLER = env('SAFE_DEFAULT_CALLBACK_HANDLER',
default='0x' + '0' * 39 + '3')
# If FIXED_GAS_PRICE is None, GasStation will be used
FIXED_GAS_PRICE = env.int('FIXED_GAS_PRICE', default=None)
SAFE_TX_SENDER_PRIVATE_KEY = env('SAFE_TX_SENDER_PRIVATE_KEY', default=None)
SAFE_CHECK_DEPLOYER_FUNDED_DELAY = env.int('SAFE_CHECK_DEPLOYER_FUNDED_DELAY', default=1 * 30)
SAFE_CHECK_DEPLOYER_FUNDED_RETRIES = env.int('SAFE_CHECK_DEPLOYER_FUNDED_RETRIES', default=10)
SAFE_FIXED_CREATION_COST = env.int('SAFE_FIXED_CREATION_COST', default=None)
SAFE_ACCOUNTS_BALANCE_WARNING = env.int('SAFE_ACCOUNTS_BALANCE_WARNING', default=200000000000000000) # 0.2 Eth
SAFE_TX_NOT_MINED_ALERT_MINUTES = env('SAFE_TX_NOT_MINED_ALERT_MINUTES', default=10)
NOTIFICATION_SERVICE_URI = env('NOTIFICATION_SERVICE_URI', default=None)
NOTIFICATION_SERVICE_PASS = env('NOTIFICATION_SERVICE_PASS', default=None)
TOKEN_LOGO_BASE_URI = env('TOKEN_LOGO_BASE_URI', default='')
TOKEN_LOGO_EXTENSION = env('TOKEN_LOGO_EXTENSION', default='.png')
# CACHES
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('RELAYER_REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True,
}
}
}
# CIRCLES
CIRCLES_HUB_ADDRESS = env('HUB_ADDRESS', default='0x' + '0' * 39 + '2')
GRAPH_NODE_ENDPOINT = env('GRAPH_NODE_ENDPOINT', default='')
MIN_TRUST_CONNECTIONS = env('MIN_TRUST_CONNECTIONS', default=3)
SUBGRAPH_NAME = env('SUBGRAPH_NAME', default='')
# DOCKER
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2']
INTERNAL_IPS += [ip[:-1] + '1' for ip in ips]
|
__version__ = '1.5.4.0'
|
# Generated by Django 2.1.4 on 2018-12-11 04:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('job_number', models.IntegerField()),
('description', models.TextField()),
('list_order', models.IntegerField()),
('machine', models.CharField(choices=[('SMSV', 'Small Starvision'), ('PIN1', 'Pinnacle 1'), ('PIN2', 'Pinnacle 2'), ('MBSV', 'Mid Bay Big Starvision')], default='PIN1', max_length=4)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='list.Customer')),
],
),
]
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# ------------------------------------------------------------------------------
# Copyright 2020. NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# Created by eeliu at 11/6/19
class SpanFactory(object):
    def __init__(self, agent):
self.sequenceid = 0
self.agent = agent
    def createSpan(self, stackMap):
        raise NotImplementedError
    def createSpanEvent(self, stackMap):
        raise NotImplementedError
    def attachSpanEvent(self, span, span_event):
        raise NotImplementedError
def makeSpan(self, stackMap):
self.sequenceid = 0
span = self.createSpan(stackMap)
if 'calls' in stackMap:
for called in stackMap['calls']:
self.makeSpanEv(span, called)
return span
def makeSpanEv(self, span, stackMap, depth=1):
span_ev = self.createSpanEvent(stackMap)
self.setSequenceid(span_ev, self.sequenceid)
self.sequenceid += 1
self.setDepth(span_ev, depth)
self.attachSpanEvent(span, span_ev)
if 'calls' in stackMap:
for called in stackMap['calls']:
self.makeSpanEv(span, called, depth + 1)
    def setSequenceid(self, span_ev, id):
        raise NotImplementedError
    def setDepth(self, span_ev, depth):
        raise NotImplementedError
|
import json
from bs4 import BeautifulSoup
import pandas as pd
import sys
# Argparsing
argument_index = 1
template = sys.argv[argument_index]
argument_index +=1
recall_json = sys.argv[argument_index]
argument_index +=1
recall_plot = sys.argv[argument_index]
argument_index +=1
precision_jsons_list = [sys.argv[i] for i in range(argument_index, len(sys.argv))]
precision_rows_list = []
# convert jsons back to dicts for html conversion
for json_path in precision_jsons_list:
with open(json_path, 'r') as json_file:
data = json.load(json_file)
precision_rows_list.append(data)
precision_df = pd.DataFrame(precision_rows_list)
precision_df = precision_df.sort_values(by='Round #')
precision_html_table = precision_df.to_html(index=False)
# Same for recall json
recall_rows_list = []
with open(recall_json, 'r') as json_file:
data=json.load(json_file)
recall_rows_list.append(data)
recall_df = pd.DataFrame(recall_rows_list)
recall_html_table = recall_df.to_html(index=False)
# Create html
with open(template, 'r') as template_file:
contents = template_file.read()
template_soup = BeautifulSoup(contents, features="html.parser")
p_list = template_soup.find_all('p')
p_index = 0
# Read recall table tag
recall_soup = BeautifulSoup(recall_html_table, features="html.parser")
table_tag = recall_soup.find('table')
p_list[p_index].insert_after(table_tag)
p_index+=1
image_tag = template_soup.new_tag('img')
image_tag['src']= f"./recall/{recall_plot}"
image_tag['width']= 700
image_tag['height']= 500
p_list[p_index].insert_after(image_tag)
p_index+=1
precision_soup = BeautifulSoup(precision_html_table, features="html.parser")
table_tag = precision_soup.find('table')
p_list[p_index].insert_after(table_tag)
p_index+=1
with open('spot_detection_qc_report.html', 'w') as result_file:
result_file.write(str( template_soup ))
|
#!/usr/bin/env python
"""test-imu-plot.py: Ask multiwii for raw IMU and plot it using pyqtgraph."""
__author__ = "Aldo Vargas"
__copyright__ = "Copyright 2016 Altax.net"
__license__ = "GPL"
__version__ = "1"
__maintainer__ = "Aldo Vargas"
__email__ = "alduxvm@gmail.com"
__status__ = "Development"
from pymultiwii import MultiWii
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
#board = MultiWii("/dev/tty.usbserial-A801WZA1")
board = MultiWii("/dev/tty.SLAB_USBtoUART")
win = pg.GraphicsWindow()
win.setWindowTitle('MultiWii IMU plotting')
p1 = win.addPlot()
win.nextRow()
p2 = win.addPlot()
data1 = [0] * 300
data2 = [0] * 300
data3 = [0] * 300
data4 = [0] * 300
data5 = [0] * 300
data6 = [0] * 300
curve1 = p1.plot(data1, name="ax", pen=(255,0,0))
curve2 = p1.plot(data2, name="ay", pen=(0,255,0))
curve3 = p1.plot(data3, name="az", pen=(0,0,255))
curve4 = p2.plot(data4, name="gx", pen=(255,0,0))
curve5 = p2.plot(data5, name="gy", pen=(0,255,0))
curve6 = p2.plot(data6, name="gz", pen=(0,0,255))
def update1():
global data1, curve1, board
board.getData(MultiWii.RAW_IMU)
t = float(board.rawIMU['timestamp'])
ax = board.rawIMU['ax']
ay = board.rawIMU['ay']
az = board.rawIMU['az']
gx = board.rawIMU['gx']
gy = board.rawIMU['gy']
gz = board.rawIMU['gz']
data1[:-1] = data1[1:]
data1[-1] = ax
data2[:-1] = data2[1:]
data2[-1] = ay
data3[:-1] = data3[1:]
data3[-1] = az
data4[:-1] = data4[1:]
data4[-1] = gx
data5[:-1] = data5[1:]
data5[-1] = gy
data6[:-1] = data6[1:]
data6[-1] = gz
curve1.setData(data1)
curve2.setData(data2)
curve3.setData(data3)
curve4.setData(data4)
curve5.setData(data5)
curve6.setData(data6)
def update():
update1()
timer = pg.QtCore.QTimer()
timer.timeout.connect(update)
timer.start(10)
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
# -*- coding: UTF-8 -*-
import requests
import json
def get_api(api_url, api_name, token, record_id=None):
if not record_id:
return requests.get('{}/{}/?token={}'.format(api_url, api_name, token))
return requests.get('{}/{}/{}?token={}'.format(api_url, api_name,
record_id, token))
def post_api(api_url, api_name, token, data, _json=True):
if _json:
data = json.dumps(data)
headers = {'Content-Type': 'application/json;charset=UTF-8'}
return requests.post("{}{}?token={}".format(api_url, api_name, token),
data=data, headers=headers)
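# Illustrative calls (hypothetical URL, API name and token):
#   r = get_api('https://example.com/api', 'records', 'TOKEN', record_id=42)
#   r = post_api('https://example.com/api/', 'records', 'TOKEN', {'name': 'demo'})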
|
# to be defined once OK
pass
|
'''
# Create variables for a person's name (str), age (int),
# height (float) and weight (float)
# Create a variable holding the current year (int)
# Get the person's birth year (based on the age and the current year)
# Get the person's BMI (imc) with 2 decimal places (from the weight and height)
# Display a text with all the values on screen using an f-string (with the braces)
'''
nome = 'Higor'
idade = 26
altura = 1.79
peso = 68.4
ano_atual = 2021
ano_nascimento = ano_atual - idade
imc = peso / (altura**2)
print(f'{nome} tem {idade} anos, {altura} de altura e peso {peso}Kg.')
print(f'O IMC de {nome} é {imc:.2f}.')
print(f'{nome} nasceu em {ano_nascimento}.')
|
# Spot / overlapping-spot counting tool
import os
import otsu
import pylab
import numpy as np
import mahotas as mh
from PIL import Image
from PIL import ImageFilter
from PIL import ImageEnhance
FileName = input("输入测试图片名称(默认为edu.tif):") or "edu.tif"
CurrentDir = os.getcwd()
global new_img, r_criterion, g_criterion, b_criterion, gaussian
def rgb_filter(r, g, b, image):
image_data = image.load()
new_image = Image.new("RGB", (image.size[0], image.size[1]), (0, 0, 0))
criterion = [r, g, b]
luminance = [0, 0, 0]
filtered_luminance = [0, 0, 0]
    for w in range(image.size[0]):
        for h in range(image.size[1]):
            # accumulate the per-channel luminance of the original image
            for c in range(3):
                luminance[c] += image_data[w, h][c]
            if image_data[w, h][0] > criterion[0]:
                filtered_luminance[0] += image_data[w, h][0]
                new_image.putpixel((w, h), (image_data[w, h][0], 0, 0))
            if image_data[w, h][1] > criterion[1]:
                filtered_luminance[1] += image_data[w, h][1]
                new_image.putpixel((w, h), (0, image_data[w, h][1], 0))
            if image_data[w, h][2] > criterion[2]:
                filtered_luminance[2] += image_data[w, h][2]
                new_image.putpixel((w, h), (0, 0, image_data[w, h][2]))
    return new_image, luminance, filtered_luminance
def image_processing(image, gaussian_blur):
new_image = Image.new("RGB", (image.size[0], image.size[1]), (0, 0, 0))
contrast = ImageEnhance.Contrast(image)
    # Image preprocessing; parameters can be tuned
new_image = contrast.enhance(5.0)
new_image = new_image.filter(ImageFilter.GaussianBlur(3))
    # Otsu binarization
new_image = new_image.convert('L')
new_image = otsu.otsu(new_image)
new_image = new_image.filter(ImageFilter.SMOOTH)
# new_image.show()
new_image.save("TEMP.PNG", "PNG")
    # Switch to mahotas-based processing
mahotas_image = mh.imread("TEMP.PNG")
mahotas_image = mh.gaussian_filter(mahotas_image, gaussian_blur)
# pylab.imshow(mhimgf)
# pylab.show()
mahotas_image = mahotas_image.astype(np.uint8)
T = mh.thresholding.otsu(mahotas_image)
labeled, number_of_spots = mh.label(mahotas_image > T)
seeds_image = mh.regmax(mahotas_image)
# pylab.imshow(mh.overlay(mahotas_image, seeds_image))
# pylab.show()
seeds, number_of_separated_spots = mh.label(seeds_image)
return number_of_spots, number_of_separated_spots
# Set empirical parameters
# 1. Set the RGB channel thresholds used to filter out dim spots
while True:
r_criterion = input("输入R通道参数(默认为0):") or 0
g_criterion = input("输入G通道参数(默认为0):") or 0
b_criterion = input("输入B通道参数(默认为0):") or 0
r_criterion = int(r_criterion)
g_criterion = int(g_criterion)
b_criterion = int(b_criterion)
img = Image.open(CurrentDir + "/Test/" + FileName)
new_img, lumi, flumi = rgb_filter(r_criterion, g_criterion, b_criterion, img)
print("原图总亮度:R: %i G: %i B: %i" % (lumi[0], lumi[1], lumi[2]))
print("修正总亮度:R: %i G: %i B: %i" % (flumi[0], flumi[1], flumi[2]))
print("*****************************************")
img.show()
new_img.show()
switch = input("中意吗QwQ?(y/n) ")
if switch == "y":
break
# 2. Set the Gaussian parameter, which controls the ability to separate overlapping cells; larger values mean lower separation. Usually set between 5 and 15.
while True:
gaussian = input("输入Gaussian参数(默认为6):") or 6
gaussian = int(gaussian)
real_num = input("输入手动计数结果(默认为0):") or 0
real_num = int(real_num)
n_objects, n_nuclei = image_processing(new_img, gaussian)
print("拆分前斑点数:", n_objects)
print("拆分后斑点数:", n_nuclei)
print("误差:", real_num - n_nuclei)
switch = input("中意吗QwQ?(y/n) ")
if switch == "y":
break
# For testing
# dist = mh.distance(mhimgf > T)
# dist = dist.max() - dist
# dist -= dist.min()
# dist = dist/float(dist.ptp()) * 255
# dist = dist.astype(np.uint8)
# pylab.imshow(dist)
# pylab.show()
# nuclei = mh.cwatershed(dist, seeds)
# pylab.imshow(nuclei)
# pylab.show()
print("***参数调试完毕,进入主程序***")
img_dir = input("输入文件存储目录(默认为Image):") or "Image"
data = open("NUCLEI_DATA", "w")
print("注意,请及时备份NUCLEI_DATA文件,每次运行程序将覆盖之")
queue = os.listdir(CurrentDir + "/" + img_dir)
for item in queue:
headers = item.strip(".tif").split("_")
for header in headers:
data.write(header + " ")
img = Image.open(CurrentDir + "/" + img_dir + "/" + item)
new_img, lumi, flumi = rgb_filter(r_criterion, g_criterion, b_criterion, img)
n_objects, n_nuclei = image_processing(new_img, gaussian)
data.write(str(n_objects) + " " + str(n_nuclei) + "\n")
|
"""
No Prefix Set
https://www.hackerrank.com/challenges/no-prefix-set/problem?h_r=next-challenge&h_v=zen
Given N strings. Each string contains only lowercase letters from a to j (both inclusive). The set of strings is said to be a GOOD SET if no string is a prefix of another string; otherwise, it is a BAD SET. (If two strings are identical, they are considered prefixes of each other.)
For example, aab, abcde, aabcd is a BAD SET because aab is a prefix of aabcd.
Print GOOD SET if it satisfies the problem requirement.
Else, print BAD SET and the first string for which the condition fails.
Input Format
The first line contains N, the number of strings in the set.
Then N lines follow, where the i-th line contains the i-th string.
Constraints
Length of the string
Output Format
Output GOOD SET if the set is valid.
Else, output BAD SET followed by the first string for which the condition fails.
Sample Input00
7
aab
defgab
abcde
aabcde
cedaaa
bbbbbbbbbb
jabjjjad
Sample Output00
BAD SET
aabcde
Sample Input01
4
aab
aac
aacghgh
aabghgh
Sample Output01
BAD SET
aacghgh
Explanation
aab is prefix of aabcde. So set is BAD SET and it fails at string aabcde.
"""
class Node:
def __init__(self):
self.children = [None] * 10 # from a to j
self.isLeaf = False
def letterIndex(letter):
return ord(letter) - ord('a')
def add(root, name):
node = root
existingTree = True
for i in range(len(name)):
index = letterIndex(name[i])
if node.isLeaf:
return True
if not node.children[index]:
node.children[index] = Node()
existingTree = False
node = node.children[index]
if existingTree:
return True
node.isLeaf = True
return False
def solution(A):
root = Node()
for word in A:
if add(root, word):
print("BAD SET")
print(word)
return
print("GOOD SET")
# n = int(input())
# A = []
# for i in range(n):
# A.append(input())
# solution(A)
A = ["aab","defgab","abcde","aabcde","cedaaa","bbbbbbbbbb","jabjjjad"] # BAD SET aabcde
solution(A)
A = ["aab","aac","aacghgh","aabghgh"] # BAD Set aacghgh
solution(A)
A = ["hgiiccfchbeadgebc","biiga","edchgb","ccfdbeajaeid","ijgbeecjbj","bcfbbacfbfcfbhcbfjafibfhffac","ebechbfhfcijcjbcehbgbdgbh","ijbfifdbfifaidje","acgffegiihcddcdfjhhgadfjb","ggbdfdhaffhghbdh","dcjaichjejgheiaie","d"] # BAD Set d
solution(A)
stop = True
|
import requests
import sys
import httplib
import json
from . import betfairng
from datetime import datetime
class BFGlobalService(object):
def __init__(self, app_key, debuglevel=0, cert=None):
self.cert = cert
self.betting_api = betfairng.BettingApi(app_key=app_key, debuglevel=debuglevel)
def login(self, username, password):
r = betfairng.authenticate(self.cert, username, password)
if r['loginStatus'] == 'SUCCESS':
return LoginResp(APIResponseHeader(r['sessionToken']))
else:
return LoginResp(APIResponseHeader('', r['loginStatus']))
def getActiveEventTypes(self, session_token):
r = self.betting_api.list_event_types(session_token=session_token)
eventTypes = [EventType(i['eventType']['id'], i['eventType']['name']) for i in r]
return GetEventTypesResp(APIResponseHeader(session_token), eventTypes)
def getEvents(self, session_token, event_parent_id):
"this doesn't really work with api-ng!"
market_filter = dict(eventIds=[event_parent_id])
r = self.betting_api.list_events(session_token=session_token, filter=market_filter)
print r
class BFExchangeService(object):
def __init__(self, app_key, debuglevel=0):
self.betting_api = betfairng.BettingApi(app_key=app_key, debuglevel=debuglevel)
self.accounts_api = betfairng.AccountsApi(app_key=app_key, debuglevel=debuglevel)
def getAccountFunds(self, session_token):
r1 = self.accounts_api.get_account_funds(session_token=session_token)
r2 = self.accounts_api.get_account_details(session_token=session_token)
r1.update(r2)
return GetAccountFundsResp(APIResponseHeader(session_token), r1)
class APIResponseHeader(object):
def __init__(self, session_token, error_code='OK'):
self.sessionToken = session_token
self.errorCode = error_code
self.timestamp = datetime.utcnow()
def __str__(self):
return '(%s, %s, %s)' % (self.errorCode, self.sessionToken,
self.timestamp.isoformat())
class LoginResp(object):
def __init__(self, header):
self.header = header
self.currency = None
self.validUntil = datetime.max
self.errorCode = header.errorCode
def __str__(self):
return '''LoginResp
header: %s
currency %s
errorCode %s
validUntil %s
''' % (str(self.header), self.currency, self.errorCode,
self.validUntil.isoformat())
class GetEventTypesResp(object):
def __init__(self, header, event_type_items):
self.header = header
self.eventTypeItems = event_type_items
self.minorErrorCode = None
self.errorCode = header.errorCode
def __str__(self):
return '''GetEventTypesResp
header: %s
eventTypeItems: %s
errorCode: %s
''' % (str(self.header),
[ str(item) for item in self.eventTypeItems ],
self.errorCode)
class GetAccountFundsResp(object):
def __init__(self, header, obj):
self.header = header
self.availBalance = obj['availableToBetBalance']
self.commissionRetain = obj['retainedCommission']
self.creditLimit = None
self.currentBetfairPoints = obj['pointsBalance']
self.expoLimit = obj['exposureLimit']
self.exposure = obj['exposure']
self.holidaysAvailable = None
self.minorErrorCode = None
self.nextDiscount = obj['discountRate']
self.withdrawBalance = self.availBalance # not necessarily the same
# calculate old balance value
self.balance = self.availBalance + self.exposure + self.commissionRetain
def __str__(self):
return '''GetAccountFundsResp
header: %s
availBalance %.2f, exposure %.2f
''' % (str(self.header), self.availBalance, self.exposure)
class EventType(object):
def __init__(self, id, name):
self.id = id
self.name = name
def __str__(self):
return '(%s: %s)' % (self.id, self.name)
if __name__ == "__main__":
import argparse
from getpass import getpass
parser = argparse.ArgumentParser(description='Python client for Betfair API v6')
parser.add_argument('username', help='Betfair username')
parser.add_argument('cert_file', help='Your API certificate file')
parser.add_argument('key_file', help='Your API private key file')
parser.add_argument('app_key', help='Your API application key')
args = parser.parse_args()
password = getpass('Enter password: ')
g = BFGlobalService(cert=(args.cert_file, args.key_file), app_key=args.app_key)
r = g.login(args.username, password)
r = g.getActiveEventTypes(r.header.sessionToken)
print str(r)
r = g.getEvents(r.header.sessionToken, 27280202)
e = BFExchangeService(args.app_key)
# r = e.getAccountFunds(r.header.sessionToken)
print str(r)
|
from time import time as timeout_timer
from .compat import XRANGE
try:
from __pypy__.time import clock_gettime
from __pypy__.time import CLOCK_MONOTONIC
def monotonic():
return clock_gettime(CLOCK_MONOTONIC)
except ImportError:
from timeit import default_timer
else:
default_timer = monotonic
def compute_timer_precision(timer):
precision = None
points = 0
timeout = timeout_timer() + 1.0
previous = timer()
while timeout_timer() < timeout or points < 5:
for _ in XRANGE(10):
t1 = timer()
t2 = timer()
dt = t2 - t1
if 0 < dt:
break
else:
dt = t2 - previous
if dt <= 0.0:
continue
if precision is not None:
precision = min(precision, dt)
else:
precision = dt
points += 1
previous = timer()
return precision
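# Illustrative use (hypothetical, not prescribed by this module): estimate the
# smallest measurable tick of the active timer once at startup, e.g.
#   precision = compute_timer_precision(default_timer)
# and use it to choose a minimum number of iterations per benchmark round.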
|
import random
import torch.optim.lr_scheduler as Sch
class RandomSelect(object):
def __init__(self, opt_list, stride=5):
super().__init__()
self.opt_list = opt_list
self.stride = stride
self.current = random.choice(self.opt_list)
self.sch = []
self.count = 0
def select(self):
self.count += 1
if len(self.sch) > 0:
self.sch[self.opt_list.index(self.current)].step()
if len(self.opt_list) > 1 and self.count % self.stride == 0:
for opt in self.opt_list:
opt.param_groups[0]['lr'] = self.current.param_groups[0]['lr']
self.current = random.choice(self.opt_list[1::])
else:
for opt in self.opt_list:
opt.param_groups[0]['lr'] = self.current.param_groups[0]['lr']
self.current = self.opt_list[0]
return self.current
def add_scheduler(self, type='StepLR', **kwds):
for opt in self.opt_list:
self.sch.append(Sch.StepLR(opt, step_size=10, gamma=0.999))
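# Illustrative usage sketch (hypothetical parameters and optimizers, not part of this module):
#   import torch
#   params = [torch.nn.Parameter(torch.zeros(3))]
#   opts = [torch.optim.SGD(params, lr=0.1), torch.optim.Adam(params, lr=0.1)]
#   selector = RandomSelect(opts, stride=5)
#   selector.add_scheduler()
#   for step in range(20):
#       opt = selector.select()   # pick the optimizer (and step its scheduler) for this step
#       opt.zero_grad()
#       loss = (params[0] ** 2).sum()
#       loss.backward()
#       opt.step()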
|
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .App import App
from .AppInventory import AppInventory
from .AppDomain import AppDomain
class CreateApplicationRequest(BaseSchema):
# Configuration swagger.json
app = fields.Nested(App, required=False)
configuration = fields.Nested(AppInventory, required=False)
domain = fields.Nested(AppDomain, required=False)
|
import pytest
from server import create_app
@pytest.fixture(scope='module')
def test_client():
flask_app = create_app()
testing_client = flask_app.test_client()
ctx = flask_app.app_context()
ctx.push()
yield testing_client # this is where the testing happens!
ctx.pop()
def test_fetch_transactions(test_client):
assert(1==1)
|
"""Main window class for TSL applications."""
from __future__ import annotations
import json
import logging
from dataclasses import dataclass
from typing import Optional
from PyQt5.QtCore import QSettings, pyqtSlot
from PyQt5.QtGui import QCloseEvent
from PyQt5.QtWidgets import QMainWindow, QMessageBox, QWidget
import qute_style.resources_rc # pylint: disable=unused-import # noqa: F401
from qute_style.whats_new_window import WhatsNewWindow
LOG_NAME = ".".join(["tsl", __name__])
log = logging.getLogger(LOG_NAME) # pylint: disable=invalid-name
@dataclass
class AppData:
"""Provide required data to startup threads."""
app_name: str = ""
app_version: str = ""
app_icon: str = ""
app_splash_icon: str = ""
help_text: str = ""
debug_text: str = ""
class TSLMainWindow(QMainWindow):
"""Main window class for TSL applications."""
WHATS_NEW = True
def __init__(
self,
app_data: AppData,
force_whats_new: bool = False,
registry_reset: bool = False,
parent: QWidget | None = None,
) -> None:
"""Create a new QuteStyleMainWindow."""
super().__init__(parent)
self._app_data = app_data
self._force_whats_new: bool = force_whats_new
self._whats_new_window: Optional[WhatsNewWindow] = None
if registry_reset:
log.debug("Resetting QSettings in Registry.")
QSettings().clear()
def show(self) -> None:
"""Override show to start update just before."""
self._load_settings()
super().show()
self._handle_last_run()
def _handle_last_run(self) -> None:
"""Check if the What's new window needs to be shown."""
last_run = QSettings().value("last_run", (0, 0, 0))
log.debug("Last run showed details for version %s", last_run)
current_ver = tuple(
int(num) for num in self._app_data.app_version.split(".")
)
if last_run < current_ver or self._force_whats_new and self.WHATS_NEW:
log.debug(
"Current version newer than last run %s",
self._app_data.app_version,
)
            # If the What's New display is forced, show the "no news"
            # message box even when there is nothing new to display.
self._display_whats_new(not self._force_whats_new)
QSettings().setValue("last_run", current_ver)
@pyqtSlot(name="on_whats_new")
def on_whats_new(self) -> None:
"""Display the WhatsNewWindow."""
# Slot shall not be called when WhatsNew is disabled.
assert self.WHATS_NEW
self._display_whats_new(False)
def _display_whats_new(self, silent: bool = True) -> None:
"""Display the Window with the changes for the current version."""
filename = (
f"changes/{self._app_data.app_version}/"
f"{self._app_data.app_version}.json"
)
try:
with open(
filename,
encoding="utf-8",
) as fhdl:
entries = json.loads(fhdl.read())
except FileNotFoundError:
log.warning("Changes file not found %s", filename)
return
if not entries:
if not silent:
log.warning("There are no relevant entries for the user.")
title = self.tr("Keine Neuerungen")
text = self.tr(
"Die aktuelle Version enthält keine Neuerungen,"
" die für den aktuellen Nutzer verfügbar sind."
)
QMessageBox.information(self, title, text)
return
self._whats_new_window = WhatsNewWindow(
entries, self._app_data.app_version
)
self._whats_new_window.show()
@pyqtSlot(QCloseEvent, name="closeEvent")
def closeEvent( # pylint: disable=invalid-name
self, close_event: QCloseEvent
) -> None:
"""Handle a close event."""
if self._whats_new_window:
log.debug("WhatsNewWindow is still open, closing.")
self._whats_new_window.close()
self._save_settings()
super().closeEvent(close_event)
def _save_settings(self) -> None:
"""Save the paint data and state/geometry settings."""
log.debug("Saving settings to registry.")
settings = QSettings()
settings.setValue("state", self.saveState())
settings.setValue("geometry", self.saveGeometry())
log.debug("Finished writing settings to registry")
def _load_settings(self) -> None:
"""Load geometry and state settings of the ui."""
log.debug("Loading settings from registry")
settings = QSettings()
try:
self.restoreGeometry(settings.value("geometry"))
except TypeError:
log.warning(
"Could not restore geometry from: %s",
settings.value("geometry"),
)
try:
self.restoreState(settings.value("state"))
except TypeError:
log.warning(
"Could not restore state from: %s", settings.value("state")
)
@pyqtSlot(name="on_about")
def on_about(self) -> None:
"""Show a message box about the used app version."""
log.debug(
"User pressed button to show dialog about %s",
self._app_data.app_name,
)
title = self.tr("Über {}").format(self._app_data.app_name)
QMessageBox.about(self, title, self._app_data.help_text)
|
"""Use pretained resnet50 from tensorflow slim"""
import tensorflow as tf
import numpy as np
import os
from slim_dir.nets import resnet_v1, resnet_utils
slim = tf.contrib.slim
def resnet_v1_50(inputs,
num_classes=None,
is_training=True,
global_pool=False,
output_stride=None,
reuse=None,
scope='resnet_v1_50'):
"""ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_utils.Block(
'block1', resnet_v1.bottleneck, [(256, 64, 1)] * 2 + [(256, 64, 2)]),
resnet_utils.Block(
'block2', resnet_v1.bottleneck, [(512, 128, 1)] * 3 + [(512, 128, 2)]),
resnet_utils.Block(
'block3', resnet_v1.bottleneck, [(1024, 256, 1)] * 5 + [(1024, 256, 2)]),
resnet_utils.Block(
'block4', resnet_v1.bottleneck, [(2048, 512, 1)] * 3)
]
return resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=global_pool, reuse=reuse,
scope=scope)
|
from base import BaseInstanceStep
from dbaas_credentials.models import CredentialType
from util import get_credentials_for
from util import build_context_script
from util import exec_remote_command_host
import logging
from system.models import Configuration
LOG = logging.getLogger(__name__)
class MetricsCollector(BaseInstanceStep):
def __init__(self, instance):
super(MetricsCollector, self).__init__(instance)
self.credential = get_credentials_for(
self.environment, CredentialType.TELEGRAF)
self.collector_allowed = self.credential.get_parameter_by_name(
'collector_allowed')
self.kafka_topic = self.credential.get_parameter_by_name(
'kafka_topic')
@property
def is_valid(self):
return str(self.collector_allowed).lower() == 'true'
@property
def script_variables(self):
user = self.driver.get_metric_collector_user(self.credential.user)
password = self.driver.get_metric_collector_password(
self.credential.password)
create_telegraf_config = True
if self.instance.instance_type == self.instance.REDIS_SENTINEL:
if len(self.host.instances.all()) > 1:
create_telegraf_config = False
create_default_file = self.instance.instance_type in (
self.instance.MYSQL, self.instance.MONGODB, self.instance.REDIS,
self.instance.MYSQL_PERCONA)
master_ssl_ca = None
if self.infra.ssl_configured:
from workflow.steps.util.ssl import InfraSSLBaseName
infra_ssl = InfraSSLBaseName(self.instance)
master_ssl_ca = infra_ssl.master_ssl_ca
variables = {
'HOSTNAME': self.host.hostname.split('.')[0],
'HOSTADDRESS': self.host.address,
'PORT': self.instance.port,
'USER': user,
'PASSWORD': password,
'MYSQL': self.instance.is_mysql,
'MONGODB': self.instance.instance_type == self.instance.MONGODB,
'REDIS': self.instance.instance_type == self.instance.REDIS,
'CREATE_TELEGRAF_CONFIG': create_telegraf_config,
'CREATE_DEFAULT_FILE': create_default_file,
'KAFKA_ENDPOINT': self.credential.endpoint,
'KAFKA_TOPIC': self.kafka_topic,
'SSL_CONFIGURED': self.infra.ssl_configured,
            'MASTER_SSL_CA': master_ssl_ca
}
return variables
def do(self):
raise NotImplementedError
def undo(self):
pass
def exec_script(self, script):
output = {}
return_code = exec_remote_command_host(self.host, script, output)
if return_code != 0:
raise EnvironmentError(str(output))
LOG.info("output: {}".format(output))
return output
class ConfigureTelegraf(MetricsCollector):
def __unicode__(self):
return "Configuring Telegraf..."
def do(self):
if not self.is_valid:
return
template_script = self.plan.script.metric_collector_template
script = build_context_script(self.script_variables, template_script)
return self.exec_script(script)
class InstallTelegraf(MetricsCollector):
def __unicode__(self):
return "Installing Telegraf..."
def do(self):
if not self.is_valid:
return
script = "yum install telegraf -y"
self.exec_script(script)
class RestartTelegraf(MetricsCollector):
def __unicode__(self):
return "Restarting Telegraf..."
def do(self):
if not self.is_valid:
return
script = "/etc/init.d/telegraf restart"
self.exec_script(script)
class StopTelegraf(MetricsCollector):
def __unicode__(self):
return "Stopping Telegraf..."
def do(self):
if not self.is_valid:
return
script = "/etc/init.d/telegraf stop"
self.exec_script(script)
class CreateMetricCollectorDatabaseUser(MetricsCollector):
def __unicode__(self):
return "Creating metric collector database user..."
def do(self):
if not self.is_valid:
return
if self.driver.check_instance_is_master(self.instance):
self.driver.create_metric_collector_user(
username=self.credential.user,
password=self.credential.password)
def undo(self):
if not self.is_valid:
return
if self.driver.check_instance_is_master(self.instance):
self.driver.remove_metric_collector_user(
username=self.credential.user)
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: Mcl_Cmd_Firewall_Tasking.py
CMD_ACTION_STATUS = 1
CMD_ACTION_ENABLE = 2
CMD_ACTION_DELETE = 3
CMD_PROTOCOL_TCP = 0
CMD_PROTOCOL_UDP = 1
CMD_DIRECTION_IN = 0
CMD_DIRECTION_OUT = 1
def TaskingMain(namespace):
import mcl.imports
import mcl.target
import mcl.tasking
import mcl.tasking.technique
from mcl.object.Message import MarshalMessage
mcl.imports.ImportWithNamespace(namespace, 'mca.network.cmd.firewall', globals())
mcl.imports.ImportWithNamespace(namespace, 'mca.network.cmd.firewall.tasking', globals())
lpParams = mcl.tasking.GetParameters()
tgtParams = mca.network.cmd.firewall.Params()
tgtParams.provider = mcl.tasking.technique.Lookup('FIREWALL', mcl.tasking.technique.TECHNIQUE_MCL_NTNATIVEAPI, lpParams['method'])
if lpParams['action'] == CMD_ACTION_STATUS:
rpc = mca.network.cmd.firewall.tasking.RPC_INFO_QUERY
elif lpParams['action'] == CMD_ACTION_ENABLE:
if lpParams['portNum'] == 0:
mcl.tasking.OutputError('Invalid port number')
return False
tgtParams.cleanup = not lpParams['permanent']
tgtParams.portNum = lpParams['portNum']
if lpParams['protocol'] == CMD_PROTOCOL_TCP:
tgtParams.protocol = mca.network.cmd.firewall.FIREWALL_PROTOCOL_TCP
elif lpParams['protocol'] == CMD_PROTOCOL_UDP:
tgtParams.protocol = mca.network.cmd.firewall.FIREWALL_PROTOCOL_UDP
else:
mcl.tasking.OutputError('Invalid protocol')
return False
if lpParams['direction'] == CMD_DIRECTION_IN:
tgtParams.direction = mca.network.cmd.firewall.PARAMS_DIRECTION_IN
elif lpParams['direction'] == CMD_DIRECTION_OUT:
tgtParams.direction = mca.network.cmd.firewall.PARAMS_DIRECTION_OUT
else:
mcl.tasking.OutputError('Invalid direction')
return False
if lpParams['name'] != None:
tgtParams.name = lpParams['name']
if lpParams['group'] != None:
tgtParams.group = lpParams['group']
rpc = mca.network.cmd.firewall.tasking.RPC_INFO_ENABLE
elif lpParams['action'] == CMD_ACTION_DELETE:
if lpParams['portNum'] == 0:
mcl.tasking.OutputError('Invalid port number')
return False
tgtParams.portNum = lpParams['portNum']
if lpParams['protocol'] == CMD_PROTOCOL_TCP:
tgtParams.protocol = mca.network.cmd.firewall.FIREWALL_PROTOCOL_TCP
elif lpParams['protocol'] == CMD_PROTOCOL_UDP:
tgtParams.protocol = mca.network.cmd.firewall.FIREWALL_PROTOCOL_UDP
else:
mcl.tasking.OutputError('Invalid protocol')
return False
if lpParams['name'] != None:
tgtParams.name = lpParams['name']
rpc = mca.network.cmd.firewall.tasking.RPC_INFO_DELETE
else:
mcl.tasking.OutputError('Invalid action')
return False
taskXml = mcl.tasking.Tasking()
taskXml.AddProvider(mcl.tasking.technique.TECHNIQUE_MCL_NTNATIVEAPI, tgtParams.provider)
mcl.tasking.OutputXml(taskXml.GetXmlObject())
msg = MarshalMessage()
tgtParams.Marshal(msg)
rpc.SetData(msg.Serialize())
rpc.SetMessagingType('message')
res = mcl.tasking.RpcPerformCall(rpc)
if res != mcl.target.CALL_SUCCEEDED:
mcl.tasking.RecordModuleError(res, 0, mca.network.cmd.firewall.errorStrings)
return False
else:
return True
if __name__ == '__main__':
import sys
if TaskingMain(sys.argv[1]) != True:
sys.exit(-1)
|
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
import numpy as np
import math
import clip
from PIL import Image
from ZSSGAN.utils.text_templates import imagenet_templates, part_templates, imagenet_templates_small
class DirectionLoss(torch.nn.Module):
def __init__(self, loss_type='mse'):
super(DirectionLoss, self).__init__()
self.loss_type = loss_type
self.loss_func = {
'mse': torch.nn.MSELoss,
'cosine': torch.nn.CosineSimilarity,
'mae': torch.nn.L1Loss
}[loss_type]()
def forward(self, x, y):
if self.loss_type == "cosine":
return 1. - self.loss_func(x, y)
return self.loss_func(x, y)
class CLIPLoss(torch.nn.Module):
def __init__(self, device, lambda_direction=1., lambda_patch=0., lambda_global=0., lambda_manifold=0., lambda_texture=0., patch_loss_type='mae', direction_loss_type='cosine', clip_model='ViT-B/32'):
super(CLIPLoss, self).__init__()
self.device = device
self.model, clip_preprocess = clip.load(clip_model, device=self.device)
self.clip_preprocess = clip_preprocess
self.preprocess = transforms.Compose([transforms.Normalize(mean=[-1.0, -1.0, -1.0], std=[2.0, 2.0, 2.0])] + # Un-normalize from [-1.0, 1.0] (GAN output) to [0, 1].
clip_preprocess.transforms[:2] + # to match CLIP input scale assumptions
clip_preprocess.transforms[4:]) # + skip convert PIL to tensor
self.target_direction = None
self.patch_text_directions = None
self.patch_loss = DirectionLoss(patch_loss_type)
self.direction_loss = DirectionLoss(direction_loss_type)
self.patch_direction_loss = torch.nn.CosineSimilarity(dim=2)
self.lambda_global = lambda_global
self.lambda_patch = lambda_patch
self.lambda_direction = lambda_direction
self.lambda_manifold = lambda_manifold
self.lambda_texture = lambda_texture
self.src_text_features = None
self.target_text_features = None
self.angle_loss = torch.nn.L1Loss()
self.model_cnn, preprocess_cnn = clip.load("RN50", device=self.device)
self.preprocess_cnn = transforms.Compose([transforms.Normalize(mean=[-1.0, -1.0, -1.0], std=[2.0, 2.0, 2.0])] + # Un-normalize from [-1.0, 1.0] (GAN output) to [0, 1].
preprocess_cnn.transforms[:2] + # to match CLIP input scale assumptions
preprocess_cnn.transforms[4:]) # + skip convert PIL to tensor
self.texture_loss = torch.nn.MSELoss()
def tokenize(self, strings: list):
return clip.tokenize(strings).to(self.device)
def encode_text(self, tokens: list) -> torch.Tensor:
return self.model.encode_text(tokens)
def encode_images(self, images: torch.Tensor) -> torch.Tensor:
images = self.preprocess(images).to(self.device)
return self.model.encode_image(images)
def encode_images_with_cnn(self, images: torch.Tensor) -> torch.Tensor:
images = self.preprocess_cnn(images).to(self.device)
return self.model_cnn.encode_image(images)
def distance_with_templates(self, img: torch.Tensor, class_str: str, templates=imagenet_templates) -> torch.Tensor:
text_features = self.get_text_features(class_str, templates)
image_features = self.get_image_features(img)
similarity = image_features @ text_features.T
return 1. - similarity
def get_text_features(self, class_str: str, templates=imagenet_templates, norm: bool = True) -> torch.Tensor:
template_text = self.compose_text_with_templates(class_str, templates)
tokens = clip.tokenize(template_text).to(self.device)
text_features = self.encode_text(tokens).detach()
if norm:
text_features /= text_features.norm(dim=-1, keepdim=True)
return text_features
def get_image_features(self, img: torch.Tensor, norm: bool = True) -> torch.Tensor:
image_features = self.encode_images(img)
if norm:
image_features /= image_features.clone().norm(dim=-1, keepdim=True)
return image_features
def compute_text_direction(self, source_class: str, target_class: str) -> torch.Tensor:
source_features = self.get_text_features(source_class)
target_features = self.get_text_features(target_class)
text_direction = (target_features - source_features).mean(axis=0, keepdim=True)
text_direction /= text_direction.norm(dim=-1, keepdim=True)
return text_direction
def compute_gen2img_direction(self, source_images: torch.Tensor, target_images: list) -> torch.Tensor:
with torch.no_grad():
src_encoding = self.get_image_features(source_images)
src_encoding = src_encoding.mean(dim=0, keepdim=True)
target_encodings = []
for target_img in target_images:
preprocessed = self.clip_preprocess(Image.open(target_img)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(preprocessed)
encoding /= encoding.norm(dim=-1, keepdim=True)
target_encodings.append(encoding)
target_encoding = torch.cat(target_encodings, axis=0)
target_encoding = target_encoding.mean(dim=0, keepdim=True)
direction = target_encoding - src_encoding
direction /= direction.norm(dim=-1, keepdim=True)
return direction
def compute_img2img_direction(self, source_images: list, target_images: list) -> torch.Tensor:
with torch.no_grad():
source_encodings = []
for source_img in source_images:
preprocessed = self.clip_preprocess(Image.open(source_img)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(preprocessed)
encoding /= encoding.norm(dim=-1, keepdim=True)
source_encodings.append(encoding)
src_encoding = torch.cat(source_encodings, axis=0)
src_encoding = src_encoding.mean(dim=0, keepdim=True)
target_encodings = []
for target_img in target_images:
preprocessed = self.clip_preprocess(Image.open(target_img)).unsqueeze(0).to(self.device)
encoding = self.model.encode_image(preprocessed)
encoding /= encoding.norm(dim=-1, keepdim=True)
target_encodings.append(encoding)
target_encoding = torch.cat(target_encodings, axis=0)
target_encoding = target_encoding.mean(dim=0, keepdim=True)
direction = target_encoding - src_encoding
direction /= direction.norm(dim=-1, keepdim=True)
return direction
def set_text_features(self, source_class: str, target_class: str) -> None:
source_features = self.get_text_features(source_class).mean(axis=0, keepdim=True)
self.src_text_features = source_features / source_features.norm(dim=-1, keepdim=True)
target_features = self.get_text_features(target_class).mean(axis=0, keepdim=True)
self.target_text_features = target_features / target_features.norm(dim=-1, keepdim=True)
def clip_angle_loss(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
if self.src_text_features is None:
self.set_text_features(source_class, target_class)
cos_text_angle = self.target_text_features @ self.src_text_features.T
text_angle = torch.acos(cos_text_angle)
src_img_features = self.get_image_features(src_img).unsqueeze(2)
target_img_features = self.get_image_features(target_img).unsqueeze(1)
cos_img_angle = torch.clamp(target_img_features @ src_img_features, min=-1.0, max=1.0)
img_angle = torch.acos(cos_img_angle)
text_angle = text_angle.unsqueeze(0).repeat(img_angle.size()[0], 1, 1)
cos_text_angle = cos_text_angle.unsqueeze(0).repeat(img_angle.size()[0], 1, 1)
return self.angle_loss(cos_img_angle, cos_text_angle)
def compose_text_with_templates(self, text: str, templates=imagenet_templates) -> list:
return [template.format(text) for template in templates]
def clip_directional_loss(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
if self.target_direction is None:
self.target_direction = self.compute_text_direction(source_class, target_class)
src_encoding = self.get_image_features(src_img)
target_encoding = self.get_image_features(target_img)
edit_direction = (target_encoding - src_encoding)
edit_direction /= edit_direction.clone().norm(dim=-1, keepdim=True)
return self.direction_loss(edit_direction, self.target_direction).mean()
def global_clip_loss(self, img: torch.Tensor, text) -> torch.Tensor:
if not isinstance(text, list):
text = [text]
tokens = clip.tokenize(text).to(self.device)
image = self.preprocess(img)
logits_per_image, _ = self.model(image, tokens)
return (1. - logits_per_image / 100).mean()
def random_patch_centers(self, img_shape, num_patches, size):
batch_size, channels, height, width = img_shape
half_size = size // 2
patch_centers = np.concatenate([np.random.randint(half_size, width - half_size, size=(batch_size * num_patches, 1)),
np.random.randint(half_size, height - half_size, size=(batch_size * num_patches, 1))], axis=1)
return patch_centers
def generate_patches(self, img: torch.Tensor, patch_centers, size):
batch_size = img.shape[0]
num_patches = len(patch_centers) // batch_size
half_size = size // 2
patches = []
for batch_idx in range(batch_size):
for patch_idx in range(num_patches):
center_x = patch_centers[batch_idx * num_patches + patch_idx][0]
center_y = patch_centers[batch_idx * num_patches + patch_idx][1]
patch = img[batch_idx:batch_idx+1, :, center_y - half_size:center_y + half_size, center_x - half_size:center_x + half_size]
patches.append(patch)
patches = torch.cat(patches, axis=0)
return patches
def patch_scores(self, img: torch.Tensor, class_str: str, patch_centers, patch_size: int) -> torch.Tensor:
parts = self.compose_text_with_templates(class_str, part_templates)
tokens = clip.tokenize(parts).to(self.device)
text_features = self.encode_text(tokens).detach()
patches = self.generate_patches(img, patch_centers, patch_size)
image_features = self.get_image_features(patches)
similarity = image_features @ text_features.T
return similarity
def clip_patch_similarity(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
patch_size = 196 #TODO remove magic number
patch_centers = self.random_patch_centers(src_img.shape, 4, patch_size) #TODO remove magic number
src_scores = self.patch_scores(src_img, source_class, patch_centers, patch_size)
target_scores = self.patch_scores(target_img, target_class, patch_centers, patch_size)
return self.patch_loss(src_scores, target_scores)
def patch_directional_loss(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str) -> torch.Tensor:
if self.patch_text_directions is None:
src_part_classes = self.compose_text_with_templates(source_class, part_templates)
target_part_classes = self.compose_text_with_templates(target_class, part_templates)
parts_classes = list(zip(src_part_classes, target_part_classes))
self.patch_text_directions = torch.cat([self.compute_text_direction(pair[0], pair[1]) for pair in parts_classes], dim=0)
patch_size = 510 # TODO remove magic numbers
patch_centers = self.random_patch_centers(src_img.shape, 1, patch_size)
patches = self.generate_patches(src_img, patch_centers, patch_size)
src_features = self.get_image_features(patches)
patches = self.generate_patches(target_img, patch_centers, patch_size)
target_features = self.get_image_features(patches)
edit_direction = (target_features - src_features)
edit_direction /= edit_direction.clone().norm(dim=-1, keepdim=True)
cosine_dists = 1. - self.patch_direction_loss(edit_direction.unsqueeze(1), self.patch_text_directions.unsqueeze(0))
patch_class_scores = cosine_dists * (edit_direction @ self.patch_text_directions.T).softmax(dim=-1)
return patch_class_scores.mean()
def cnn_feature_loss(self, src_img: torch.Tensor, target_img: torch.Tensor) -> torch.Tensor:
src_features = self.encode_images_with_cnn(src_img)
target_features = self.encode_images_with_cnn(target_img)
return self.texture_loss(src_features, target_features)
def forward(self, src_img: torch.Tensor, source_class: str, target_img: torch.Tensor, target_class: str, texture_image: torch.Tensor = None):
clip_loss = 0.0
if self.lambda_global:
clip_loss += self.lambda_global * self.global_clip_loss(target_img, [f"a {target_class}"])
if self.lambda_patch:
clip_loss += self.lambda_patch * self.patch_directional_loss(src_img, source_class, target_img, target_class)
if self.lambda_direction:
clip_loss += self.lambda_direction * self.clip_directional_loss(src_img, source_class, target_img, target_class)
if self.lambda_manifold:
clip_loss += self.lambda_manifold * self.clip_angle_loss(src_img, source_class, target_img, target_class)
if self.lambda_texture and (texture_image is not None):
clip_loss += self.lambda_texture * self.cnn_feature_loss(texture_image, target_img)
return clip_loss
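# Minimal usage sketch (not part of the original training loop; the random tensors
# below merely stand in for generator outputs in [-1, 1], and the prompt strings
# "photo"/"sketch" are illustrative):
#
#   import torch
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   clip_loss = CLIPLoss(device, lambda_direction=1.0)
#   frozen_out = torch.randn(4, 3, 256, 256, device=device)                       # source-generator images
#   train_out = torch.randn(4, 3, 256, 256, device=device, requires_grad=True)    # trainable-generator images
#   loss = clip_loss(frozen_out, "photo", train_out, "sketch")
#   loss.backward()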
|
"""
Primeira Classe - criada
"""
# Classe pep 8 - Função letra minuscula e Classe letra maiuscula
# Colocar o estado de todos arquivos, tirando uma foto, fazendo o Push para enviar no repositorio no GitHub; direito na pasta -> Git - > Commit Directory
# Subir no GitHub depois da Print, CTRl+SHIFT+K OU direito na pasta -> Git - > PUSH
# Atribuir Metode (Função que pertence a uma classe, sempre conectado a um objeto
class Pessoa: # Objeto tipo pessoa, dentro de uma lista que é atributo que é instancia da própria classe pessoa.
olhos = 2 # Criano atributo Default ou atributo de Classe
def __init__(self, *filhos, nome = None, idade=35):
self.idade = idade
self.nome = nome # Criando método especial -- self.nome ( O nome é o nome do objeto self)
self.filhos = list(filhos) # Criando a lista filhos, objeto complexo
# Atributos de instância e de objetos são criados atreves do metódo __init__
def cumprimentar(self): # Metodo: cumprimentar / Objeto indice:self
return f'Olá, meu nome é: {self.nome}' # f string!
@staticmethod # Criar metodo estatico da classe, independe do objeto. Decoraitor começa com @
def metodo_estatico(): # Não precisa informar o parametro, devido o metodo ser da classe
return 42
@classmethod # Criar metodo estatico da classe, independe do objeto. Decoraitor começa com @. Tera acesso a classe que esta executando.
def nome_e_atributos_de_classe(cls): # cls é preenchido automatico, cls = 'class'
return f'{cls} - olhos {cls.olhos}' # Acessar o atribuo olhos da classe Pessoa
class testando_metodo_super(Pessoa):
pass
# Herença, reutilizar o código de uma classe ja pré-existente
class Homem(testando_metodo_super):
def cumprimentar(self):
#cumprimentar_da_classe = Pessoa.cumprimentar(self) # Não é usual, porque se for pegar da classe Mutante, teria problema.
cumprimentar_da_classe = super().cumprimentar() # Utilizar o método especial super, acessa os elementos da classe pai, seja ela quem for, não precisa estar descrita no ().
return f'{cumprimentar_da_classe}. Aperto de mão'
class Mutante(Pessoa):
olhos = 3
if __name__ == '__main__':
guilherme = Mutante(nome='Guilherme') # Alterando o nome ja na construção e Utilizando a classe herdada Homem.
sabina = Homem(guilherme,nome='Sabina') # Guilherme entra como filho da Sabina.
print(Pessoa.cumprimentar(sabina)) # Não é usual executar o método desta maneira
print(id(sabina))
print(sabina.cumprimentar()) # p: objeto, objeto.método, maneira mais usual!!! - ATRIBUTO DA CLASSE
print(sabina.nome) # Acessar o atributo atraves do objeto
# p.nome = 'Guilherme' # Alterar o valor do atributo!
# print(p.nome)
print(sabina.idade)
for filho in sabina.filhos:
print('Filhos:', filho.nome) # Lista com todos os filhos
guilherme.sobrenome = 'Neto' # Adicinando atributo para objeto especifico de maneira DINÂMICA
del sabina.filhos # Deleta os atributos do objeto sabina, remover de forms DINÂMICA, não é boa prática!
print('Mostra o __dict__ de Guilherme: ', guilherme.__dict__) # Atributo especial __dict__, acessa atributos de instância do objeto guilherme, todos os atributos complexos e dinâmicos
print('Mostra o __dict__ de Sabina: ', sabina.__dict__)
print('---' * 30)
print('Mostrando o atributo da classe (olhos):', Pessoa.olhos)
print('Atributo da classe, acessado pelo ojeto guilherme:', guilherme.olhos)
print("Mostrando que o id é igual para todos os acesso:", id(Pessoa.olhos), "-", id(guilherme.olhos), '-', id(sabina.olhos))
print('---' * 30)
print('Mostrando o metodo estático da classe:', Pessoa.metodo_estatico(),'\nMetodo estático da classe acessado pelo objeto guilherme:', guilherme.metodo_estatico())
print('---'*30)
print('Mostrando o metodo da classe:', Pessoa.nome_e_atributos_de_classe(),'\nMetodo da classe acessado pelo objeto guilherme:', guilherme.nome_e_atributos_de_classe())
print('---'*30)
# Se o objeto pessoa é do tipo Pessoa.
pessoa = Pessoa('Anonimo')
print('Se o objeto pessoa é do tipo Pessoa.')
print(isinstance(pessoa, Pessoa))
print('Se o objeto pessoa é do tipo Homem.')
print(isinstance(pessoa, Homem))
print('Se a instância de homem pessoa é do tipo Pessoa.')
print(isinstance(guilherme, Pessoa))
print('Se a instância de homem pessoa é do tipo Homem.')
print(isinstance(guilherme, Homem))
print('---' * 30)
# Sobrescria de Atributos
print('Mostra quantos olhos o Guilherme tem: ', guilherme.olhos)
print('---' * 30)
# Sobrescria de Metodo
print('', guilherme.cumprimentar())
print('', sabina.cumprimentar())
|
import os
import subprocess
import sys
import tarfile
import tempfile
from dataclasses import asdict
import numpy as np
import onnxruntime as ort
import torch
import torchvision
import yaml
from arachne.data import Model, ModelFormat, ModelSpec, TensorSpec
from arachne.tools.torch2onnx import Torch2ONNX, Torch2ONNXConfig
def check_torch2onnx_output(torch_model, input_shape, onnx_model_path):
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32) # type: ignore
torch_model.eval()
torch_input = torch.from_numpy(input_data).clone()
dout = torch_model(torch_input).to("cpu").detach().numpy().copy()
sess = ort.InferenceSession(onnx_model_path, providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name
aout = sess.run(output_names=None, input_feed={input_name: input_data})[0]
np.testing.assert_allclose(aout, dout, atol=1e-5, rtol=1e-5) # type: ignore
def test_torch2onnx():
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
resnet18 = torchvision.models.resnet18(pretrained=True)
torch.save(resnet18, f="resnet18.pt")
spec = ModelSpec(
inputs=[TensorSpec(name="input0", shape=[1, 3, 224, 224], dtype="float32")],
outputs=[TensorSpec(name="output0", shape=[1, 1000], dtype="float32")],
)
input_model = Model(path="resnet18.pt", format=ModelFormat.PYTORCH, spec=spec)
cfg = Torch2ONNXConfig()
output = Torch2ONNX.run(input_model, cfg)
check_torch2onnx_output(resnet18, [1, 3, 224, 224], output.path)
def test_cli():
# Due to the test time, we only test one case
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
resnet18 = torchvision.models.resnet18(pretrained=True)
model_path = "resnet18.pt"
torch.save(resnet18, f=model_path)
spec = ModelSpec(
inputs=[TensorSpec(name="input0", shape=[1, 3, 224, 224], dtype="float32")],
outputs=[TensorSpec(name="output0", shape=[1, 1000], dtype="float32")],
)
with open("spec.yaml", "w") as file:
yaml.dump(asdict(spec), file)
ret = subprocess.run(
[
sys.executable,
"-m",
"arachne.driver.cli",
"+tools=torch2onnx",
f"model_file={model_path}",
"model_spec_file=spec.yaml",
"output_path=output.tar",
]
)
assert ret.returncode == 0
model_file = None
with tarfile.open("output.tar", "r:gz") as tar:
for m in tar.getmembers():
if m.name.endswith(".onnx"):
model_file = m.name
tar.extractall(".")
assert model_file is not None
check_torch2onnx_output(resnet18, [1, 3, 224, 224], model_file)
|
# Generated by Django 2.2.8 on 2020-01-08 13:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('minke', '0005_auto_20190810_1412'),
]
operations = [
migrations.AlterField(
model_name='hostgroup',
name='name',
field=models.CharField(help_text='Unique group-name.', max_length=128, unique=True, verbose_name='Group-Name'),
),
]
|
from torch import nn
class VentilatorNet(nn.Module):
def __init__(self,
input_dim: int = 4,
lstm_dim: int = 256,
dense_dim: int = 256,
logit_dim: int = 256,
n_classes: int = 1,
) -> None:
"""
Model class.
Args:
cfg: main config
"""
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(input_dim, dense_dim // 2),
nn.ReLU(),
nn.Linear(dense_dim // 2, dense_dim),
nn.ReLU(),
)
self.lstm = nn.LSTM(dense_dim, lstm_dim, batch_first=True, bidirectional=True)
self.logits = nn.Sequential(
nn.Linear(lstm_dim * 2, logit_dim),
nn.ReLU(),
nn.Linear(logit_dim, n_classes),
)
def forward(self, x):
features = self.mlp(x['input'])
features, _ = self.lstm(features)
pred = self.logits(features)
return pred
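if __name__ == '__main__':
    # Smoke-test sketch (not part of the original training pipeline): feeds a
    # random batch through the network and prints the per-timestep output shape.
    import torch
    model = VentilatorNet(input_dim=4)
    batch = {'input': torch.randn(8, 80, 4)}  # (batch, timesteps, features)
    with torch.no_grad():
        pred = model(batch)
    print(pred.shape)  # expected: torch.Size([8, 80, 1])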
|
""" Reserve slots for teams.
This module reserve slots for teams that have
several competitors in the same brackets.
In order to keep track of already existing
team pairings it uses a dictionary: team_pairing_count.
Is a dictionary of ordered
tuples (min, max), with the count of times
that those two teams are matched (min, can not
be 0, as it is an empty slot).
"""
from collections import defaultdict, Counter
from bracketool.domain import Competitor, Clash
from bracketool.brackets import generate_first_round_clashes
from bracketool.brackets import brackets_max_depth_distance
from bracketool.brackets import brackets_depth_distance
import math
import random
def assign_team_to_clash(clashes, reservations, clash_idx, team,
team_pairing_count=None):
"""
Assigns a slot to a team and updates the team pairing
counts.
The clash must not be fully reserved or it will raise an IndexError
exception.
"""
reserv = reservations[clash_idx]
if len(reserv) == 2:
raise IndexError('No empty space in clash idx %d' % clash_idx)
elif len(reserv) == 1 and team_pairing_count is not None:
other_team = reserv[0]
pt = (min(other_team, team), max(other_team, team))
cnt = team_pairing_count.setdefault(pt, 0) + 1
team_pairing_count[pt] = cnt
elif clashes[clash_idx].is_bye:
bye_cnt = team_pairing_count.setdefault((None, team), 0) + 1
team_pairing_count[(None, team)] = bye_cnt
reserv.append(team)
def shuffle_teams_sorted_by_slots(teams_with_required_slots, rnd):
"""Sort the teams by number of slots required, and inside the list
of those with the same required slots, randomizes the order using rnd.
:param dict teams_with_required_slots: mapping of team ids to
number of competitors
:returns list: the teams sorted by number of competitors
"""
d = defaultdict(list)
# group teams by count:
for team, count in teams_with_required_slots.items():
d[count].append(team)
res = []
for key in sorted(d.keys(), reverse=True):
rnd.shuffle(d[key])
res.extend(d[key])
return res
def rate_clash_for_team(reservations, clashes, clash_idx, team,
team_pairing_count):
"""
Gives a rating number for that spot.
A smaller number for better spots, and bigger number
for worse ones.
Best spot -> not pairing
Depending on the number of already paired
Bye is better than pairing again with the same team
Worse spot -> pairing with same team
complexity: O(n)
"""
clash = clashes[clash_idx]
reserv = reservations[clash_idx]
if len(reserv) == 2 or (len(reserv) == 1 and clash.is_bye):
# there is no place in this clash, already reserved
return None
same_team_factor = len(clashes) * len(clashes) * 4
rating = 0
if team in reserv:
# special penalization to face a member of the same team:
rating = pow(same_team_factor, 127)
if clash.is_bye:
rating += team_pairing_count.get((None, team), 0)
    # this makes the overall assignment quadratic in complexity but gives
    # more precise ratings
mdd = brackets_max_depth_distance(clashes)
for other_idx, other_reserv in enumerate(reservations):
d = brackets_depth_distance(clashes, other_idx, clash_idx)
for other_team in other_reserv:
pt = (min(other_team, team), max(other_team, team))
penalty = team_pairing_count.get(pt, 0)
if other_team == team:
penalty = penalty + same_team_factor
rating = rating + (mdd + 1 - d) * penalty
return rating
def reserve_slots_for_team(reservations, clashes, team, required_slots,
team_pairing_count, rnd):
"""
Assign the slots for the members of a team.
"""
ratings = []
for _ in range(required_slots):
ratings = [(rate_clash_for_team(reservations, clashes, idx, team,
team_pairing_count), idx)
for idx in range(len(clashes))]
ratings = [(rate, idx) for rate, idx in ratings if rate is not None]
ratings.sort()
assign_team_to_clash(clashes, reservations, ratings[0][1], team,
team_pairing_count)
def reserve_team_slots(clashes, competitors, team_pairing_count, rnd=None,
assign_single_competitor_teams=True):
"""
:param clashes: the list of first round clashes
:param competitors: the list of competitors
:param team_pairing_count: a dict, counting the number of times that
one team has already been paired with each other team.
:param rnd: a random object to select how teams with same number
of slots are ordered.
:param assign_single_competitor_teams: if set to true, it takes into
account the team_pairing_count_param to reduce the number
of same team pairings. If not, those empty spots can later be
assigned using the competitor rating.
:returns: list of team reservations for each clash:
[[team_a, team_b], [team_c,], [], [team_b,]]
"""
    # put the teams that need more slots first, but keep some randomness
    # among the teams that need the same number of slots
reservations = [list() for _ in clashes]
if rnd is None:
rnd = random.Random()
rnd.seed()
teams_with_required_slots = Counter([comp.team for comp in competitors
if comp.team is not None])
sorted_teams = shuffle_teams_sorted_by_slots(teams_with_required_slots, rnd)
for team in sorted_teams:
cnt = teams_with_required_slots[team]
if cnt == 1 and not assign_single_competitor_teams:
return reservations
reserve_slots_for_team(reservations=reservations,
clashes=clashes,
team=team,
required_slots=cnt,
team_pairing_count=team_pairing_count,
rnd=rnd)
return reservations
def create_reserved_teams_bracket_clashes(competitors,
team_pairing_count=None,
rnd=None,
assign_single_competitor_teams=True):
"""
Initialize the brackets with the number of participants
in the tournament.
Creates the empty brackets with byes filled.
It reserves slots for teams, but does not assign the
slots to individuals.
Filling of the team slots with team members must
be accomplished with other class, that can take into
account: ranking, previous matches with the same
competitor, etc ...
:param competitors: A list of competitors `bracketool.domain.Competitor`
:param team_pairing_count: a map that counts the number of times
that two teams have already faced each other. Can be used to
add more variability, if we have other brackets where the
same teams participate. Can be None, if we don't care about
other brackets.
:param rnd: The random.Random object use to shuffle teams that have
the same number of competitors
:param assign_single_competitor_teams: To reserve spots for teams
that only have one competitor (Setting it to False, would
allow more flexibility to assign competitor by other properties
like the rank).
:returns: a list of clashes with non assigned competitors, and
the list of team reservations.
"""
clashes = generate_first_round_clashes(len(competitors))
# create a temporary track of how team pairing would be distributed,
# but real team_pairing_count is updated when competitors are
# assigned
if team_pairing_count is None:
pairing_count = {}
else:
pairing_count = dict(team_pairing_count)
team_reservations = reserve_team_slots(
clashes, competitors, pairing_count, rnd,
assign_single_competitor_teams)
return clashes, team_reservations
def clashes_team_count(team_reservations):
return Counter([team for reserv in team_reservations for team in reserv])
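# Usage sketch (hedged: the exact Competitor constructor lives in
# bracketool.domain and may require more fields than shown; only the
# `.team` attribute is relied on by this module):
#
#   competitors = [...]                      # Competitor objects, team=None for unaffiliated entrants
#   clashes, reservations = create_reserved_teams_bracket_clashes(competitors)
#   print(clashes_team_count(reservations))  # Counter of reserved slots per team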
|
# encoding: utf-8
# module Tessellation.Adapters calls itself Adapters
# from Tessellation, Version=1.2.1.3083, Culture=neutral, PublicKeyToken=null
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class Cell2(TriangulationCell[Vertex2, Cell2]):
""" Cell2() """
Centroid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Centroid(self: Cell2) -> Point
"""
Circumcenter = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Circumcenter(self: Cell2) -> Point
"""
class Vertex2(object, IVertex, IGraphicItem):
""" Vertex2(x: float, y: float) """
def AsPoint(self):
""" AsPoint(self: Vertex2) -> Point """
pass
def AsVector(self):
""" AsVector(self: Vertex2) -> Vector """
pass
@staticmethod
def FromUV(uv):
""" FromUV(uv: UV) -> Vertex2 """
pass
def Tessellate(self, package, parameters):
""" Tessellate(self: Vertex2, package: IRenderPackage, parameters: TessellationParameters) """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, x, y):
""" __new__(cls: type, x: float, y: float) """
pass
def __repr__(self, *args): #cannot find CLR method
""" __repr__(self: object) -> str """
pass
Position = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Position(self: Vertex2) -> Array[float]
Set: Position(self: Vertex2) = value
"""
|
# Generated by Django 3.0.6 on 2020-05-22 04:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("teams", "0006_join_code_requires_less_states"),
("mediahub", "0006_require_fk_relationship"),
("events", "0005_make_fk_and_uuid_required"),
]
operations = [migrations.RenameModel(old_name="TeamEvent", new_name="Event")]
|
# -*- coding: utf-8 -*-
import struct
from src.crypt import gf28, sha256, crypt_modes
from src.crypt.utils import rol_int_bytes, ror_int_bytes, get_byte_from_int, set_byte_in_int, add_padding_to_block, \
remove_padding_from_block
__author__ = "zebraxxl"
__sBox = [
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
]
__invSBox = [
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
]
__rCon = [
0x00000000,
0x01000000,
0x02000000,
0x04000000,
0x08000000,
0x10000000,
0x20000000,
0x40000000,
0x80000000,
0x1b000000,
0x36000000,
]
def __block2state(block):
result = []
for r in range(4):
row = []
for c in range(4):
row.append(block[c * 4 + r])
result.append(struct.unpack(">I", bytes(row))[0])
return result
def __state2block(state):
result = []
for c in range(4):
for r in range(4):
result.append(get_byte_from_int(state[r], 3 - c))
return bytes(result)
def __sub_bytes(state):
for i, v in enumerate(state):
state[i] = __sBox[(v & 0xff000000) >> 24] << 24 | \
__sBox[(v & 0x00ff0000) >> 16] << 16 | \
__sBox[(v & 0x0000ff00) >> 8] << 8 | \
__sBox[(v & 0x000000ff)]
def __inv_sub_bytes(state):
for i, v in enumerate(state):
state[i] = __invSBox[(v & 0xff000000) >> 24] << 24 | \
__invSBox[(v & 0x00ff0000) >> 16] << 16 | \
__invSBox[(v & 0x0000ff00) >> 8] << 8 | \
__invSBox[(v & 0x000000ff)]
def __shift_rows(state):
for i, v in enumerate(state):
state[i] = rol_int_bytes(v, i)
def __inv_shift_rows(state):
for i, v in enumerate(state):
state[i] = ror_int_bytes(v, i)
def __mix_columns(state):
for c in range(4):
col = (get_byte_from_int(state[0], c),
get_byte_from_int(state[1], c),
get_byte_from_int(state[2], c),
get_byte_from_int(state[3], c))
state[0] = set_byte_in_int(
state[0], c, gf28.mul_by_2[col[0]] ^ col[3] ^ col[2] ^ gf28.mul_by_3[col[1]])
state[1] = set_byte_in_int(
state[1], c, gf28.mul_by_2[col[1]] ^ col[0] ^ col[3] ^ gf28.mul_by_3[col[2]])
state[2] = set_byte_in_int(
state[2], c, gf28.mul_by_2[col[2]] ^ col[1] ^ col[0] ^ gf28.mul_by_3[col[3]])
state[3] = set_byte_in_int(
state[3], c, gf28.mul_by_2[col[3]] ^ col[2] ^ col[1] ^ gf28.mul_by_3[col[0]])
def __inv_mix_columns(state):
for c in range(4):
col = (get_byte_from_int(state[0], c),
get_byte_from_int(state[1], c),
get_byte_from_int(state[2], c),
get_byte_from_int(state[3], c))
state[0] = set_byte_in_int(
state[0], c,
gf28.mul_by_e[col[0]] ^ gf28.mul_by_9[col[3]] ^ gf28.mul_by_d[col[2]] ^ gf28.mul_by_b[col[1]])
state[1] = set_byte_in_int(
state[1], c,
gf28.mul_by_e[col[1]] ^ gf28.mul_by_9[col[0]] ^ gf28.mul_by_d[col[3]] ^ gf28.mul_by_b[col[2]])
state[2] = set_byte_in_int(
state[2], c,
gf28.mul_by_e[col[2]] ^ gf28.mul_by_9[col[1]] ^ gf28.mul_by_d[col[0]] ^ gf28.mul_by_b[col[3]])
state[3] = set_byte_in_int(
state[3], c,
gf28.mul_by_e[col[3]] ^ gf28.mul_by_9[col[2]] ^ gf28.mul_by_d[col[1]] ^ gf28.mul_by_b[col[0]])
def __add_round_key(state, key_schedule):
for r, _ in enumerate(state):
state[r] ^= get_byte_from_int(key_schedule[0], 3 - r) << 24 | \
get_byte_from_int(key_schedule[1], 3 - r) << 16 | \
get_byte_from_int(key_schedule[2], 3 - r) << 8 | \
get_byte_from_int(key_schedule[3], 3 - r)
def __sub_word(a):
return __sBox[get_byte_from_int(a, 3)] << 24 | \
__sBox[get_byte_from_int(a, 2)] << 16 | \
__sBox[get_byte_from_int(a, 1)] << 8 | \
__sBox[get_byte_from_int(a, 0)]
def __cipher_block(block, key_schedule, nr):
state = __block2state(block)
__add_round_key(state, key_schedule[0:4])
for r in range(1, nr):
__sub_bytes(state)
__shift_rows(state)
__mix_columns(state)
__add_round_key(state, key_schedule[r * 4:r * 4 + 4])
__sub_bytes(state)
__shift_rows(state)
__add_round_key(state, key_schedule[nr * 4:nr * 4 + 4])
return __state2block(state)
def __inv_cipher_block(block, key_schedule, nr):
state = __block2state(block)
__add_round_key(state, key_schedule[nr * 4:nr * 4 + 4])
for r in range(nr - 1, 0, -1):
__inv_shift_rows(state)
__inv_sub_bytes(state)
__add_round_key(state, key_schedule[r * 4:r * 4 + 4])
__inv_mix_columns(state)
__inv_shift_rows(state)
__inv_sub_bytes(state)
__add_round_key(state, key_schedule[0:4])
return __state2block(state)
def __key_expansion(key, nk, nr):
key_schedule = []
words_nk = nk // 4
for i in range(0, words_nk):
key_schedule.append(struct.unpack(">I", key[i * 4:i * 4 + 4])[0])
for i in range(words_nk, 4 * (nr + 1)):
temp = key_schedule[i - 1]
if (i % words_nk) == 0:
temp = __sub_word(rol_int_bytes(temp, 1)) ^ __rCon[i // words_nk]
elif (words_nk > 6) and ((i % words_nk) == 4):
temp = __sub_word(temp)
key_schedule.append(key_schedule[i - words_nk] ^ temp)
return key_schedule
def encode(message, key, mode="CBC", iv=None):
key_length = len(key)
if (key_length != 16) and (key_length != 24) and (key_length != 32):
key = sha256.sha256_hash(key)
message = add_padding_to_block(message, 16)
nk = len(key)
nr = 10 if nk == 16 else (12 if nk == 24 else 14)
key_schedule = __key_expansion(key, nk, nr)
return crypt_modes.encode(mode, message, 16, key_schedule, lambda b, k: __cipher_block(b, k, nr),
iv)
def decode(message, key, mode="CBC", iv=None):
key_length = len(key)
if (key_length != 16) and (key_length != 24) and (key_length != 32):
key = sha256.sha256_hash(key)
nk = len(key)
nr = 10 if nk == 16 else (12 if nk == 24 else 14)
key_schedule = __key_expansion(key, nk, nr)
return remove_padding_from_block(
crypt_modes.decode(mode, message, 16, key_schedule, lambda b, k: __inv_cipher_block(b, k, nr),
iv)
)
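# Round-trip sketch (hedged: padding, key hashing and the CBC/ECB plumbing are
# provided by the project-local crypt_modes/sha256/utils modules imported above;
# the literal key, message and IV below are only illustrative):
#
#   iv = b"\x00" * 16
#   ciphertext = encode(b"attack at dawn", b"not a 16/24/32-byte key", mode="CBC", iv=iv)
#   assert decode(ciphertext, b"not a 16/24/32-byte key", mode="CBC", iv=iv) == b"attack at dawn"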
|
'''
Pandas Module for external dataframes
Inherit and extend for particular patterns. It is a bit of a misnomer to use the
term "dataframe", since there are very few expected attributes and they are by no
means unique to pandas.
'''
__author__ = 'Elisha Yadgaran'
import pandas as pd
from itertools import chain
from typing import List, Union, Optional
from simpleml.datasets.abstract_mixin import AbstractDatasetMixin
from simpleml.utils.errors import DatasetError
from simpleml.pipelines.validation_split_mixins import Split
DATAFRAME_SPLIT_COLUMN: str = 'DATASET_SPLIT'
class BasePandasDatasetMixin(AbstractDatasetMixin):
'''
Pandas mixin class with control mechanism for `self.dataframe` of
type `dataframe`. Mostly assumes pandas syntax, not types, so may be compatible
with pandas drop-in replacements. Recommended to implement a parallel mixin
for other frameworks though
    In particular it expects a pd.DataFrame-like type supporting:
        - query()
        - columns
        - drop()
        - squeeze()
WARNING: Needs to be used as a base class for datasets because it overwrites
the standard dataset dataframe property
'''
@property
def X(self) -> pd.DataFrame:
'''
Return the subset that isn't in the target labels (across all potential splits)
'''
return self.get(column='X', split=None)
@property
def y(self) -> pd.DataFrame:
'''
Return the target label columns
'''
return self.get(column='y', split=None)
@property
def _dataframe(self) -> pd.DataFrame:
'''
Overwrite base behavior to return a copy of the data in case consumers
attempt to mutate the data structure
Only copies the pandas container - underlying cell objects can still propagate
inplace mutations (eg lists, dicts, objects)
'''
# return a copy so mutations can happen inplace with memory efficient objects
return self._external_file.copy()
@_dataframe.setter
def _dataframe(self, df: pd.DataFrame) -> None:
'''
Setter method for self._external_file
Allows mixins/subclasses to validate input
'''
self._external_file = df
def _validate_dtype(self, df: pd.DataFrame) -> None:
'''
Validating setter method for self._external_file
Checks input is of type pd.DataFrame
'''
if not isinstance(df, pd.DataFrame):
raise DatasetError('Pandas Datasets must be of type `pd.DataFrame`')
def get(self, column: Optional[str], split: Optional[str]) -> pd.DataFrame:
'''
Explicitly split validation splits
Uses self.label_columns to separate x and y columns inside the returned dataframe
returns empty dataframe for missing combinations of column & split
'''
registered_sections = self.config.get('split_section_map')
if column is not None and column != 'X' and column not in registered_sections:
raise ValueError(f'Only support registered sections: {registered_sections}, X, or None')
dataframe = self.dataframe # copy
# choose the columns to slice from the dataframe
if column is None: # All except internal columns
return_columns = [col for col in dataframe.columns if col != DATAFRAME_SPLIT_COLUMN]
elif column != 'X':
# other passthrough columns
return_columns = registered_sections[column]
else: # X
all_other_columns = list(chain(*registered_sections.values()))
return_columns = [
col for col in dataframe.columns
if col != DATAFRAME_SPLIT_COLUMN
and col not in all_other_columns
]
return self._get(dataframe=dataframe, columns=return_columns, split=split)
@staticmethod
def _get(dataframe: pd.DataFrame, columns: List[str], split: str) -> pd.DataFrame:
'''
Internal method to extract data subsets from a dataframe
:param dataframe: the dataframe to subset from
:param columns: List of columns to slice from the dataframe
:param split: row identifiers to slice rows (in internal column mapped to `DATAFRAME_SPLIT_COLUMN`)
'''
        if split is not None:  # otherwise return the full dataset (all splits) - already a copy
            # query automatically returns a copy with a weakref
            if DATAFRAME_SPLIT_COLUMN not in dataframe.columns:
                raise DatasetError(f'Cannot retrieve dataset split `{split}` from dataframe without `{DATAFRAME_SPLIT_COLUMN}` column')
dataframe = dataframe.query("{}=='{}'".format(DATAFRAME_SPLIT_COLUMN, split))
# inplace drop extra columns
drop_columns = [col for col in dataframe.columns if col not in columns]
if drop_columns:
dataframe.drop(drop_columns, axis=1, inplace=True)
# Last check in case any of the operations created a view or weakref copy
if (hasattr(dataframe, '_is_view') and dataframe._is_view) or \
(hasattr(dataframe, '_is_copy') and dataframe._is_copy is not None):
dataframe = dataframe.copy()
return dataframe
def get_split(self, split: Optional[str]) -> Split:
'''
Wrapper accessor to return a split object (for internal use)
'''
registered_sections = self.config.get('split_section_map')
return Split(
# explicitly get X as the "other" columns
X=self.get(column='X', split=split),
# should include y and any others if they exist
**{section: self.get(split=split, column=section) for section in registered_sections}
).squeeze()
def get_split_names(self) -> List[str]:
'''
Helper to expose the splits contained in the dataset
'''
df = self.dataframe
if DATAFRAME_SPLIT_COLUMN in df.columns:
return df[DATAFRAME_SPLIT_COLUMN].unique().tolist()
else:
return []
@staticmethod
def concatenate_dataframes(dataframes: List[pd.DataFrame],
split_names: List[str]) -> pd.DataFrame:
'''
Helper method to merge dataframes into a single one with the split
specified under `DATAFRAME_SPLIT_COLUMN`
'''
for df, name in zip(dataframes, split_names):
df[DATAFRAME_SPLIT_COLUMN] = name
# Join row wise - drop index in case duplicates exist
return pd.concat(dataframes, axis=0, ignore_index=True)
@staticmethod
def merge_split(split: Split) -> pd.DataFrame:
'''
Helper method to merge all dataframes in a split object into a single df
does a column-wise join
ex: `df1 = [A, B, C](4 rows)` + `df2 = [D, E, F](4 rows)`
returns: `[A, B, C, D, E, F](4 rows)`
'''
return pd.concat(list(split.values()), axis=1)
def get_feature_names(self) -> List[str]:
'''
Should return a list of the features in the dataset
'''
return self.X.columns.tolist()
@staticmethod
def load_csv(filename: str, **kwargs) -> pd.DataFrame:
'''Helper method to read in a csv file'''
return pd.read_csv(filename, **kwargs)
@staticmethod
def squeeze_dataframe(df: pd.DataFrame) -> pd.Series:
'''
Helper method to run dataframe squeeze and return a series
'''
return df.squeeze(axis=1)
class MultiLabelPandasDatasetMixin(BasePandasDatasetMixin):
'''
Multilabel implementation of pandas dataset - same as base for now
'''
pass
class SingleLabelPandasDatasetMixin(BasePandasDatasetMixin):
'''
Customized label logic for single label (y dimension = 1) datasets
'''
def _validate_schema(self, df: pd.DataFrame):
'''
Extend validation to check df has only a single column for the y section
'''
# validate single label status
labels = self.label_columns
if len(labels) != 1:
raise DatasetError(f'SingleLabelPandasDataset requires exactly one label column, {len(labels)} found')
@property
def label_column(self):
labels = self.label_columns
# validate single label status
if len(labels) != 1:
raise DatasetError(f'SingleLabelPandasDataset requires exactly one label column, {len(labels)} found')
return labels[0]
def get(self, column: str, split: str) -> Union[pd.Series, pd.DataFrame]:
'''
Extends PandasDatasetMixin.get with logic to squeeze labels to a
series (1D frame)
'''
data = super().get(column=column, split=split)
if column == 'X':
return data
# Custom logic for other split sections
# 1D dataframe can squeeze to a series
return self.squeeze_dataframe(data)
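# Example of the standalone helper above (no dataset instance required):
#
#   train = pd.DataFrame({'a': [1, 2], 'label': [0, 1]})
#   test = pd.DataFrame({'a': [3], 'label': [1]})
#   combined = BasePandasDatasetMixin.concatenate_dataframes([train, test], ['TRAIN', 'TEST'])
#   # `combined` now carries a DATASET_SPLIT column with the values TRAIN, TRAIN, TEST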
|
"""The settings file migration base class."""
from typing import Dict
from ...content_defs import ContentView
from ...content_defs import SerializationFormat
from ..ansi import COLOR
from ..ansi import changed
from ..ansi import info
from ..serialize import Loader
from ..serialize import serialize_write_file
from ..serialize import yaml
from .definitions import Migration
class SettingsFile(Migration):
"""The settings file migration base class."""
name = "Settings file migration base class"
def __init__(self):
"""Initialize the settings file migration."""
super().__init__()
self.content: Dict = {}
self._backup_suffix = ".v0"
def run(self, *args, **kwargs) -> None:
"""Perform the settings file migration.
:param args: Positional arguments
:param kwargs: Keyword arguments
"""
if not self.content:
with self.settings_file_path.open("r", encoding="utf-8") as f:
try:
self.content = yaml.load(f, Loader=Loader)
except (yaml.scanner.ScannerError, yaml.parser.ParserError):
return
if not self.content:
return
# Check if any of the migrations are needed
if self.check:
self.run_steps()
self.was_needed = self.needed_now
return
        # Not in check mode and the migration wasn't needed previously
if not self.was_needed:
return
self.run_steps()
# Something may have gone wrong
if self.needed_now:
return
# Back up the current
backup = self.settings_file_path.rename(
self.settings_file_path.with_suffix(self._backup_suffix),
)
info(color=COLOR, message=f"Backup: {backup}")
# Write the new file
if self.settings_file_path.suffix in (".yml", ".yaml"):
serialization_format = SerializationFormat.YAML
elif self.settings_file_path.suffix == ".json":
serialization_format = SerializationFormat.JSON
serialize_write_file(
content=self.content,
content_view=ContentView.NORMAL,
file_mode="w",
file=self.settings_file_path,
serialization_format=serialization_format,
)
changed(color=COLOR, message=f"Updated: {self.settings_file_path}")
return
|
# syft relative
from ..ast.globals import Globals
from ..lib.python import create_python_ast
from ..lib.torch import create_torch_ast
from ..lib.torchvision import create_torchvision_ast
from .misc import create_union_ast
# now we need to load the relevant frameworks onto the node
def create_lib_ast() -> Globals:
python_ast = create_python_ast()
torch_ast = create_torch_ast()
torchvision_ast = create_torchvision_ast()
# numpy_ast = create_numpy_ast()
lib_ast = Globals()
lib_ast.add_attr(attr_name="syft", attr=python_ast.attrs["syft"])
lib_ast.add_attr(attr_name="torch", attr=torch_ast.attrs["torch"])
lib_ast.add_attr(attr_name="torchvision", attr=torchvision_ast.attrs["torchvision"])
# let the misc creation be always the last, as it needs the full ast solved
# to properly generated unions
misc_ast = getattr(getattr(create_union_ast(lib_ast), "syft"), "lib")
misc_root = getattr(getattr(lib_ast, "syft"), "lib")
misc_root.add_attr(attr_name="misc", attr=misc_ast.attrs["misc"])
# lib_ast.add_attr(attr_name="numpy", attr=numpy_ast.attrs["numpy"])
return lib_ast
# constructor: copyType = create_lib_ast
lib_ast = create_lib_ast()
lib_ast._copy = create_lib_ast
|
from abc import abstractmethod
from dataclasses import dataclass
from typing import Optional
import pytest
import sqlalchemy as sa
from injector import ClassProvider
from injector import inject
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.engine import Engine
from sqlalchemy.orm import mapper
from winter.core import get_injector
from winter.data import CRUDRepository
from winter.data.exceptions import NotFoundException
from winter_ddd import AggregateRoot
from winter_ddd import DomainEvent
from winter_ddd import domain_event_handler
from winter_sqlalchemy import sqla_crud
@dataclass
class MarkedAsDoneDomainEvent(DomainEvent):
entity: 'MyEntity'
class MyEntity(AggregateRoot):
def __init__(self, id_: int, name: Optional[str], lastname: Optional[str]):
super().__init__()
self.id = id_
self.name = name
self.lastname = lastname
def mark_as_done(self):
self.name = 'done'
self.domain_events.register(MarkedAsDoneDomainEvent(self))
metadata = MetaData()
my_entity_table = Table(
'my_entities',
metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('lastname', String),
)
mapper(MyEntity, my_entity_table)
class MyRepository(CRUDRepository[MyEntity, int]):
@abstractmethod
def find_one_by_name_and_lastname(self, name: str, lastname: str) -> Optional[MyEntity]:
pass
class MyRepositoryImpl(MyRepository):
@inject
def __init__(self, engine: Engine):
self._engine = engine
def find_one_by_name_and_lastname(self, name: str, lastname: str) -> Optional[MyEntity]:
query = sa.select([
my_entity_table.c.id,
]).where(
sa.and_(my_entity_table.c.name == name, my_entity_table.c.lastname == lastname),
)
with self._engine.connect() as connection:
rows = connection.execute(query)
rows = list(rows)
assert len(rows) <= 1
if not rows:
return None
entity_id = rows[0][0]
return self.get_by_id(entity_id)
class DomainEventHandlers:
@inject
def __init__(self, repository: MyRepository):
self._repository = repository
@domain_event_handler
def on_marked_as_done(self, event: MarkedAsDoneDomainEvent):
entity = event.entity
new_entity = MyEntity(
id_=entity.id * 100,
name='handled',
lastname='',
)
self._repository.save(new_entity)
@pytest.fixture()
def fixture():
return get_injector().get(Fixture)
class Fixture:
@inject
def __init__(self, engine: Engine):
injector = get_injector()
injector.binder.bind(MyRepository, to=ClassProvider(sqla_crud(MyRepository)))
self._engine = engine
self.repository = injector.get(MyRepository)
metadata.drop_all(bind=self._engine)
metadata.create_all(bind=self._engine)
def execute(self, sql):
return self._engine.execute(sql)
def test_count(fixture):
fixture.execute('INSERT INTO my_entities (id) VALUES (1), (2), (3);')
# Act
count = fixture.repository.count()
assert count == 3
def test_delete(fixture):
fixture.execute('INSERT INTO my_entities (id) VALUES (1), (2);')
entity = fixture.repository.find_by_id(1)
# Act
fixture.repository.delete(entity)
result = fixture.execute('SELECT id FROM my_entities;')
assert list(result) == [(2,)]
assert fixture.repository.find_by_id(1) is None
def test_delete_many(fixture):
fixture.execute('INSERT INTO my_entities (id) VALUES (1), (2), (3);')
entity_1 = fixture.repository.find_by_id(1)
entity_3 = fixture.repository.find_by_id(3)
# Act
fixture.repository.delete_many([entity_1, entity_3])
result = fixture.execute('SELECT id FROM my_entities;')
assert list(result) == [(2,)]
assert fixture.repository.find_by_id(1) is None
assert fixture.repository.find_by_id(3) is None
def test_delete_all(fixture):
fixture.execute('INSERT INTO my_entities (id) VALUES (1), (2), (3);')
# Act
fixture.repository.delete_all()
count = fixture.execute('SELECT COUNT(*) FROM my_entities;').scalar()
assert count == 0
def test_delete_by_id(fixture):
fixture.execute('INSERT INTO my_entities (id) VALUES (1), (2);')
# Act
fixture.repository.delete_by_id(1)
result = fixture.execute('SELECT id FROM my_entities;')
assert list(result) == [(2,)]
def test_exists_by_id(fixture):
fixture.execute('INSERT INTO my_entities (id) VALUES (1), (2);')
# Act
exists = fixture.repository.exists_by_id(2)
assert exists is True
def test_not_exists_by_id(fixture):
fixture.execute('INSERT INTO my_entities (id) VALUES (1), (2);')
# Act
exists = fixture.repository.exists_by_id(3)
assert exists is False
def test_find_all(fixture):
fixture.execute('INSERT INTO my_entities (id) VALUES (1), (2), (3);')
# Act
entities = fixture.repository.find_all()
ids = [entity.id for entity in entities]
assert len(ids) == 3
assert set(ids) == {1, 2, 3}
def test_find_all_by_id(fixture):
fixture.execute('INSERT INTO my_entities (id) VALUES (1), (2), (3);')
# Act
entities = fixture.repository.find_all_by_id([1, 4, 3])
ids = [entity.id for entity in entities]
assert len(ids) == 2
assert set(ids) == {1, 3}
def test_find_by_id(fixture):
fixture.execute('INSERT INTO my_entities (id) VALUES (1), (2), (3);')
# Act
entity = fixture.repository.find_by_id(2)
assert entity.id == 2
def test_get_by_id(fixture):
with pytest.raises(NotFoundException, match='MyEntity with ID=2 not found'):
# Act
fixture.repository.get_by_id(2)
def test_save_new(fixture):
entity = MyEntity(id_=1, name='name', lastname='lastname')
# Act
fixture.repository.save(entity)
entities = list(fixture.repository.find_all())
assert len(entities) == 1
assert entities[0].id == 1
assert entities[0].name == 'name'
assert entities[0].lastname == 'lastname'
def test_save(fixture):
fixture.execute("INSERT INTO my_entities (id, name) VALUES (1, 'started'), (2, 'started'), (3, 'started');")
entity = fixture.repository.find_by_id(2)
entity.mark_as_done()
# Act
entity = fixture.repository.save(entity)
assert entity.name == 'done'
entities = fixture.execute('SELECT id, name FROM my_entities;').fetchall()
entities = [(id_, name) for id_, name in entities]
assert len(entities) == 4
assert set(entities) == {(1, 'started'), (2, 'done'), (3, 'started'), (200, 'handled')}
def test_save_many(fixture):
fixture.execute("INSERT INTO my_entities (id, name) VALUES (1, 'started'), (2, 'started'), (3, 'started');")
entities = fixture.repository.find_all_by_id([1, 3])
for entity in entities:
entity.mark_as_done()
# Act
entities = fixture.repository.save_many(entities)
entities = list(entities)
assert len(entities) == 2
assert all(entity.name == 'done' for entity in entities)
entities = fixture.execute('SELECT id, name FROM my_entities;').fetchall()
entities = [(id_, name) for id_, name in entities]
assert len(entities) == 5
assert set(entities) == {(1, 'done'), (2, 'started'), (3, 'done'), (100, 'handled'), (300, 'handled')}
@pytest.mark.parametrize(
'name, lastname, entity_id', [
('name', 'something', None),
('name', 'lastname', 2),
],
)
def test_find_one_by_name_and_lastname(fixture, name, lastname, entity_id):
fixture.execute("INSERT INTO my_entities (id, name, lastname) VALUES (1, 'name', NULL), (2, 'name', 'lastname');")
# Act
entity = fixture.repository.find_one_by_name_and_lastname(name, lastname)
if entity_id is None:
assert entity is None
else:
assert entity.id == entity_id
|
from logger import logging, writelog
import re
def doc_preprocess(file_name):
with open(file_name, 'r+') as file:
text = file.read()
        # Remove comments (handled elsewhere but better safe than sorry):
        # keep the text before the first '%' on each line and drop the comment itself
        text = re.sub(r'(^.*?)%.*$', r'\1', text, flags=re.MULTILINE)
# Remove \left and \right
text = re.sub(r'\\left|\\right', '', text)
# Replace \def with \newcommand
def def_to_newcommand(match):
out = r'\newcommand{' + match.group('name') + r'}'
if match.group('args'):
max_arg = 1
for c in match.group('args'):
try:
arg = int(c)
if arg > max_arg:
max_arg = arg
except ValueError:
pass
out += r'[{}]'.format(max_arg)
out += match.group('repl')
return out
text = re.sub(
r'\\def[\s]*(?P<name>\\[a-zA-Z]+)[\s]*(?P<args>(\[?#[0-9]\]?)*)[\s]*(?P<repl>\{.*\})',
def_to_newcommand,
text
)
# Account for white space between arguments
text = re.sub(r'(?<=\}|\])[\s]+(?=\{|\[)', '', text)
# Remove double backslash
text = re.sub(r'\\\\', '', text)
# Remove backslash canceled whitespace
text = re.sub(r'\\(?=[\s])', '', text)
        # Replace file contents (rewind first so the rewrite starts at the beginning)
        file.seek(0)
        file.truncate(0)
        file.write(text)
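# Example of the \def rewriting performed above (illustrative input, not from a real document):
#   r'\def\norm#1{\lVert #1 \rVert}'  becomes  r'\newcommand{\norm}[1]{\lVert #1 \rVert}'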
|
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
def paginate(request, items, paginate_by=100):
paginated_items = Paginator(items, paginate_by)
page = request.GET.get('page', 1)
try:
page_items = paginated_items.get_page(page)
except PageNotAnInteger:
page_items = paginated_items.get_page(1)
except EmptyPage:
page_items = paginated_items.get_page(paginated_items.num_pages)
return page_items
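# Typical usage inside a view (sketch; the Article model and template name are illustrative):
#
#   def article_list(request):
#       page_items = paginate(request, Article.objects.all(), paginate_by=25)
#       return render(request, 'articles/list.html', {'page_obj': page_items})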
|
import random
import math
async def message_to_xp(bot, discord, message, botconfig, platform, os, datetime, one_result, guild_result, localization, unix_time_millis, embed_color, connection, cursor, prefix):
    if len(message.content) > 10:  # for every message longer than 10 characters...
        expi = one_result[8] + random.randint(5, 40)  # ...a random amount of XP is added
cursor.executemany('UPDATE users SET scores=? where userid=?', [(expi, message.author.id)])
try:
lvch=expi/(one_result[9] * (50 + (one_result[9] * 10)))
except ZeroDivisionError as zerodivide_err:
lvch=1
connection.commit()
lv=math.floor(lvch)
print(((one_result[9]) * (50 + ((one_result[9]) * 10))) * (one_result[9] + 1))
if one_result[9] < lv:
if message.content.startswith(botconfig['prefix']) is False and message.content.startswith(guild_result[6]) is False and one_result[9] > 0:
new_level_msg = discord.Embed(title=localization[1][18][0], description=str(localization[1][18][1]).format('<@' + str(message.author.id) + '>', lv), color=embed_color)
await message.channel.send(embed=new_level_msg)
cursor.executemany('UPDATE users SET level=? where userid=?', [(lv, message.author.id)])
connection.commit()
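# Level-curve sketch for the formula above: the candidate level is
# floor(total_xp / (current_level * (50 + current_level * 10))).
# E.g. at level 2 the divisor is 2 * (50 + 20) = 140, so 300 accumulated XP
# gives floor(300 / 140) = 2 and no level-up message is sent yet.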
|
from qfunction import *
from qfunction.quantum import *
from qfunction.quantum.quantum_circuit import q_phi
import numpy as np
from numpy import sin, cos
def q_vector_bloch(gamma,theta:float=False,q=1,israd=True):
gamma,theta = radian(gamma) if(not israd) else gamma, radian(theta) if(not israd) else theta
q = (q+1)/2
if(not(type(theta)==bool)):
n_xq = sin(q_phi(gamma,q))*cos(q_phi(theta,q))
        n_yq = sin(q_phi(gamma,q))*sin(q_phi(theta,q))
n_zq = cos(q_phi(gamma,q))
all = [n_xq,n_yq,n_zq]
return {'all':all,'x':n_xq,'y':n_yq,'z':n_zq}
else:
n_zq = cos(q_phi(gamma,q))
return {'z':n_zq}
def q_vector_x(gamma:float,theta:float,q:float=1,israd:bool=True)-> list:
return q_vector_bloch(gamma=gamma,theta=theta,q=q,israd=israd)['x']
def q_vector_y(gamma:float,theta:float,q:float=1,israd:bool=True)-> list:
return q_vector_bloch(gamma=gamma,theta=theta,q=q,israd=israd)['y']
def q_vector_z(gamma:float,theta=False,q:float=1,israd:bool=True)-> list:
return q_vector_bloch(gamma=gamma,theta=theta,q=q,israd=israd)['z']
def density_matrix(theta:float,gamma:float,q:float=1):
    return 1/2*np.array([[1+q_vector_z(theta=theta,gamma=gamma,q=q), q_vector_x(theta=theta,gamma=gamma,q=q)+1j*q_vector_y(theta=theta,gamma=gamma,q=q)],
                         [q_vector_x(theta=theta,gamma=gamma,q=q)-1j*q_vector_y(theta=theta,gamma=gamma,q=q), 1-q_vector_z(theta=theta,gamma=gamma,q=q)]])
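# Sanity-check sketch (assumes q_phi is defined for the chosen parameters; the
# numeric angles are illustrative):
#
#   rho = density_matrix(theta=0.3, gamma=1.1, q=1)
#   assert abs(np.trace(rho) - 1) < 1e-9      # unit trace
#   assert np.allclose(rho, rho.conj().T)     # Hermitian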
|
def textQueries(sentences, queries):
    sentences = [set(s.split(" ")) for s in sentences]
queries = [q.split(" ") for q in queries]
res = []
for q in queries:
matches = []
for i in range(len(sentences)):
isMatch = True
for w in q:
if w not in sentences[i]:
isMatch = False
break
if isMatch:
matches.append(i)
res.append(matches)
return res
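# Example: each query returns the indices of the sentences containing every query word.
#   textQueries(["jim likes mary", "kate likes tom", "tom does not like jim"], ["jim tom", "likes"])
#   -> [[2], [0, 1]]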
|
'''
Thi is doc
yes
'''
a = 10
|
# Generated by Django 2.1.2 on 2018-11-11 17:28
import core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0009_user_user_type'),
]
operations = [
migrations.AlterField(
model_name='user',
name='profile_photo',
field=models.ImageField(blank=True, upload_to='profiles/%Y/%m/%d', validators=[core.validators.FileValidator(allowed_extensions=['.jpg', '.png', '.gif'], allowed_mimes=['image/jpeg', 'image/png', 'image/gif'], max_size=150, min_size=25)]),
),
]
|
class Solution:
# @param tokens, a list of string
# @return an integer
def evalRPN(self, tokens):
stack = []
for token in tokens:
if token == '+':
stack.append(stack.pop() + stack.pop())
elif token == '-':
stack.append(-stack.pop() + stack.pop())
elif token == '*':
stack.append(stack.pop() * stack.pop())
elif token == '/':
stack.append(int(float(stack.pop(-2)) / float(stack.pop())))
else:
stack.append(int(token))
return stack.pop()
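# Example: ["2", "1", "+", "3", "*"] evaluates to (2 + 1) * 3 = 9.
#   Solution().evalRPN(["2", "1", "+", "3", "*"])  # -> 9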
|
#main_test.py
import unittest
import main
from unittest.mock import MagicMock, Mock
class TestSolver(unittest.TestCase):
"""[summary]
Args:
unittest ([type]): [description]
"""
def etl_integration(self):
main.load_naptan_data()
pass
def con_check_integration(self):
pass
def geo_check_integration(self):
pass
def report_integration(self):
pass
def visualiser_integration(self):
pass
if __name__ == "__main__":
unittest.main()
|
import unittest as u
import os
from lose import LOSE
import lose
import numpy as np
import tables as t
v = [int(i) for i in lose.__version__.split('.')]
class Tests(u.TestCase):
def setUp(self):
if os.path.isfile('./temp.h5'):
os.unlink('./temp.h5')
self.l = LOSE('./temp.h5')
def test_mk_group_valid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
self.l.newGroup(fmode='w', x=(15, 5), y=(2,))
def test_mk_group_valid2(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
self.l.newGroup(fmode='a', y=(2,))
self.l.newGroup(fmode='a', x=(15, 5))
def test_mk_group_invalid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
with self.assertRaises(ValueError):
self.l.newGroup(fmode='t', y=(2,))
def test_save_valid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
check = True
E = None
self.l.newGroup(fmode='w', x=(25, 4), y=(2,))
self.l.save(x=np.zeros((10, 25, 4)), y=np.zeros((2, 2)))
self.l.save(x=np.zeros((15, 25, 4)), y=np.zeros((5, 2)))
self.l.save(x=np.zeros((50, 25, 4)), y=np.zeros((8, 2)))
def test_save_invalid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
self.l.newGroup(fmode='w', x=(25, 4), y=(2,))
with self.assertRaises(ValueError):
self.l.save(x=np.zeros((25, 4)), y=np.zeros((2, 2)))
def test_save_invalid2(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
self.l.newGroup(fmode='w', x=(25, 4), y=(2,))
with self.assertRaises(ValueError):
self.l.save(x=np.zeros((10, 25, 4)), y=np.zeros((2, 5)))
def test_save_invalid3(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
self.l.newGroup(fmode='w', x=(25, 4), y=(2,))
with self.assertRaises(TypeError):
self.l.save(x='lul')
def test_load_valid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=X.shape[1:], y=Y.shape[1:])
self.l.save(x=X, y=Y)
a, b = self.l.load('x', 'y')
self.assertEqual(np.all(a==X), np.all(b==Y), 'should be equal')
@u.skipIf(v < [0, 6, 0], 'unsupported version')
def test_load_valid2(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=X.shape[1:], y=Y.shape[1:])
self.l.save(x=X, y=Y)
a, b = self.l.load('x', 'y', batch_obj=':5')
self.assertEqual(np.all(a==X[:5]), np.all(b==Y[:5]), 'should be equal')
@u.skipIf(v > [0, 5, 0], 'version 0.5.0 and below only')
def test_load_valid2_old(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=X.shape[1:], y=Y.shape[1:])
self.l.save(x=X, y=Y)
a, b = self.l.load('x', 'y', batch_obj='[:5]')
self.assertEqual(np.all(a==X[:5]), np.all(b==Y[:5]), 'should be equal')
def test_load_invalid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=X.shape[1:], y=Y.shape[1:])
self.l.save(x=X, y=Y)
with self.assertRaises(TypeError):
a, b = self.l.load('x', 'y', batch_obj=None)
def test_load_invalid2(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=X.shape[1:], y=Y.shape[1:])
self.l.save(x=X, y=Y)
with self.assertRaises(t.exceptions.NoSuchNodeError):
a, b = self.l.load('z', 'g', batch_obj=None)
@u.skipIf(v < [0, 5, 0], 'version 0.5 and up only')
def test_rename_group_valid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=X.shape[1:], y=Y.shape[1:])
self.l.save(x=X, y=Y)
self.l.renameGroup(x='z', y='g')
a, b = self.l.load('z', 'g')
self.assertEqual(np.all(X == a), np.all(Y == b), 'should be equal')
@u.skipIf(v < [0, 5, 0], 'version 0.5 and up only')
def test_rename_group_invalid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=X.shape[1:], y=Y.shape[1:])
self.l.save(x=X, y=Y)
with self.assertRaises(t.exceptions.NoSuchNodeError):
self.l.renameGroup(g='x')
@u.skipIf(v < [0, 4, 5], 'version 0.4.5 and up only')
def test_rm_group_valid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=X.shape[1:], y=Y.shape[1:])
self.l.save(x=X, y=Y)
self.l.removeGroup('x', 'y')
with self.assertRaises(t.exceptions.NoSuchNodeError):
a, b = self.l.load('x', 'y')
@u.skipIf(v < [0, 4, 5], 'version 0.4.5 and up only')
def test_rm_group_invalid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
with self.assertRaises(t.exceptions.NoSuchNodeError):
self.l.removeGroup('x', 'y')
@u.skipIf(v < [0, 6, 0], 'version 0.6 and up only')
def test_getShapes_valid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=X.shape[1:], y=Y.shape[1:])
self.l.save(x=X, y=Y)
a, b = self.l.getShapes('x', 'y')
self.assertEqual(a, X.shape, 'should be equal')
self.assertEqual(b, Y.shape, 'should be equal')
@u.skipIf(v > [0, 4, 5], 'version 0.4.5 and below only')
def test_getShapes_valid_old(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=(0, X.shape[1:]), y=(0, Y.shape[1:]))
self.l.save(x=X, y=Y)
a = self.l.get_hape('x')
b = self.l.get_hape('y')
self.assertEqual(a, X.shape, 'should be equal')
self.assertEqual(b, Y.shape, 'should be equal')
@u.skipIf(v < [0, 4, 5], 'version 0.4.5 and up only')
def test_getShapes_invalid(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
X = np.zeros((10, 5, 10))
Y = np.zeros((10, 5))
self.l.newGroup(fmode='w', x=X.shape[1:], y=Y.shape[1:])
self.l.save(x=X, y=Y)
with self.assertRaises(t.exceptions.NoSuchNodeError):
a = self.l.getShape('g')
# def test_generator_valid(self):
# if os.path.isfile(self.l.fname):
# os.unlink(self.l.fname)
def tearDown(self):
if os.path.isfile(self.l.fname):
os.unlink(self.l.fname)
|
# flake8: noqa
"""
# Append/update stock base information
"""
import os
import sys
import json
from typing import Any
from collections import OrderedDict
import pandas as pd
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
if vnpy_root not in sys.path:
sys.path.append(vnpy_root)
os.environ["VNPY_TESTING"] = "1"
import baostock as bs
from vnpy.trader.constant import Exchange
from vnpy.trader.utility import load_json, load_data_from_pkb2, save_data_to_pkb2
from vnpy.data.tdx.tdx_common import get_stock_type
stock_type_map = {
"1": '股票', "2": "指数", "3": "其他"
}
STOCK_BASE_FILE = 'stock_base.pkb2'
# get_stock_base return data format
# vt_symbol: {
#     'exchange': exchange code
#     'code': stock code
#     'name': Chinese name
#     'ipo_date': listing date
#     'out_date': delisting date
#     '类型': stock / index / other (stored in Chinese)
#     'type': stock_cn, index_cn, etf_cn, bond_cn, cb_cn
#     'status': '上市' (listed) or '退市' (delisted)
# }
def get_stock_base():
""" 获取股票基础信息"""
base_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__), STOCK_BASE_FILE))
base_data = load_data_from_pkb2(base_file_name)
if base_data is None:
return update_stock_base()
else:
return base_data
def update_stock_base():
"""
    Update stock base information
:return:
"""
base_file_name = os.path.abspath(os.path.join(os.path.dirname(__file__), STOCK_BASE_FILE))
base_data = load_data_from_pkb2(base_file_name)
if base_data is None:
base_data = dict()
login_msg = bs.login()
if login_msg.error_code != '0':
        print(f'baostock login error code: {login_msg.error_code}, error message: {login_msg.error_msg}')
return base_data
rs = bs.query_stock_basic()
if rs.error_code != '0':
        print(f'baostock query_stock_basic error code: {rs.error_code}, error message: {rs.error_msg}')
return
# [dict] => dataframe
    print(f'Returned fields: {rs.fields}')
while (rs.error_code == '0') and rs.next():
row = rs.get_row_data()
exchange_code, stock_code = row[0].split('.')
exchange = Exchange.SSE if exchange_code == 'sh' else Exchange.SZSE
d = {
'exchange': exchange.value,
'code': stock_code,
'name': row[1],
'ipo_date': row[2],
'out_date': row[3],
'类型': stock_type_map.get(row[4], '其他'),
'type': get_stock_type(stock_code),
'status': '上市' if row[5] == '1' else '退市'
}
base_data.update({f'{stock_code}.{exchange.value}': d})
# print(f'{d}')
save_data_to_pkb2(base_data, base_file_name)
    print('Update finished')
return base_data
if __name__ == '__main__':
update_stock_base()
|
# def too(*args, **kwargs)
# host, port, user, username and password to connect to the database
# def connect_to_db(host, port, user, username, password, **kwargs) - splat operator
# * is not a pointer as in C
# references are passed by value, i.e. you pass a copy of the reference to the object
def swap(number1, number2):
number2, number1 = number1, number2
def update(l,index,number):
l[index] = number
l = [1,2,3]
update(l,0,6)
def empty_list(l):
while(len(l)): # l[:] = []
l.pop() # del l[-1]
l = [1,2,3]
empty_list(l)
print(l)
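# swap() has no effect on the caller's variables: rebinding the parameter names
# inside the function does not change the bindings in the calling scope.
a, b = 1, 2
swap(a, b)
print(a, b)  # still prints: 1 2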
|
from tests.test_base import app, client, login
_SCRIPT_ID_HELLO = "pyscriptdemo.helloworld.HelloWorld"
_SCRIPT_ID_HELLO_WITH_PARAMS = "pyscriptdemo.helloworld.HelloWorldWithParams"
def test_hello(app, client):
login(client)
response = client.post("/api/scripts/" + _SCRIPT_ID_HELLO + "/_run")
assert response.json["success"]
assert response.json["message"] == "Hello World !"
test_output = "Hello Mister, Hello Mister, Hello Mister, Hello Mister, Hello Mister, Hello Mister, Hello Mister"
assert response.json["dataOutput"] == test_output
def test_hello_with_params(app, client):
login(client)
response = client.post("/api/scripts/" + _SCRIPT_ID_HELLO_WITH_PARAMS + "/_run")
assert response.json["success"]
assert response.json["dataOutput"]["values"] is None
params = {"a":"default value", "b":10, "c":True, "d":"My text \n in textarea", "e":"1", "f":"b"}
response = client.post("/api/scripts/" + _SCRIPT_ID_HELLO_WITH_PARAMS + "/_run", json=params)
assert response.json["success"]
assert response.json["dataOutput"]["values"] == params
assert response.json["dataOutput"]["params"] == [
{
"id": "a",
"type": "text",
"label": "Type text",
"default": "default value"
}, {
"id": "b",
"type": "number",
"label": "Type number",
"default": 10
}, {
"id": "c",
"type": "checkbox",
"label": "Type checkbox",
"default": True
}, {
"id": "d",
"type": "textarea",
"label": "Type textarea",
"placeholder": "Placeholder",
"default": "My text \n in textarea"
}, {
"id": "e",
"type": "radio",
"label": "Type radio",
"values": [
{"value": "1", "label": "Radio 1"},
{"value": "2", "label": "Radio 2"}
],
"default": "1"
}, {
"id": "f",
"type": "select",
"multiple": False,
"label": "Type select",
"values": [
{"value": "a", "label": "Option a"},
{"value": "b", "label": "Option b"},
{"value": "c", "label": "Option c"}
],
"default": "b"
}]
|
"""
Support for Selve cover - shutters etc.
"""
import logging
import voluptuous as vol
from homeassistant.components.cover import (
CoverEntity, ATTR_POSITION, SUPPORT_OPEN, SUPPORT_CLOSE, SUPPORT_STOP,
SUPPORT_OPEN_TILT, SUPPORT_CLOSE_TILT, SUPPORT_STOP_TILT, SUPPORT_SET_POSITION, SUPPORT_SET_TILT_POSITION,
DEVICE_CLASS_WINDOW, DEVICE_CLASS_BLIND, DEVICE_CLASS_AWNING, DEVICE_CLASS_SHUTTER)
from custom_components.selve import (
DOMAIN as SELVE_DOMAIN, SelveDevice)
from homeassistant.const import ATTR_ENTITY_ID
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['selve']
_LOGGER = logging.getLogger(__name__)
SERVICE_SET_POS1 = 'selve_set_pos1'
SERVICE_SET_POS2 = 'selve_set_pos2'
SELVE_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
SELVE_CLASSTYPES = {
0:None,
1:DEVICE_CLASS_SHUTTER,
2:DEVICE_CLASS_BLIND,
3:DEVICE_CLASS_SHUTTER,
4:'cover',
5:'cover',
6:'cover',
7:'cover',
8:'cover',
9:'cover',
10:'cover',
11:'cover',
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up Selve covers."""
controller = hass.data[SELVE_DOMAIN]['controller']
devices = [ SelveCover(device, controller) for device in hass.data[SELVE_DOMAIN]['devices']['cover']]
add_devices(devices, True)
class SelveCover(SelveDevice, CoverEntity):
"""Representation a Selve Cover."""
def update(self):
"""Update method."""
self.selve_device.discover_properties()
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP | SUPPORT_SET_POSITION | SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_SET_TILT_POSITION
@property
def current_cover_position(self):
"""
Return current position of cover.
0 is closed, 100 is fully open.
"""
return 50
@property
def current_cover_tilt_position(self):
"""
        Return current tilt position of cover.
0 is closed, 100 is fully open.
"""
return 50
@property
def is_closed(self):
"""Return if the cover is closed."""
# if self.current_cover_position is not None:
# return self.current_cover_position == 0
return None
@property
def device_class(self):
"""Return the class of the device."""
return SELVE_CLASSTYPES.get(self.selve_device.device_type.value)
def open_cover(self, **kwargs):
"""Open the cover."""
self.selve_device.moveUp()
def open_cover_tilt(self, **kwargs):
"""Open the cover."""
self.selve_device.moveIntermediatePosition1()
def close_cover(self, **kwargs):
"""Close the cover."""
self.selve_device.moveDown()
def close_cover_tilt(self, **kwargs):
"""Open the cover."""
self.selve_device.moveIntermediatePosition2()
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.selve_device.stop()
def stop_cover_tilt(self, **kwargs):
"""Stop the cover."""
self.selve_device.stop()
|
from .tg_conv import TGConvModel
from .tg_policy import TGPolicyModel
from .tg_rec import TGRecModel
|
from templates.codepipeline.pipeline import NewPipeline
from templates.pipeline_template import NewTemplate
from troposphere import Template
from tools.validates import change_yml_to_json
import pytest
import time
import json
import os
import sys
import shutil
class TestCodePipeline:
@pytest.fixture
def params(self):
actions = {
'source':
{
'name': 'source',
'runorder': 1,
'configuration': {'BranchName': 'release', 'RepositoryName': 'sharedlibrary'},
'type': 'Source',
'role': 'arn:aws:iam::033921349789:role/RoleCodepipelineRole'
},
'action':
{
'name': 'compilar',
'runorder': 1,
'configuration': {'ProjectName': 'proj', 'PrimarySource': 'App', 'InputArtifacts': 'App', 'runorder': '1'},
'type': 'Build',
'role': 'arn:aws:iam::033921349789:role/RoleCodepipelineRole'
},
'stage':
{
'name': 'Compilador'
},
'pipeline':
{
'name': 'PipelineEcs',
'role': 'arn:aws:iam::033921349789:role/RoleCodepipelineRole'
},
'templates': ['app-ecs']
}
return actions
@pytest.fixture
def imageCustom(self):
imgcustom = {
"Aqua": {
"all": "imagem_Aqua"
},
"Build": {
"all": "image_Build",
"python37": "image_custom"
},
"TestUnit": {
"all": "imagem_TestUnit"
},
"Fortify": {
"all": "imagem_sast"
},
"Sonar": {
"all": "imagem_sonar"
}
}
return imgcustom
@pytest.fixture
def payloads(self):
payload = [
'payload_1.yml',
'payload_2.yml',
'payload_3.yml',
'payload_4.yml',
'payload_5.yml',
'payload_6.yml'
]
return payload
def gerando_cloudformation(self, resource):
cf = Template()
if isinstance(resource, dict):
for res in resource.values():
cf.add_resource(res)
elif isinstance(resource, list):
for res in resource:
cf.add_resource(res)
resource_json = json.loads(cf.to_json())
return resource_json
def load_template(self, name_template, env=False):
template_json = open('tests/payload-ecs/templates.json')
json_template = template_json.read()
template_json.close()
template = json.loads(json_template)
if template['name'] == name_template:
return template['details']['pipeline']
elif name_template == 'structure':
return template['details']['structure']
elif name_template == 'depends':
return template['details']['depends']
def load_yml(self, filename):
filename = f"tests/payload-ecs/{filename}"
f_template = open(filename)
yml_template = f_template.read()
f_template.close()
json_template = change_yml_to_json(yml_template)
return json_template
def test_deve_retornar_parametros_da_pipeline(self, params):
for template in params['templates']:
app_ecs = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
cf_pipeline = app_ecs.pipeline_parameter()
cf = self.gerando_cloudformation(cf_pipeline)
print(cf)
assert len(cf['Resources']) == 10
assert cf['Resources']['PrivateSubnetOne']['Default'] == '/Networking/PrivateSubnetOne'
assert cf['Resources']['PrivateSubnetTwo']['Default'] == '/Networking/PrivateSubnetTwo'
assert cf['Resources']['VPCID']['Default'] == '/Networking/VPCID'
assert cf['Resources']['DevAccount']['Default'] == '/Accounts/Dev'
assert cf['Resources']['HomologAccount']['Default'] == '/Accounts/Homolog'
assert cf['Resources']['ProdAccount']['Default'] == '/Accounts/Prod'
assert cf['Resources']['KMSKeyArn']['Default'] == '/Shared/KMSKeyArn'
assert cf['Resources']['TokenAqua']['Default'] == '/Shared/TokenAqua'
assert cf['Resources']['DevSecOpsAccount']['Default'] == '/Accounts/DevSecOps'
assert cf['Resources']['DevToolsAccount']['Default'] == '/Accounts/DevTools'
def gettemplate(self, payload, env):
make = self.load_yml(payload)
stages = make['pipeline'][env]
runtime = make['runtime']
params = {}
for param in make['Parameter']:
params.update(param)
dados = {
'stages': stages,
'runtime': runtime,
'params': params
}
return dados
def test_deve_retornar_codebuild_do_template_app_ecs_sem_buildCustomizado(self, params, imageCustom):
for name_template in params['templates']:
env = 'develop'
template_pipeline = self.load_template(name_template, env)
dados = self.gettemplate('payload_1.yml', env)
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
cf_pipeline = app.generate_codebuild(
dados['runtime'], template_pipeline, dados['stages'], dados['params'], env, imageCustom)
cf = self.gerando_cloudformation(cf_pipeline)
print(cf['Resources'].keys())
assert len(cf['Resources']) == 10
assert 'Aqua' in cf['Resources']
assert 'Build' in cf['Resources']
assert 'Deployecsdev' in cf['Resources']
assert 'Fortify' in cf['Resources']
assert 'Publishecrdev' in cf['Resources']
assert 'Sonar' in cf['Resources']
assert 'Testunit' in cf['Resources']
assert '../01/python/3.7/build/buildspec.yml' in cf['Resources']['Build']['Properties']['Source']['BuildSpec']
assert '../01/python/3.7/testunit/buildspec.yml' in cf[
'Resources']['Testunit']['Properties']['Source']['BuildSpec']
def test_deve_retornar_estrutura_pipeline(self, params, imageCustom):
for name_template in params['templates']:
estrutura = self.load_template('structure', 'develop')
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
stage = 'Deploydev'
cf = app.check_stage_not_env(estrutura, stage, 'develop')
assert cf == True
stage = 'DeployHomol'
cf = app.check_stage_not_env(estrutura, stage, 'master')
assert cf == True
def test_deve_retornar_codebuild_do_template_app_ecs_com_buildCustomizado(self, params, imageCustom):
for pipe in params['templates']:
env = 'develop'
name_template = pipe
template_pipeline = self.load_template(name_template, env)
dados = self.gettemplate('payload_1.yml', env)
dados['params']['BuildCustom'] = 'True'
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
cf_pipeline = app.generate_codebuild(
dados['runtime'], template_pipeline, dados['stages'], dados['params'], env, imageCustom)
cf = self.gerando_cloudformation(cf_pipeline)
print(cf['Resources'].keys())
assert len(cf['Resources']) == 10
assert 'Aqua' in cf['Resources']
assert 'Build' in cf['Resources']
assert 'Deployecsdev' in cf['Resources']
assert 'Fortify' in cf['Resources']
assert 'Publishecrdev' in cf['Resources']
assert 'Sonar' in cf['Resources']
assert 'Testunit' in cf['Resources']
assert 'pipeline/buildspec_build.yml' in cf['Resources']['Build']['Properties']['Source']['BuildSpec']
assert 'pipeline/buildspec_testunit.yml' in cf['Resources']['Testunit']['Properties']['Source']['BuildSpec']
def test_deve_retornar_codebuild_do_template_app_ecs_com_action_customizado(self, params, imageCustom):
for pipe in params['templates']:
env = 'develop'
name_template = pipe
template_pipeline = self.load_template(name_template, env)
dados = self.gettemplate('payload_5.yml', env)
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
cf_pipeline = app.generate_codebuild(
dados['runtime'], template_pipeline, dados['stages'], dados['params'], env, imageCustom)
templ_pipeline = 0
cf = self.gerando_cloudformation(cf_pipeline)
print(cf['Resources'].keys())
assert len(cf['Resources']) == 11
assert 'Aqua' in cf['Resources']
assert 'Build' in cf['Resources']
assert 'Deployecsdev' in cf['Resources']
assert 'Fortify' in cf['Resources']
assert 'Publishecrdev' in cf['Resources']
assert 'Sonar' in cf['Resources']
assert 'Testunit' in cf['Resources']
assert '../01/python/3.7/build/buildspec.yml' in cf['Resources']['Build']['Properties']['Source']['BuildSpec']
assert '../01/python/3.7/testunit/buildspec.yml' in cf[
'Resources']['Testunit']['Properties']['Source']['BuildSpec']
assert 'testmultant' in cf['Resources']
def test_deve_retornar_codebuild_do_template_app_ecs_com_stage_customizado(self, params, imageCustom):
for pipe in params['templates']:
env = 'develop'
name_template = pipe
template_pipeline = self.load_template(name_template, env)
dados = self.gettemplate('payload_6.yml', env)
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
cf_pipeline = app.generate_codebuild(
dados['runtime'], template_pipeline, dados['stages'], dados['params'], env, imageCustom)
cf = self.gerando_cloudformation(cf_pipeline)
resources = list(cf['Resources'].keys())
print(cf['Resources'].keys())
assert len(cf['Resources']) == 13
assert 'Aqua' in resources
assert 'Build' in resources
assert 'Deployecsdev' in resources
assert 'Fortify' in resources
assert 'Publishecrdev' in resources
assert 'Sonar' in resources
assert 'Testunit' in cf['Resources']
assert '../01/python/3.7/build/buildspec.yml' in cf['Resources']['Build']['Properties']['Source']['BuildSpec']
assert '../01/python/3.7/testunit/buildspec.yml' in cf[
'Resources']['Testunit']['Properties']['Source']['BuildSpec']
            assert 'seguranca1' in resources
            assert 'seguranca2' in resources
            assert 'seguranca1' in cf['Resources']['seguranca1']['Properties']['Name']
            assert 'seguranca2' in cf['Resources']['seguranca2']['Properties']['Name']
def test_deve_retornar_source_do_template_app_ecs_sem_source_customizado(self, params):
for pipe in params['templates']:
env = 'develop'
name_template = pipe
template_pipeline = self.load_template(name_template, env)
template_yml = self.load_yml('payload_1.yml')
stages = template_yml['pipeline'][env]
reponame = template_yml['Parameter'][0]['Projeto']
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
cf_pipeline = app.generate_sources(
stages, env, reponame, 'codebuild_role', 'release-1.19')
cf = self.gerando_cloudformation(cf_pipeline)
print(cf['Resources'])
assert len(cf['Resources']) == 2
assert 'SharedLibrary' in cf['Resources']
assert 'release-1.19' == cf['Resources']['SharedLibrary']['Configuration']['BranchName']
assert 'false' == cf['Resources']['SharedLibrary']['Configuration']['PollForSourceChanges']
assert 'pipelineaws-sharedlibrary' == cf['Resources']['SharedLibrary']['Configuration']['RepositoryName']
assert 'PipelinePython' in cf['Resources']
assert 'develop' == cf['Resources']['PipelinePython']['Configuration']['BranchName']
assert 'Pipeline-Python' == cf['Resources']['PipelinePython']['Configuration']['RepositoryName']
def test_deve_retornar_source_do_template_app_ecs_com_source_customizado_sem_branch(self, params):
for pipe in params['templates']:
env = 'develop'
name_template = pipe
template_yml = self.load_yml('payload_3.yml')
stages = template_yml['pipeline'][env]
reponame = template_yml['Parameter'][0]['Projeto']
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
cf_pipeline = app.generate_sources(
stages, env, reponame, 'codebuild_role', 'release-1.19')
cf = self.gerando_cloudformation(cf_pipeline)
print(cf['Resources'])
assert len(cf['Resources']) == 3
assert 'SharedLibrary' in cf['Resources']
assert 'release-1.19' == cf['Resources']['SharedLibrary']['Configuration']['BranchName']
assert 'false' == cf['Resources']['SharedLibrary']['Configuration']['PollForSourceChanges']
assert 'pipelineaws-sharedlibrary' == cf['Resources']['SharedLibrary']['Configuration']['RepositoryName']
assert 'PipelinePython' in cf['Resources']
assert 'develop' == cf['Resources']['PipelinePython']['Configuration']['BranchName']
assert 'Pipeline-Python' == cf['Resources']['PipelinePython']['Configuration']['RepositoryName']
assert 'Tools' in cf['Resources']
assert 'develop' == cf['Resources']['Tools']['Configuration']['BranchName']
assert 'Tools' == cf['Resources']['Tools']['Configuration']['RepositoryName']
def test_deve_retornar_source_do_template_app_ecs_com_source_customizado_com_branch(self, params):
for pipe in params['templates']:
env = 'develop'
name_template = pipe
template_pipeline = self.load_template(name_template, env)
template_yml = self.load_yml('payload_4.yml')
stages = template_yml['pipeline'][env]
reponame = template_yml['Parameter'][0]['Projeto']
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
cf_pipeline = app.generate_sources(
stages, 'develop', reponame, 'codebuild_role', 'release-1.19')
cf = self.gerando_cloudformation(cf_pipeline)
print(cf['Resources']['Tools'])
assert len(cf['Resources']) == 3
assert 'SharedLibrary' in cf['Resources']
assert 'release-1.19' == cf['Resources']['SharedLibrary']['Configuration']['BranchName']
assert 'false' == cf['Resources']['SharedLibrary']['Configuration']['PollForSourceChanges']
assert 'pipelineaws-sharedlibrary' == cf['Resources']['SharedLibrary']['Configuration']['RepositoryName']
assert 'SharedLibrary' in cf['Resources']
assert 'develop' == cf['Resources']['PipelinePython']['Configuration']['BranchName']
assert 'Pipeline-Python' == cf['Resources']['PipelinePython']['Configuration']['RepositoryName']
assert 'Tools' in cf['Resources']
assert 'master' == cf['Resources']['Tools']['Configuration']['BranchName']
assert 'Tools' == cf['Resources']['Tools']['Configuration']['RepositoryName']
def generate_action(self, name_template, env, payload, imageCustom):
template_pipeline = self.load_template(name_template, env)
dados = self.gettemplate(payload, env)
reponame = dados['params']['Projeto']
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
cf_codebuild = app.generate_codebuild(
dados['runtime'], template_pipeline, dados['stages'], dados['params'], env, imageCustom)
cf_pipeline = app.generate_action(
dados['stages'], template_pipeline, cf_codebuild, env)
return cf_pipeline
def test_deve_retornar_uma_lista_de_resources_que_pipeline_depende(self, params):
env = 'develop'
dep = self.load_template('depends')
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
deps = app.create_depends('pipelineTeste', env, dep)
cf = self.gerando_cloudformation(deps)
assert 'SG' in cf['Resources'].keys()
assert 'ECRpipelineTesteECRDevelop' in cf['Resources'].keys()
def test_deve_retornar_action_do_template_app_ecs_validando_payloads(self, params, imageCustom, payloads):
for pipe in params['templates']:
for payload in payloads:
cf_pipeline = self.generate_action(
pipe, 'develop', payload, imageCustom)
cf = self.gerando_cloudformation(cf_pipeline)
print(cf['Resources'].keys())
print(payload)
if payload == 'payload_5.yml' or payload == 'payload_8.yml':
assert len(cf['Resources']) == 11
elif payload == 'payload_6.yml':
assert len(cf['Resources']) == 13
else:
assert len(cf['Resources']) == 10
def generate_pipeline(self, name_template, env, payload, imageCustom):
template_pipeline = self.load_template(name_template, env)
estrutura = self.load_template('structure', env)
dados = self.gettemplate(payload, env)
reponame = dados['params']['Projeto']
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
resources = {}
cf_codebuild = app.generate_codebuild(
dados['runtime'], template_pipeline, dados['stages'], dados['params'], env, imageCustom)
cf_source = app.generate_sources(
dados['stages'], env, reponame, 'codebuild_role', 'release-1.19')
cf_action = app.generate_action(
dados['stages'], template_pipeline, cf_codebuild, env)
resources.update(cf_source)
resources.update(cf_action)
cf_pipeline = app.generate_stage(
template_pipeline, resources, env, estrutura)
return cf_pipeline
def test_deve_retornar_stage_do_template_app_ecs_payloads(self, params, imageCustom, payloads):
for pipe in params['templates']:
for payload in payloads:
cf_pipeline = self.generate_pipeline(
pipe, 'develop', payload, imageCustom)
cf = self.gerando_cloudformation(cf_pipeline)
print(payload)
print(cf['Resources'].keys())
if payload == 'payload_6.yml':
assert len(cf['Resources']) == 5
else:
assert len(cf['Resources']) == 3
def test_deve_retornar_pipeline_verificando_stages_e_action(self, params, imageCustom, payloads):
for name_template in params['templates']:
for payload in payloads:
env = 'develop'
template_pipeline = self.load_template(name_template, env)
estrutura = self.load_template('structure', env)
dados = self.gettemplate(payload, env)
reponame = dados['params']['Projeto']
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
resources = {}
cf_codebuild = app.generate_codebuild(
dados['runtime'], template_pipeline, dados['stages'], dados['params'], env, imageCustom)
cf_source = app.generate_sources(
dados['stages'], 'develop', reponame, 'codebuild_role', 'release-1.19')
cf_action = app.generate_action(
dados['stages'], template_pipeline, cf_codebuild, env)
resources.update(cf_source)
resources.update(cf_action)
cf_stages = app.generate_stage(
template_pipeline, resources, 'develop', estrutura)
cf_pipeline = app.generate_pipeline(
cf_stages, f'{reponame}-develop')
cf = self.gerando_cloudformation(cf_pipeline)
if payload == 'payload_6.yml':
assert len(cf['Resources']['PipelinePythonDevelop']
['Properties']['Stages']) == 5
else:
assert len(cf['Resources']['PipelinePythonDevelop']
['Properties']['Stages']) == 3
def test_deve_salvar_pipeline_na_pasta_swap(self, params, imageCustom):
for pipe in params['templates']:
env = 'develop'
name_template = pipe
template_pipeline = self.load_template(name_template, env)
estrutura = self.load_template('structure', env)
dados = self.gettemplate('payload_6.yml', env)
reponame = dados['params']['Projeto']
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
resources = {}
cf_codebuild = app.generate_codebuild(
dados['runtime'], template_pipeline, dados['stages'], dados['params'], env, imageCustom)
cf_source = app.generate_sources(
dados['stages'], env, reponame, 'codebuild_role', 'release-1.19')
cf_action = app.generate_action(
dados['stages'], template_pipeline, cf_codebuild, env)
resources.update(cf_source)
resources.update(cf_action)
cf_stages = app.generate_stage(
template_pipeline, resources, env, estrutura)
cf_pipeline = app.generate_pipeline(cf_stages, f"{reponame}-{env}")
cf = self.gerando_cloudformation(cf_pipeline)
template = json.dumps(cf)
app.save_swap(reponame, template, env, '00000')
assert os.path.isdir('swap') == True
assert os.path.isfile(
'swap/Pipeline-Python-develop-00000.json') == True
os.remove('swap/Pipeline-Python-develop-00000.json')
def test_deve_criar_pasta_swap(self, params, imageCustom):
for pipe in params['templates']:
env = 'develop'
name_template = pipe
template_pipeline = self.load_template(name_template, env)
estrutura = self.load_template('structure', env)
dados = self.gettemplate('payload_6.yml', env)
reponame = dados['params']['Projeto']
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
resources = {}
cf_codebuild = app.generate_codebuild(
dados['runtime'], template_pipeline, dados['stages'], dados['params'], env, imageCustom)
cf_source = app.generate_sources(
dados['stages'], env, reponame, 'codebuild_role', 'release-1.19')
cf_action = app.generate_action(
dados['stages'], template_pipeline, cf_codebuild, env)
resources.update(cf_source)
resources.update(cf_action)
cf_stages = app.generate_stage(
template_pipeline, resources, env, estrutura)
cf_pipeline = app.generate_pipeline(cf_stages, f"{reponame}-{env}")
cf = self.gerando_cloudformation(cf_pipeline)
template = json.dumps(cf)
shutil.rmtree('swap')
app.save_swap(reponame, template, env, '00000')
assert os.path.isdir('swap') == True
assert os.path.isfile(
'swap/Pipeline-Python-develop-00000.json') == True
os.remove('swap/Pipeline-Python-develop-00000.json')
os.rmdir('swap')
def test_deve_retornar_url_da_pipeline(self, params, imageCustom):
for pipe in params['templates']:
env = 'develop'
name_template = pipe
template_pipeline = self.load_template(name_template, 'develop')
estrutura = self.load_template('structure', env)
depends = self.load_template('depends', env)
dados = self.gettemplate('payload_6.yml', env)
app = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
template_params = {
'env': env,
'runtime': dados['runtime'],
'stages': dados['stages'],
'account': '000000',
'pipeline_stages': template_pipeline,
'params': dados['params'],
'release': 'release-10',
'imageCustom': imageCustom,
'structure': estrutura,
'depends': depends
}
file_template = app.generate(tp=template_params)
print(file_template)
assert os.path.isdir('swap') == True
assert os.path.isfile(
'swap/Pipeline-Python-develop-000000.json') == True
os.remove('swap/Pipeline-Python-develop-000000.json')
def test_deve_verificar_a_estrutura_da_pipeline(self, params, imageCustom, payloads):
for name_template in params['templates']:
for payload in payloads:
env = 'develop'
template_pipeline = self.load_template(name_template, env)
estrutura = self.load_template('structure', env)
depends = self.load_template('depends', env)
dados = self.gettemplate(payload, env)
codepipeline_role = "arn:aws:iam::033921349789:role/RoleCodepipelineRole"
codebuild_role = "arn:aws:iam::033921349789:role/RoleCodeBuildRole"
DevSecOps_Role = "arn:aws:iam::033921349789:role/RoleCodeBuildRole"
app = NewTemplate(codepipeline_role,
codebuild_role, DevSecOps_Role)
template_params = {
'env': env,
'runtime': dados['runtime'],
'stages': dados['stages'],
'account': '000000',
'pipeline_stages': template_pipeline,
'params': dados['params'],
'release': 'release-10',
'imageCustom': imageCustom,
'structure': estrutura,
'depends': depends
}
file_template = app.generate(tp=template_params)
                # Opening the generated pipeline template
ft = open(file_template)
ftemplate = json.loads(ft.read())
ft.close()
resources = ftemplate['Resources'].keys()
codebuilds = []
sg = []
for resource in resources:
if ftemplate['Resources'][resource]['Type'] == 'AWS::CodeBuild::Project':
name = ftemplate['Resources'][resource]['Properties']['Name']
codebuilds.append(name)
elif ftemplate['Resources'][resource]['Type'] == 'AWS::EC2::SecurityGroup':
sg.append(ftemplate['Resources'][resource])
for resource in resources:
if ftemplate['Resources'][resource]['Type'] == 'AWS::CodePipeline::Pipeline':
for stages in (ftemplate['Resources'][resource]['Properties']['Stages']):
for action in stages['Actions']:
if action['ActionTypeId']['Category'] == 'Build':
assert action['Configuration']['ProjectName'] in codebuilds
assert sg
print(payload)
if payload == 'payload_6.yml':
assert len(
ftemplate['Resources']['PipelinePythonDevelop']['Properties']['Stages']) == 5
else:
assert len(
ftemplate['Resources']['PipelinePythonDevelop']['Properties']['Stages']) == 3
actions = ftemplate['Resources']['PipelinePythonDevelop']['Properties']['Stages']
if payload == 'payload_1.yml':
print(len(actions[2]['Actions']))
assert len(actions) == 3
assert len(actions[0]['Actions']) == 2
assert len(actions[1]['Actions']) == 8
assert len(actions[2]['Actions']) == 2
elif payload == 'payload_2.yml':
print(len(actions[2]['Actions']))
assert len(actions) == 3
assert len(actions[0]['Actions']) == 2
assert len(actions[1]['Actions']) == 8
assert len(actions[2]['Actions']) == 2
elif payload == 'payload_3.yml':
print(len(actions[2]['Actions']))
assert len(actions) == 3
assert len(actions[0]['Actions']) == 3
assert len(actions[1]['Actions']) == 8
assert len(actions[2]['Actions']) == 2
elif payload == 'payload_4.yml':
print(len(actions[2]['Actions']))
assert len(actions) == 3
assert len(actions[0]['Actions']) == 3
assert len(actions[1]['Actions']) == 8
assert len(actions[2]['Actions']) == 2
elif payload == 'payload_5.yml':
print(len(actions[2]['Actions']))
assert len(actions) == 3
assert len(actions[0]['Actions']) == 3
assert len(actions[1]['Actions']) == 9
assert len(actions[2]['Actions']) == 2
elif payload == 'payload_6.yml':
print(actions[4])
assert len(actions) == 5
assert len(actions[0]['Actions']) == 2
assert len(actions[1]['Actions']) == 8
assert len(actions[2]['Actions']) == 2
assert len(actions[4]['Actions']) == 2
os.remove('swap/Pipeline-Python-develop-000000.json')
def test_deve_retornar_pipeline_com_action_obrigatorio_com_source_personalizado(self, params, payloads, imageCustom):
"""
        This test validates overriding a mandatory codebuild (such as the build) with a customized source.
"""
for name_template in params['templates']:
env = 'develop'
template_pipeline = self.load_template(name_template, env)
estrutura = self.load_template('structure', env)
depends = self.load_template('depends', env)
dados = self.gettemplate('payload_8.yml', env)
codepipeline_role = "arn:aws:iam::033921349789:role/RoleCodepipelineRole"
codebuild_role = "arn:aws:iam::033921349789:role/RoleCodeBuildRole"
DevSecOps_Role = "arn:aws:iam::033921349789:role/RoleCodeBuildRole"
app = NewTemplate(codepipeline_role,
codebuild_role, DevSecOps_Role)
template_params = {
'env': env,
'runtime': dados['runtime'],
'stages': dados['stages'],
'account': '000000',
'pipeline_stages': template_pipeline,
'params': dados['params'],
'release': 'release-10',
'imageCustom': imageCustom,
'structure': estrutura,
'depends': depends
}
file_template = app.generate(tp=template_params)
            # Opening the generated pipeline template
ft = open(file_template)
ftemplate = json.loads(ft.read())
ft.close()
resources = ftemplate['Resources'].keys()
l_actions = ftemplate['Resources']['PipelinePythonDevelop']['Properties']['Stages']
for actions in l_actions:
for action in actions['Actions']:
if action['ActionTypeId']['Category'] != 'Source':
print(action)
if action['Name'] == 'Build':
assert [{'Name': 'Normalizacao'}] == [
item for item in action['InputArtifacts'] if item['Name'] == 'Normalizacao']
assert 'Normalizacao' == action['Configuration']['PrimarySource']
if action['Name'] == 'Testunit':
assert [{'Name': 'Normalizacao'}] == [
item for item in action['InputArtifacts'] if item['Name'] == 'Normalizacao']
assert 'Normalizacao' == action['Configuration']['PrimarySource']
if action['Name'] == 'Sonar':
assert [{'Name': 'Normalizacao'}] == [
item for item in action['InputArtifacts'] if item['Name'] == 'Normalizacao']
assert 'Normalizacao' == action['Configuration']['PrimarySource']
def test_deve_retornar_pipeline_com_action_customizado_com_multiplos_sources(self, params, payloads, imageCustom):
"""
        This test validates a customized action that receives multiple sources as input.
"""
for name_template in params['templates']:
env = 'develop'
template_pipeline = self.load_template(name_template, env)
estrutura = self.load_template('structure', env)
depends = self.load_template('depends', env)
dados = self.gettemplate('payload_8.yml', env)
codepipeline_role = "arn:aws:iam::033921349789:role/RoleCodepipelineRole"
codebuild_role = "arn:aws:iam::033921349789:role/RoleCodeBuildRole"
DevSecOps_Role = "arn:aws:iam::033921349789:role/RoleCodeBuildRole"
app = NewTemplate(codepipeline_role,
codebuild_role, DevSecOps_Role)
template_params = {
'env': env,
'runtime': dados['runtime'],
'stages': dados['stages'],
'account': '000000',
'pipeline_stages': template_pipeline,
'params': dados['params'],
'release': 'release-10',
'imageCustom': imageCustom,
'structure': estrutura,
'depends': depends
}
file_template = app.generate(tp=template_params)
            # Opening the generated pipeline template
ft = open(file_template)
ftemplate = json.loads(ft.read())
ft.close()
resources = ftemplate['Resources'].keys()
l_actions = ftemplate['Resources']['PipelinePythonDevelop']['Properties']['Stages']
for actions in l_actions:
for action in actions['Actions']:
if action['ActionTypeId']['Category'] != 'Source':
if action['Name'] == 'Normalizacao':
print(action)
assert [{'Name': 'App'}, {'Name': 'App2'}, {
'Name': 'App3'}] == action['InputArtifacts']
assert 'App' == action['Configuration']['PrimarySource']
if action['Name'] == 'Testmultant':
print(action)
assert [{'Name': 'Build'}
] == action['InputArtifacts']
assert 'Build' == action['Configuration']['PrimarySource']
def test_deve_retornar_pipeline_com_stages_ordenados(self, params, payloads, imageCustom):
"""
        This test validates that the pipeline stages are generated in the expected order.
"""
for name_template in params['templates']:
env = 'develop'
template_pipeline = self.load_template(name_template, env)
estrutura = self.load_template('structure', env)
depends = self.load_template('depends', env)
dados = self.gettemplate('payload_9.yml', env)
codepipeline_role = "arn:aws:iam::033921349789:role/RoleCodepipelineRole"
codebuild_role = "arn:aws:iam::033921349789:role/RoleCodeBuildRole"
DevSecOps_Role = "arn:aws:iam::033921349789:role/RoleCodeBuildRole"
app = NewTemplate(codepipeline_role,
codebuild_role, DevSecOps_Role)
template_params = {
'env': env,
'runtime': dados['runtime'],
'stages': dados['stages'],
'account': '000000',
'pipeline_stages': template_pipeline,
'params': dados['params'],
'release': 'release-10',
'imageCustom': imageCustom,
'structure': estrutura,
'depends': depends
}
file_template = app.generate(tp=template_params)
            # Opening the generated pipeline template
ft = open(file_template)
ftemplate = json.loads(ft.read())
ft.close()
resources = ftemplate['Resources'].keys()
l_actions = ftemplate['Resources']['PipelinePythonDevelop']['Properties']['Stages']
list_stages = [stage['Name'] for stage in l_actions]
print(list_stages)
assert ['Source', 'Continuous_Integration', 'Seguranca',
'Seguranca3', 'DeployDev'] == list_stages
def test_deve_retornar_codebuild_eh_madatorio(self, params):
for name_template in params['templates']:
buildName = ['Build', 'Customizado']
env = 'develop'
template_pipeline = self.load_template(name_template, env)
pipeline = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
mandatorio = pipeline.codebuild_mandatory(
buildName[0], template_pipeline)
customizado = pipeline.codebuild_mandatory(
buildName[1], template_pipeline)
assert mandatorio == True
assert customizado == False
def test_deve_retornar_codebuild_com_source_personalizado(self, params):
for name_template in params['templates']:
env = 'develop'
template_pipeline = self.load_template(name_template, env)
pipeline = NewTemplate('codepipeline_role',
'codebuild_role', 'DevSecOps_Role')
source1 = pipeline.check_is_not_codebuild(
'Source', template_pipeline)
source2 = pipeline.check_is_not_codebuild(
'Build', template_pipeline)
source3 = pipeline.check_is_not_codebuild(
'Agendamento1', template_pipeline)
source4 = pipeline.check_is_not_codebuild(
'AprovacaoPO', template_pipeline)
assert source1 == True
assert source2 == False
assert source3 == True
assert source4 == True
def test_deve_retornar_pipeline_master(self, params, imageCustom, payloads):
for pipe in params['templates']:
cf_pipeline = self.generate_pipeline(
pipe, 'master', 'payload_1.yml', imageCustom)
cf = self.gerando_cloudformation(cf_pipeline)
print(cf['Resources'].keys())
assert len(cf['Resources']) == 7
def create_pipeline(self, name_template, env, imageCustom, payload):
template_pipeline = self.load_template(name_template, env)
estrutura = self.load_template('structure', env)
depends = self.load_template('depends', env)
dados = self.gettemplate(payload, env)
codepipeline_role = "arn:aws:iam::033921349789:role/RoleCodepipelineRole"
codebuild_role = "arn:aws:iam::033921349789:role/RoleCodeBuildRole"
DevSecOps_Role = "arn:aws:iam::033921349789:role/RoleCodeBuildRole"
app = NewTemplate(codepipeline_role,
codebuild_role, DevSecOps_Role)
template_params = {
'env': env,
'runtime': dados['runtime'],
'stages': dados['stages'],
'account': '000000',
'pipeline_stages': template_pipeline,
'params': dados['params'],
'release': 'release-10',
'imageCustom': imageCustom,
'structure': estrutura,
'depends': depends
}
file_template = app.generate(tp=template_params)
        # Opening the generated pipeline template
ft = open(file_template)
ftemplate = json.loads(ft.read())
ft.close()
return ftemplate
def test_deve_retornar_pipeline_com_action_de_aprovacao(self, params, payloads, imageCustom):
"""
        This test validates that the master pipeline contains the expected approval actions.
"""
for name_template in params['templates']:
env = 'master'
ftemplate = self.create_pipeline(
name_template, env, imageCustom, 'payload_11.yml')
env_ = env.capitalize()
pipe_name = f'PipelinePython{env_}'
l_actions = ftemplate['Resources']['PipelinePythonMaster']['Properties']['Stages']
cont = 0
for actions in l_actions:
for action in actions['Actions']:
if action['ActionTypeId']['Category'] == 'Approval':
cont += 1
assert cont == 2
def test_deve_verificar_se_action_nao_estao_vazios(self, params, payloads, imageCustom):
"""
        This test validates that no stage in the pipeline has an empty list of actions.
"""
for name_template in params['templates']:
for env in ['develop', 'master']:
ftemplate = self.create_pipeline(
name_template, env, imageCustom, 'payload_11.yml')
env_ = env.capitalize()
pipe_name = f'PipelinePython{env_}'
l_actions = ftemplate['Resources'][pipe_name]['Properties']['Stages']
cont = 0
for actions in l_actions:
assert len(actions['Actions']) != 0
def test_deve_verificar_se_foi_removido_o_campo_type(self, params, payloads, imageCustom):
"""
        This test validates that the 'type' field inserted in the template has been removed.
        'type' is used to indicate the kind of action.
"""
for name_template in params['templates']:
for env in ['develop', 'master']:
ftemplate = self.create_pipeline(
name_template, env, imageCustom, 'payload_11.yml')
env_ = env.capitalize()
pipe_name = f'PipelinePython{env_}'
l_actions = ftemplate['Resources'][pipe_name]['Properties']['Stages']
cont = 0
type_codebuild = False
for actions in l_actions:
for confs in actions['Actions']:
assert 'type' not in confs['Configuration']
def test_deve_verificar_se_foi_removido_o_campo_runorder_do_config(self, params, payloads, imageCustom):
"""
        This test validates that the 'runorder' field inserted in the template has been removed.
"""
for name_template in params['templates']:
for env in ['develop', 'master']:
ftemplate = self.create_pipeline(
name_template, env, imageCustom, 'payload_11.yml')
env_ = env.capitalize()
pipe_name = f'PipelinePython{env_}'
l_actions = ftemplate['Resources'][pipe_name]['Properties']['Stages']
cont = 0
runorder = False
for actions in l_actions:
for confs in actions['Actions']:
assert 'runorder' not in confs['Configuration']
def test_deve_validar_os_stages_customizados_action_InvokeLambda_Custom_Approval(self, params, payloads, imageCustom):
"""
        This test validates the customized stages and the InvokeLambda, Custom and Approval actions.
"""
for name_template in params['templates']:
for env in ['develop']:
ftemplate = self.create_pipeline(
name_template, env, imageCustom, 'payload_11.yml')
env_ = env.capitalize()
pipe_name = f'PipelinePython{env_}'
l_stages = ftemplate['Resources'][pipe_name]['Properties']
for stages in l_stages['Stages']:
print(stages['Name'])
if 'source' == stages['Name'].lower():
assert len(stages['Actions']) == 2
assert 'sharedlibrary' == stages['Actions'][0]['Name'].lower(
)
assert 'pipeline-python' == stages['Actions'][1]['Name'].lower()
elif 'continuous_integration' == stages['Name'].lower():
assert len(stages['Actions']) == 11
assert 'controlversion' == stages['Actions'][0]['Name'].lower(
)
assert 'sonar' == stages['Actions'][2]['Name'].lower()
assert 'testunit' == stages['Actions'][3]['Name'].lower(
)
assert 'build' == stages['Actions'][4]['Name'].lower()
assert 'aqua' == stages['Actions'][5]['Name'].lower(
)
assert 'auditapp' == stages['Actions'][6]['Name'].lower(
)
assert 'parametersapp' == stages['Actions'][7]['Name'].lower(
)
assert 'normalizacao' == stages['Actions'][8]['Name'].lower(
)
assert 'rodalambda' == stages['Actions'][9]['Name'].lower(
)
assert 'gonogo' == stages['Actions'][10]['Name'].lower(
)
elif 'deploydev' == stages['Name'].lower():
assert len(stages['Actions']) == 2
assert 'publishecrdev' == stages['Actions'][0]['Name'].lower(
)
assert 'deployecsdev' == stages['Actions'][1]['Name'].lower(
)
def test_deve_validar_customizacao_da_branch_do_projeto(self, params, payloads, imageCustom):
"""
        This test validates the customization of the project branch for each environment.
"""
for name_template in params['templates']:
for env in ['develop', 'master']:
ftemplate = self.create_pipeline(
name_template, env, imageCustom, 'payload_12.yml')
env_ = env.capitalize()
pipe_name = f'PipelinePython{env_}'
l_actions = ftemplate['Resources'][pipe_name]['Properties']['Stages']
cont = 0
runorder = False
for actions in l_actions:
for confs in actions['Actions']:
if confs['ActionTypeId']['Category'].lower() == 'source':
print(confs)
if 'SharedLibrary' != confs['Name']:
if env == 'develop':
assert confs['Configuration']['BranchName'] == 'feature-teste'
else:
assert confs['Configuration']['BranchName'] == 'master'
|
from config import Config
from ..datastore import db
from ..user import HasUser
from datetime import datetime
from sqlalchemy_utils import EncryptedType
class Vendor(db.Model, HasUser):
id = db.Column(db.Integer(),
primary_key=True)
slug = db.Column(db.Unicode(),
index=True,
unique=True)
created_at = db.Column(db.DateTime(),
default=datetime.utcnow)
updated_at = db.Column(db.DateTime(),
default=datetime.utcnow,
onupdate=datetime.utcnow)
company_name = db.Column(db.Unicode(255))
stripe_sk = db.Column(EncryptedType(db.String,
Config.SECRET_KEY))
stripe_pk = db.Column(EncryptedType(db.String,
Config.SECRET_KEY))
redirect_url = db.Column(db.Unicode())
def render_stripe_sk(self):
return '****{}'.format(self.stripe_sk[-4:])
def render_stripe_pk(self):
return '****{}'.format(self.stripe_pk[-4:])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-06 12:38
from __future__ import unicode_literals
import autoslug.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('multimedia', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Multimedia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('multimedia_link', models.FileField(upload_to='media/')),
('title', models.CharField(max_length=500)),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='title')),
('categorie', models.CharField(choices=[('F', 'File'), ('V', 'Video'), ('A', 'Audio')], default='V', max_length=1)),
('status', models.CharField(choices=[('D', 'Draft'), ('P', 'Published')], default='D', max_length=1)),
('create_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(auto_now=True)),
('create_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('update_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('title',),
'verbose_name': 'Multimedia',
'verbose_name_plural': 'Multimedias',
},
),
migrations.RemoveField(
model_name='videolink',
name='create_user',
),
migrations.RemoveField(
model_name='videolink',
name='update_user',
),
migrations.DeleteModel(
name='VideoLink',
),
]
|
import SimpleITK
import sys
if len(sys.argv) < 4:
    print("Usage: MathematicalMorphology <input> <operation> <output>")
    print("operation: 0=binary erode, 1=binary dilate, 2=grayscale erode, 3=grayscale dilate")
    sys.exit(1)
reader = SimpleITK.ImageFileReader()
reader.SetFileName(sys.argv[1])
image = reader.Execute()
print(image.ToString())
morphoMath = SimpleITK.MathematicalMorphologyImageFilter()
morphoMath.SetOperation(int(sys.argv[2]))
image = morphoMath.Execute(image)
print(image.ToString())
writer = SimpleITK.ImageFileWriter()
writer.SetFileName(sys.argv[3])
writer.Execute(image)
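# Hypothetical invocation (script and file names are examples only):
#   python MathematicalMorphology.py input.nrrd 1 dilated.nrrd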
|
"""Data analytics functions for processing neuroimaging data
Functions:
parse_volumes -> dataframe
find_largest_volume -> tuple
load_scan(path) -> array
get_pixels_hu -> array
show_scan
extract_pixels -> array
flatten -> array
flat_wrapper -> array
show_dist
show_cluster_dist
cluster -> dataframe
cluster_wrapper -> dataframe
cluster_modes -> dataframe
find_middle_cluster -> integer
filter_by_cluster -> dataframe
get_HUrange -> tuple
compare_scans
mask -> array
mask_wrapper -> array
binary_mask -> array
remove_islands -> array
render_volume -> figure
"""
import os
from dotenv import dotenv_values
import pydicom
from math import *
import numpy as np
import pandas as pd
import pickle
import copy
import matplotlib.pyplot as plt
from itertools import chain
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
from sklearn.cluster import KMeans
import cv2
def parse_volumes(dicom_path=None, debug=True):
"""Extract all volumes from the selected DICOM directory and return the file paths
Keyword arguments:
path -- the directory where *ALL* the DICOM (.dcm) images are located
Returns: Dataframe of metadata about the volumes
"""
if (dicom_path==None):
config = dotenv_values(".env")
dicom_path = config['DICOM_SAVE']
volumes = {}
for path, subdirs, files in os.walk(dicom_path):
for name in files:
file_path = os.path.join(path,name)
splitfile = os.path.splitext(name)
vol = ('/').join(file_path.split('/')[:-1])
if splitfile[1]=='.dcm':
                if vol not in volumes:
                    volumes[vol] = []
                volumes[vol].append(name)
df = pd.DataFrame()
df['path']=list(volumes.keys())
df['files']=list(volumes.values())
df.index = [(path.split('/'))[-1] for path in df['path']]
df['count']=[len(files) for files in df['files']]
if debug:
print(df.drop(columns=['files']))
print(f'\nThe volume with the highest slice count can be found at: \n {df[df["count"] == df["count"].max()]["path"][0]}')
return df
def find_largest_volume(dicom_path=None, debug=True):
"""Find the volume with the greatest number of slices (for demonstration)
Keyword arguments:
path -- the directory where *ALL* the DICOM (.dcm) images are located
Returns: Tuple of (path, name) of first largest volume in the dicom path
"""
volumes = parse_volumes(dicom_path=dicom_path, debug=debug)
path = volumes[volumes["count"] == volumes["count"].max()]["path"][0]
name = list(volumes[volumes["count"] == volumes["count"].max()].index)[0]
return path, name
def load_scan(path):
"""Load DICOM data from a local directory.
Keyword arguments:
path -- the directory where the target volume's DICOM (.dcm) images are located
Returns: 3D pixel array of DICOM slices
"""
slices = [pydicom.dcmread(os.path.join(path,s)) for s in
os.listdir(path)]
slices = [s for s in slices if 'SliceLocation' in s]
slices.sort(key = lambda x: int(x.InstanceNumber))
return slices
def get_pixels_hu(slices):
"""Extract an array of Hounsfield Unit values from a stack of scans
Source: https://hengloose.medium.com/a-comprehensive-starter-guide-to-visualizing-and-analyzing-dicom-images-in-python-7a8430fcb7ed
Author: Franklin Heng
Keyword arguments:
slices -- an array of images
Returns: 3D numpy array of HU values
"""
image = np.stack([s.pixel_array for s in slices])
image = image.astype(np.int16)
image[image == -2000] = 0 # Set outside-of-scan pixels to 0
intercept = slices[0].RescaleIntercept if 'RescaleIntercept' in slices[0] else -1024 # Convert to Hounsfield units (HU)
slope = slices[0].RescaleSlope if 'RescaleSlope' in slices[0] else 1
if slope != 1:
image = slope * image.astype(np.float64)
image = image.astype(np.int16)
image += np.int16(intercept)
return np.array(image, dtype=np.int16)
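# Note on the rescale in get_pixels_hu: HU = RescaleSlope * stored_value + RescaleIntercept.
# With the common slope of 1 and intercept of -1024, a stored value of 1024 maps to
# 1024 * 1 - 1024 = 0 HU, the Hounsfield value of water.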
def show_scan(pixels, viewer='plotly', output=False):
"""Render an image of the scan by slicing through the middle of the 3D array
Keyword arguments:
pixels -- a 3D numpy array of pixels for each slice of the DICOM image
output -- (optional) default: None; path of a location to save the image
Returns: Nothing; shows image
"""
midbrain = int(np.floor(len(pixels)/2))
fig = go.Figure(
data=go.Heatmap(z=pixels[midbrain],
colorscale = 'gray'),
)
if output:
head, tail = os.path.split(output)
htmlsavepath = os.path.join(head, tail+'.html')
fig.write_html(htmlsavepath)
pngsavepath = os.path.join(head, tail+'.png')
plt.imsave(pngsavepath, pixels[midbrain], cmap=plt.cm.binary)
if viewer.lower() =='plotly':
fig.show()
plt.ioff()
plt.close()
else:
plt.imshow(pixels[midbrain], cmap=plt.cm.binary)
plt.show()
def extract_pixels(path, debug=True):
"""Extract an array of pixels from a DICOM directory
Keyword arguments:
path -- the directory where the target volume's DICOM (.dcm) images are located
Returns: 3D numpy array of HU values
"""
dicom = load_scan(path)
pixels = get_pixels_hu(dicom)
    if debug:  # If debugging, show the image
show_scan(pixels, output=path)
return pixels
def flatten(pixels, output=False, min_filter=-1024):
"""Flattens a 3D numpy array of pixels for each slice of the DICOM image into a single 1D array,
excluding any values below the min_filter, and optionally saves the array as a pickle file
Keyword arguments:
pixels -- a 3D numpy array of pixels for each slice of the DICOM image
output -- (optional) default: None; path of a location to save the pickle
Returns: flattened 1D array of HU values greater than the min_filter value
"""
flatten_list = list(chain.from_iterable(chain.from_iterable(pixels)))
result = [x for x in flatten_list if x > min_filter]
if output:
        os.makedirs(output, exist_ok=True)
with open(os.path.join(output, f'flattened.pkl'), 'wb') as f:
pickle.dump(result, f)
return result
def flat_wrapper(pixels, output=False):
"""Wraps the flatten() function so as to check if the data has already been flattened
and not repeat the process if it is not necessary.
Keyword arguments:
pixels -- a 3D numpy array of pixels for each slice of the DICOM image
output -- (optional) default: None; the possible pre-existing save location to check
Returns: flattened 1D array of HU values
"""
if output:
try:
flat = pd.read_pickle(os.path.join(output, f'flattened.pkl'))
return flat
except:
pass
flat = flatten(pixels, output=output)
return flat
def show_dist(flat, viewer='plotly', output=False):
"""Display the distribution of values in a 1D array
Keyword arguments:
flat -- flattened 1D array of values
viewer -- 'plotly' (case-insensitive) for interactive ; anything else (default: 'mpl') returns a static matplotlib histogram
output -- (optional) default: None; (optionally, path and) filename *without extension* where to save the histogram
Returns: Nothing; shows image
"""
fig = go.Figure(data=[go.Histogram(x=flat)])
plt.clf()
plt.hist(flat, 100, facecolor='blue', alpha=0.5)
if output:
        os.makedirs(output, exist_ok=True)
fig.write_html(os.path.join(output,'pixel_distribution.html'))
plt.savefig(os.path.join(output,'pixel_distribution.png'))
if viewer.lower() =='plotly':
fig.show()
plt.ioff()
plt.close()
else: plt.show()
def show_cluster_dist(df, viewer='plotly', output=False):
"""Display the distribution of values by cluster
Keyword arguments:
df -- the pandas dataframe that stores the values, where (df.x is value) and (df.y is cluster_id)
viewer -- 'plotly' (case-insensitive) for interactive ; anything else returns a static matplotlib histogram
output -- (optional) default: None; (optionally, path and) filename *without extension* where to save the histogram
Returns: Nothing; shows image
"""
fig = px.histogram(df, x="x", color="y")
dfArr = []
for l in list(set(df['y'])):
dfArr.append(df[df['y']==l])
colors = ['red', 'green', 'blue','orange','purple','yellow','brown','gray']
i = 0
plt.clf()
for c in dfArr:
plt.hist(c['x'], 100, facecolor=colors[i], alpha=0.5, label=i)
i+=1
if output:
        os.makedirs(output, exist_ok=True)
fig.write_html(os.path.join(output,'cluster_distribution.html'))
plt.savefig(os.path.join(output,'cluster_distribution.png'))
if viewer.lower()=='plotly':
fig.show()
plt.ioff()
plt.close()
else: plt.show()
def cluster(flat, k=3, output=None):
"""Run k-means pixel clustering on a 1D array and (optionally) save as CSV
Keyword arguments:
flat -- 1D array of values
k -- number of clusters (default: 3)
output -- (optional) default: None; location to save the CSV
Returns: Dataframe of metadata about the cluster index for each pixel
"""
    km = KMeans(n_clusters=k)
    npArr = np.array(flat).reshape(-1, 1)
    label = km.fit_predict(npArr)  # fit and assign cluster labels in one pass
df = pd.DataFrame(data={'x':flat, 'y':label})
if output:
        os.makedirs(output, exist_ok=True)
df.to_csv(os.path.join(output,f'cluster_k{k}.csv'), index=False)
show_cluster_dist(df, output=output)
return df
def cluster_wrapper(pixels=False, flat=None, k=3, output=False):
"""Wraps the flatten() and cluster() functions
Keyword arguments:
pixels -- (optional) a 3D numpy array of pixels for each slice of the DICOM image
flat -- (optional; required if 'pixels' not provided) 1D array of values
k -- number of clusters (default: 3)
output -- (optional) default: None; location to save the CSV
Returns: Dataframe of metadata about the cluster index for each pixel
"""
if flat is None:
try:
flat = flatten(pixels)
        except Exception:
            print('Error! If no flattened array is provided, you must supply a pixels 3D array to flatten')
            return None
if output:
try:
clustered = pd.read_csv(os.path.join(output,f'cluster_k{k}.csv'))
return clustered
except:
pass
clustered = cluster(flat, k=k, output=output)
return clustered
def cluster_modes(df):
"""Find the most common value in each cluster
Keyword arguments:
df -- the dataframe generated by cluster(), where (df.x is value) and (df.y is cluster_id)
Returns: Dataframe of metadata about the modes for each cluster
"""
clusters = list(set(df['y']))
modes = []
for k in clusters:
modes.append(df[df['y']==k]['x'].mode()[0])
mdf = pd.DataFrame(data={'cluster':clusters, 'mode':modes})
return mdf
def find_middle_cluster(df):
"""Select the cluster with the median mode
(use the higher median instead of averaging when the number of clusters is even)
Keyword arguments:
df -- the dataframe generated by cluster(), where (df.x is value) and (df.y is cluster_id)
Returns: Index of the cluster with the modal value that is the median out of all the cluster modes
"""
mdf = cluster_modes(df)
    median = mdf['mode'].quantile(0.5, interpolation='higher')
    k = int(mdf[mdf['mode'] == median]['cluster'].iloc[0])
return k
def filter_by_cluster(df, cluster=None):
"""Filter the dataframe to only include a single cluster;
if cluster is not specified, use the middle cluster determined by find_middle_cluster()
Keyword arguments:
df -- the dataframe generated by cluster(), where (df.x is value) and (df.y is cluster_id)
cluster -- (optional) the specific cluster for which to filter
Returns: Filtered dataframe of pixel values and their cluster index
"""
if cluster is None:
cluster = find_middle_cluster(df)
filtered = df[df['y']==cluster]['x']
return filtered
def get_HUrange(df, cluster=None):
"""Extract the Hounsfield Unit (HU) range of the cluster;
if cluster is not specified, use the middle cluster determined by find_middle_cluster()
Keyword arguments:
df -- the dataframe generated by cluster(), where (df.x is value) and (df.y is cluster_id)
cluster -- (optional) the specific cluster for which to filter
Returns: Tuple of (minHU, maxHU)
"""
if cluster is None:
cluster = find_middle_cluster(df)
minHU = df[df['y']==cluster]['x'].min()
maxHU = df[df['y']==cluster]['x'].max()
return (minHU, maxHU)
def compare_scans(baseline, compare, viewer="plotly", output=False):
"""Show a slice through the middle of two brain scans (3D numpy arrays) side-by-side
and (optionally) save the comparison image
Keyword arguments:
    baseline -- the first 3D numpy array to compare
    compare -- the second 3D numpy array to compare
viewer -- 'plotly' for interactive; anything else returns a static matplotlib histogram
output -- (optional) default: None; (optionally, path and) filename *without extension* where to save the scan comparison image
Returns: Nothing; shows image
"""
midbrain = int(np.floor(len(baseline)/2))
print(f'Midbrain: {midbrain}')
fig = make_subplots(1, 2, subplot_titles=("Baseline",'Compare'))
fig.add_trace(go.Heatmap(z=baseline[midbrain],colorscale = 'gray'), 1, 1)
fig.add_trace(go.Heatmap(z=compare[midbrain],colorscale = 'gray'), 1, 2)
fig.update_layout(height=400, width=800)
plt.figure()
f, axarr = plt.subplots(1,2)
axarr[0].imshow(baseline[midbrain], cmap=plt.cm.binary)
axarr[1].imshow(compare[midbrain], cmap=plt.cm.binary)
if output:
        os.makedirs(output, exist_ok=True)
fig.write_html(os.path.join(output, 'compare_scans.html'))
plt.savefig(os.path.join(output, 'compare_scans.png'))
if viewer=="plotly":
fig.show()
plt.ioff()
plt.close()
else:
plt.show()
def mask(pixels, HUrange, output=False, debug=True):
"""Mask a 3D numpy array by a specified range by pushing pixels outside of the range
1000HU below the minimum of the range
Keyword arguments:
pixels -- 3D numpy array
HUrange -- a tuple of (min, max) HUrange
output -- (optional) default: None; location + filename where to save the masked data
Returns: 3D numpy array of masked pixel values
"""
mask = copy.deepcopy(pixels)
i=0
for img in mask:
j=0
for row in img:
mask[i][j] = [HUrange[0]-1000 if ((x < HUrange[0]) or (x > HUrange[1])) else x for x in row]
j+=1
i+=1
if output:
        os.makedirs(output, exist_ok=True)
with open(os.path.join(output, f'mask_{HUrange[0]}-{HUrange[1]}.pkl'), 'wb') as f:
pickle.dump(mask, f)
if debug:
show_scan(mask, output=output)
return mask
def mask_wrapper(pixels, output=None, HUrange=None, df=None, debug=True):
"""Wrapper for the mask() function which extracts an HUrange from the dataframe if there is no range specified
and optionally checks to see if the mask data has already been pre-computed and saved in a specified location
Keyword arguments:
pixels -- 3D numpy array of values from the DICOM image stack
output -- (optional) default: None; location + filename where to check for (or save) the masked data
HUrange -- (optional) a custom Hounsfield Units range for masking
df -- (optional, required if HUrange is 'None') dataframe from which to extract HUrange
Returns: 3D numpy array of masked pixel values
"""
if HUrange is None:
if df is not None:
HUrange = get_HUrange(df)
        else:
            print('Error! Must supply HUrange OR df')
            return None
try:
masked = pd.read_pickle(os.path.join(output, f'mask_{HUrange[0]}-{HUrange[1]}.pkl'))
except:
masked = mask(pixels, HUrange, output=output, debug=debug)
return masked
def binary_mask(pixels, maskRange, output=False, debug=True):
"""Generate a binary mask from a 3D numpy array according to a specified range
Keyword arguments:
pixels -- 3D numpy array
maskRange -- tuple of (min, max) Hounsfield unit range
Returns: 3D numpy array (binary) of masked pixel values
"""
binary = copy.deepcopy(pixels)
i=0
for img in pixels:
j=0
for row in img:
binary[i][j] = [1 if (maskRange[0] < x < maskRange[1]) else 0 for x in row]
j+=1
i+=1
if output:
        os.makedirs(output, exist_ok=True)
with open(os.path.join(output, f'binary-mask_{maskRange[0]}-{maskRange[1]}.pkl'), 'wb') as f:
            pickle.dump(binary, f)
if debug: compare_scans(pixels, binary, viewer='plotly')
return binary
def remove_islands(pixels, output=False, k=3):
"""Generate a new 3D numpy array which removes islands using OpenCV's 'opening' function
Keyword arguments:
pixels -- 3D numpy array
k -- square kernel size in pixels for opening; default: 3, which returns a kernel of size 3x3
output -- (optional) where to save the pickle of the generated array
Returns: 3D numpy array (binary) of masked pixel values
"""
kernel = np.ones((k, k), np.float32)
opening = cv2.morphologyEx(pixels, cv2.MORPH_OPEN, kernel)
compare_scans(pixels, opening, viewer='plotly')
if output:
        os.makedirs(output, exist_ok=True)
with open(os.path.join(output, f'remove_islands_k{k}.pkl'), 'wb') as f:
pickle.dump(opening, f)
return opening
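# --- Hedged usage sketch (not in the original module) ---
# A minimal end-to-end example of how the helpers above are meant to chain
# together, assuming a .env file with a DICOM_SAVE entry pointing at a folder
# of .dcm volumes; the k value and the choice to reuse the volume folder as
# the output location are illustrative assumptions, not fixed behaviour.
if __name__ == '__main__':
    volume_path, volume_name = find_largest_volume(debug=False)      # volume with the most slices
    pixels = extract_pixels(volume_path, debug=False)                # load slices and convert to HU
    flat = flat_wrapper(pixels, output=volume_path)                  # flatten, reusing a cached pickle if present
    clustered = cluster_wrapper(flat=flat, k=3, output=volume_path)  # k-means over the HU values
    hu_range = get_HUrange(clustered)                                # HU range of the middle cluster
    masked = mask_wrapper(pixels, output=volume_path, HUrange=hu_range, debug=False)
    binary = binary_mask(masked, hu_range, output=volume_path, debug=False)
    cleaned = remove_islands(binary, output=volume_path, k=3)        # morphological opening
    compare_scans(pixels, cleaned, output=volume_path)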
|
from conftest import add_permissions, check_json_response
from core import cache, db
from forums.models import (
Forum,
ForumSubscription,
ForumThread,
ForumThreadSubscription,
)
from forums.permissions import ForumPermissions
def test_subscribe_to_forum(app, authed_client):
add_permissions(app, 'forums_subscriptions_modify')
response = authed_client.post('/subscriptions/forums/5')
check_json_response(response, 'Successfully subscribed to forum 5.')
assert ForumSubscription.from_attrs(user_id=1, forum_id=5)
def test_subscribe_to_forum_already_subscribed(app, authed_client):
add_permissions(app, 'forums_subscriptions_modify')
response = authed_client.post('/subscriptions/forums/2')
check_json_response(response, 'You are already subscribed to forum 2.')
def test_unsubscribe_from_forum(app, authed_client):
add_permissions(app, 'forums_subscriptions_modify')
response = authed_client.delete('/subscriptions/forums/2')
check_json_response(response, 'Successfully unsubscribed from forum 2.')
assert not ForumSubscription.from_attrs(user_id=1, forum_id=2)
def test_unsubscribe_from_forum_not_subscribed(app, authed_client):
add_permissions(app, 'forums_subscriptions_modify')
response = authed_client.delete('/subscriptions/forums/5')
check_json_response(response, 'You are not subscribed to forum 5.')
def test_subscribe_to_thread(app, authed_client):
add_permissions(app, 'forums_subscriptions_modify')
response = authed_client.post('/subscriptions/threads/5')
check_json_response(response, 'Successfully subscribed to thread 5.')
assert ForumThreadSubscription.from_attrs(user_id=1, thread_id=5)
def test_subscribe_to_thread_already_subscribed(app, authed_client):
add_permissions(app, 'forums_subscriptions_modify')
response = authed_client.post('/subscriptions/threads/3')
check_json_response(response, 'You are already subscribed to thread 3.')
def test_unsubscribe_from_thread(app, authed_client):
add_permissions(app, 'forums_subscriptions_modify')
response = authed_client.delete('/subscriptions/threads/3')
check_json_response(response, 'Successfully unsubscribed from thread 3.')
assert not ForumThreadSubscription.from_attrs(user_id=1, thread_id=3)
def test_unsubscribe_from_thread_not_subscribed(app, authed_client):
add_permissions(app, 'forums_subscriptions_modify')
response = authed_client.delete('/subscriptions/threads/5')
check_json_response(response, 'You are not subscribed to thread 5.')
def test_view_my_subscriptions(app, authed_client):
add_permissions(app, 'forums_view_subscriptions')
response = authed_client.get('/subscriptions/forums').get_json()[
'response'
]
assert {1, 2, 4} == {s['id'] for s in response}
def test_view_thread_subscriptions(app, authed_client):
add_permissions(app, 'forums_view_subscriptions')
response = authed_client.get('/subscriptions/threads').get_json()[
'response'
]
assert {1, 3, 4} == {s['id'] for s in response}
def test_view_forum_subscriptions_empty(app, authed_client):
db.engine.execute("DELETE FROM forums_forums_subscriptions")
add_permissions(app, 'forums_view_subscriptions')
response = authed_client.get('/subscriptions/forums').get_json()[
'response'
]
assert response == []
def test_view_thread_subscriptions_empty(app, authed_client):
db.engine.execute("DELETE FROM users_permissions")
add_permissions(app, 'forums_view_subscriptions')
response = authed_client.get('/subscriptions/threads').get_json()[
'response'
]
assert response == []
def test_view_forum_subscriptions_no_forum_perms(app, authed_client):
db.engine.execute("DELETE FROM users_permissions")
add_permissions(app, 'forums_view_subscriptions')
response = authed_client.get('/subscriptions/forums').get_json()[
'response'
]
assert response == []
def test_view_thread_subscriptions_no_forum_perms(app, authed_client):
db.engine.execute(
"DELETE FROM users_permissions WHERE permission LIKE 'forumaccess%%'"
)
add_permissions(app, 'forums_view_subscriptions')
response = authed_client.get('/subscriptions/threads').get_json()[
'response'
]
assert response == []
def test_subscribe_thread_deletes_cache_keys(app, authed_client):
add_permissions(app, ForumPermissions.MODIFY_SUBSCRIPTIONS)
ForumThread.from_subscribed_user(1)
ForumThreadSubscription.user_ids_from_thread(5)
response = authed_client.post('/subscriptions/threads/5')
assert response.status_code == 200
assert not cache.get(
ForumThreadSubscription.__cache_key_users__.format(thread_id=5)
)
assert not cache.get(
ForumThreadSubscription.__cache_key_of_user__.format(user_id=1)
)
assert ForumThreadSubscription.user_ids_from_thread(5) == [1]
def test_unsubscribe_thread_deletes_cache_keys(app, authed_client):
add_permissions(app, ForumPermissions.MODIFY_SUBSCRIPTIONS)
ForumThread.from_subscribed_user(1)
ForumThreadSubscription.user_ids_from_thread(4)
assert cache.get(
ForumThreadSubscription.__cache_key_users__.format(thread_id=4)
)
assert cache.get(
ForumThreadSubscription.__cache_key_of_user__.format(user_id=1)
)
response = authed_client.delete('/subscriptions/threads/4')
assert response.status_code == 200
assert not cache.get(
ForumThreadSubscription.__cache_key_users__.format(thread_id=4)
)
assert not cache.get(
ForumThreadSubscription.__cache_key_of_user__.format(user_id=1)
)
assert ForumThreadSubscription.user_ids_from_thread(4) == [2]
def test_subscribe_forum_deletes_cache_keys(app, authed_client):
add_permissions(app, ForumPermissions.MODIFY_SUBSCRIPTIONS)
Forum.from_subscribed_user(1)
ForumSubscription.user_ids_from_forum(5)
response = authed_client.post('/subscriptions/forums/5')
assert response.status_code == 200
assert not cache.get(
ForumSubscription.__cache_key_users__.format(forum_id=5)
)
assert not cache.get(
ForumSubscription.__cache_key_of_user__.format(user_id=1)
)
assert ForumSubscription.user_ids_from_forum(5) == [3, 4, 1]
def test_unsubscribe_forum_deletes_cache_keys(app, authed_client):
add_permissions(app, ForumPermissions.MODIFY_SUBSCRIPTIONS)
Forum.from_subscribed_user(1)
ForumSubscription.user_ids_from_forum(4)
assert cache.get(ForumSubscription.__cache_key_users__.format(forum_id=4))
assert cache.get(ForumSubscription.__cache_key_of_user__.format(user_id=1))
response = authed_client.delete('/subscriptions/forums/4')
assert response.status_code == 200
assert not cache.get(
ForumSubscription.__cache_key_users__.format(forum_id=4)
)
assert not cache.get(
ForumSubscription.__cache_key_of_user__.format(user_id=1)
)
assert ForumSubscription.user_ids_from_forum(4) == [2]
|
import numpy as np
import xml.etree.ElementTree as ET
import sys
import glob
import os
import matplotlib.pyplot as plt
tableau_colors = (
(114/255., 158/255., 206/255.),
(255/255., 158/255., 74/255.),
(103/255., 191/255., 92/255.),
(237/255., 102/255., 93/255.),
(173/255., 139/255., 201/255.),
(168/255., 120/255., 110/255.),
(237/255., 151/255., 202/255.),
(162/255., 162/255., 162/255.),
(205/255., 204/255., 93/255.),
(109/255., 204/255., 218/255.),
(144/255., 169/255., 202/255.),
(225/255., 157/255., 90/255.),
(122/255., 193/255., 108/255.),
(225/255., 122/255., 120/255.),
(197/255., 176/255., 213/255.),
(196/255., 156/255., 148/255.),
(247/255., 182/255., 210/255.),
(199/255., 199/255., 199/255.),
(219/255., 219/255., 141/255.),
(158/255., 218/255., 229/255.),
(227/255., 119/255., 194/255.),
(219/255., 219/255., 141/255.),
(174/255., 199/255., 232/255.),
(109/255., 204/255., 218/255.),
(114/255., 158/255., 206/255.),
(199/255., 199/255., 199/255.),
(188/255., 189/255., 34/255.),
(255/255., 127/255., 14/255.),
(255/255., 125/255., 150/255.),
(196/255., 156/255., 148/255.),
(177/255., 3/255., 24/255.),
    (0/255., 107/255., 164/255.),
(198/255., 118/255., 255/255.), #dsm added, not Tableau
(58/255., 208/255., 129/255.),
)
def getAverage(l):
if len(l)==0:
return 0.0
else:
return float(sum(l)) / float(len(l))
TOTAL_COLOR_NUM=33
colors = [2, 3, 5, 1, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
#app_string="lobsters amahiPlatform fulcrum linuxfr onebody rucksack sugar boxroom jobsworth kandan publify railscollab rucksack sharetribe tracks brevidy communityengine"
app_string="forem lobsters linuxfr sugar kandan onebody communityengine diaspora calagator rucksack railscollab jobsworth gitlab kanban fulcrum tracks boxroom brevidy wallgig shoppe amahiPlatform sharetribe enki publify"
#app_string="sharetribe enki publify"
applications = app_string.split(" ")
result_path = "../../applications/general_stats/"
width = 0.2
applications_on_figure = []
i=1
for a in applications:
temp_str = a[0:3].upper()
applications_on_figure.append(temp_str)
i += 1
roots = {}
def plot_alignedbar_plot(plt, ary_list, legends, stat_name, Ylabel, stack_array):
fig = plt.figure(figsize=(7,3))
ax = fig.add_subplot(111)
N = len(applications)
ind = np.arange(N)
rects = []
j=0
data_acc = {}
for i in stack_array:
data_acc[i] = []
for j in ary_list[0]:
data_acc[i].append(0)
j = 0
for ary in ary_list:
rects.append(ax.bar(ind+width*stack_array[j], ary, width, bottom=data_acc[stack_array[j]], color=tableau_colors[colors[j]]))
k = 0
for d in ary:
data_acc[stack_array[j]][k] += d
k += 1
j+=1
legend = ax.legend(rects, legends, loc='upper right', prop={'size':'10'})
plt.ylabel(Ylabel, fontsize=10)
plt.xticks(ind,applications_on_figure,rotation='vertical')
plt.tick_params(labelsize=10)
plt.tight_layout()
#plt.show()
fig.savefig("%s/general_%s.pdf"%(result_path, stat_name))
def plot_stack_plot(plt, ary_list, legends, stat_name, Ylabel):
#print stat_name
#i=0
#for ary in ary_list:
# print legends[i]
# print ary
# i += 1
fig = plt.figure(figsize=(7,3))
ax = fig.add_subplot(111)
N = len(applications)
ind = np.arange(N)
bottom_data = []
rects = []
i = 0
for ary in ary_list:
print "%s\t"%legends[i],
for k in ary:
print "%f, "%k,
print ""
i += 1
for i in range(N):
bottom_data.append(0)
j = 0
for ary in ary_list:
print "%s: avg = %f"%(legends[j], getAverage(ary))
#data of all apps in a single category
rects.append(ax.bar(ind, ary, width, bottom=bottom_data, color=tableau_colors[colors[j]]))
j += 1
for i in range(N):
bottom_data[i] += ary[i]
if len(legends) > 15:
ax.legend(rects, legends, loc='upper right', prop={'size':'4'})
else:
legend = ax.legend(rects, legends, prop={'size':'10'}, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
plt.ylabel(Ylabel, fontsize=10)
plt.xticks(ind,applications_on_figure,rotation='vertical')
plt.tick_params(labelsize=10)
plt.tight_layout()
#plt.show()
fig.savefig("%s/general_%s.pdf"%(result_path, stat_name))
def general_plot(stat_name, Ylabel, alignedBar=False, alignedArray={}):
ary_list = []
legends = []
name_map = {}
for app in applications:
for node in roots[app]:
if node.tag == stat_name:
for c in node:
if c.tag not in legends:
name_map[c.tag] = len(ary_list)
ary_list.append([])
legends.append(c.tag)
for app in applications:
for node in roots[app]:
if node.tag == stat_name:
included = []
for child in node:
included.append(child.tag)
ary_list[name_map[child.tag]].append(float(child.text))
for l in legends:
if l not in included:
ary_list[name_map[l]].append(0)
#ary_list[i].append(float(child.text))
#if len(legends) > i:
# for j in range(len(legends)-i):
# ary_list[i].append(0)
# i += 1
if stat_name == "queryGeneral":
temp_ary = {}
for app in applications:
temp_ary[app] = 0
i = 0
for app in applications:
for ary in ary_list:
temp_ary[app] += ary[i]
i = i + 1
print "TOTAL QUERY:"
for app in applications:
temp_ary[app] = temp_ary[app] * len(roots[app])
print "%s: %d"%(app, temp_ary[app])
if alignedBar:
stack_array = []
k = 0
for l in legends:
if l in alignedArray:
stack_array.append(alignedArray[l])
else:
stack_array.append(k)
k += 1
plot_alignedbar_plot(plt, ary_list, legends, stat_name, Ylabel, stack_array)
else:
plot_stack_plot(plt, ary_list, legends, stat_name, Ylabel)
def plot_field_stat(stat_name):
x_data = []
y_data = []
types = []
max_x = 0
max_y = 0
i = 0
type_map = {}
for app in applications:
for node in roots[app]:
if node.tag == stat_name:
for c in node:
if c.text != "MAX" and float(c.text) > max_y:
max_y = float(c.text)
if float(c.attrib["numUse"]) > max_x:
max_x = float(c.attrib["numUse"])
if c.attrib["type"] not in types:
types.append(c.attrib["type"])
for t in types:
x_data.append([])
y_data.append([])
type_map[t] = i
i += 1
i = 0
for app in applications:
for node in roots[app]:
if node.tag == stat_name:
for c in node:
ind = type_map[c.attrib["type"]]
x_data[ind].append(float(c.attrib["numUse"]))
if c.text == "MAX":
y_data[ind].append(max_y*1.5)
else:
y_data[ind].append(float(c.text))
i += 1
fig = plt.figure(figsize=(7,3))
ax = fig.add_subplot(111)
i = 0
legend_scat = []
#for app in applications:
for t in types:
        l = ax.scatter(x_data[i], y_data[i], color=tableau_colors[i%TOTAL_COLOR_NUM], label=t)
legend_scat.append(l)
i += 1
#ax.legend(legend_scat, applications, bbox_to_anchor=(1.05, 1.05), prop={'size':'10'})
ax.legend(legend_scat, types, bbox_to_anchor=(1.05, 1.05), prop={'size':'10'})
ax.set_ylim(0, max_y*1.6)
ax.set_xlim(0, max_x*1.1)
plt.ylabel("avg path distance to source", fontsize=10)
plt.xlabel("avg #use per controller action", fontsize=10)
plt.tight_layout()
fig.savefig("%s/general_%s.pdf"%(result_path, stat_name))
def plot_table_stat(stat_name):
x_data = []
y_data = []
i = 0
x_max = 0
y_max = 0
for app in applications:
x_data.append([])
y_data.append([])
for node in roots[app]:
if node.tag == stat_name:
for c in node:
x_data[i].append(float(c.text))
if float(c.text) > x_max:
x_max = float(c.text)
y_data[i].append(float(c.attrib["write"]))
if float(c.attrib["write"]) > y_max:
y_max = float(c.attrib["write"])
i += 1
fig = plt.figure(figsize=(7,3))
ax = fig.add_subplot(111)
i = 0
legend_scat = []
for app in applications:
l = ax.scatter(x_data[i], y_data[i], color=tableau_colors[i%TOTAL_COLOR_NUM], label=app)
legend_scat.append(l)
i += 1
ax.legend(legend_scat, applications, bbox_to_anchor=(1.05, 1.05), prop={'size':'10'})
plt.ylabel("avg #write queries on a table", fontsize=10)
plt.xlabel("avg #read queries on a table", fontsize=10)
ax.set_ylim(0, y_max*1.1)
ax.set_xlim(0, x_max*1.6)
plt.tight_layout()
fig.savefig("%s/general_%s.pdf"%(result_path, stat_name))
def plot_index_stat(stat_name):
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(211)
N = len(applications)
ind = np.arange(N)
x_index_data = []
y_index_data = []
y_index_max = 0
x_noindex_data = []
y_noindex_data = []
y_noindex_max = 0
i = 0
for app in applications:
x_index_data.append([])
y_index_data.append([])
x_noindex_data.append([])
y_noindex_data.append([])
for node in roots[app]:
if node.tag == stat_name:
for c1 in node:
if c1.tag == "fieldHasIndex":
for c in c1:
y_index_data[i].append(float(c.text))
x_index_data[i].append(i)
elif c1.tag == "fieldHasNoIndex":
for c in c1:
y_noindex_data[i].append(float(c.text))
x_noindex_data[i].append(i)
i += 1
i = 0
print y_index_data
for app in applications:
l = ax.scatter(x_index_data[i], y_index_data[i], color=tableau_colors[i%TOTAL_COLOR_NUM], label=app)
i += 1
plt.tick_params(labelsize=6)
plt.xticks(ind,applications_on_figure,rotation='vertical')
plt.ylabel("Avg use in select condition", fontsize=10)
plt.xlabel("Field with index", fontsize=10)
plt.tight_layout()
ax = fig.add_subplot(212)
i = 0
for app in applications:
l = ax.scatter(x_noindex_data[i], y_noindex_data[i], color=tableau_colors[i%TOTAL_COLOR_NUM], label=app)
i += 1
plt.tick_params(labelsize=6)
plt.xticks(ind,applications_on_figure,rotation='vertical')
plt.ylabel("Avg use in select condition", fontsize=10)
plt.xlabel("Field without index", fontsize=10)
plt.tight_layout()
fig.savefig("%s/general_%s.pdf"%(result_path, stat_name))
if os.path.isdir(result_path) == False:
os.system("mkdir %s"%result_path)
for app in applications:
print "python stats.py %s %s/%s_stat.xml"%(app, result_path, app)
os.system("python stats.py %s %s/%s_stat.xml"%(app, result_path, app))
#print "python collect_nextaction.py %s "%(app)
#os.system("python collect_nextaction.py %s %s/nextaction_%s_stat.xml"%(app, result_path, app))
#print "python collect_funcdeps.py %s %s/%s_funcdeps.log"%(app, result_path, app)
#os.system("python collect_funcdeps.py %s %s/%s_funcdeps.log"%(app, result_path, app))
fname = "%s/%s_stat.xml"%(result_path, app)
print fname
tree = ET.parse(fname)
roots[app] = tree.getroot()
print ""
#plot_index_stat("indexStat")
stats=["queryGeneral","redundantField", "queryCard", "queryOnlyToQuery", "fieldOnlyConst", "fieldNoInput", "queryPartial"]
#stats = ["queryGeneral","branch","branchInView","queryInView","usedInView","usedSQLString","onlyFromUser","inClosure","readSink","readSource","writeSource","TableInView","assocQuery","transaction","transactionNested", "queryString", "affectedInControlflow", "queryFunction","viewClosure","loopWhile","constStat","inputReaches","loopNestedDepth","inputAffectPath","inputAffectQuery","queryCardinality","redundantData","tableFieldFuncdep","closureCarryDep","queryDependency"]
YaxisLabel = {}
YaxisLabel["queryOnlyToQuery"] = "Avg #queries in an action"
YaxisLabel["queryPartial"] = "Avg #queries in an action"
YaxisLabel["queryGeneral"] = "Avg #queries in an action"
YaxisLabel["fieldOnlyConst"] = "#fields"
YaxisLabel["fieldNoInput"] = "#fields"
YaxisLabel["branch"] = "Avg #branch in an action"
YaxisLabel["branchInView"] = "Avg #branch in an action"
YaxisLabel["queryInView"] = "Avg #Q in an action"
YaxisLabel["usedInView"] = "Average #readQ in an action"
YaxisLabel["usedSQLString"] = "Average #readQ in an action"
YaxisLabel["materialized"] = "Average #readQ returning full record in an action"
YaxisLabel["onlyFromUser"] = "Average #Q in an action"
YaxisLabel["inClosure"] = "Avg #Q in an action"
YaxisLabel["readSink"] = "Avg #sink nodes for each query"
YaxisLabel["readSource"] = "Avg #source nodes for each query"
YaxisLabel["writeSource"] = "Avg #source nodes for each query"
YaxisLabel["TableInView"] = "#tables"
YaxisLabel["FieldInView"] = "#fields from all tables"
YaxisLabel["transaction"] = "#queries in a transaction"
YaxisLabel["transactionNested"] = "#transactions added from all actions"
YaxisLabel["queryString"] = "Average used frequency in each read query"
YaxisLabel["queryFunction"] = "Average used frequency in each query"
YaxisLabel["constStat"] = "Average used frequency in each read query"
YaxisLabel["inputReaches"] = "Breakdown of input affecting % of queries"
YaxisLabel["tableStat"] = "Breakdown of #Q by tables in an action"
YaxisLabel["path"] = "Number of instructions"
YaxisLabel["loopNestedDepth"] = "Avg #loop in an action"
YaxisLabel["queryCard"] = "Avg #query in an action"
YaxisLabel["inputAffectPath"] = "#nodes"
YaxisLabel["inputAffectQuery"] = "#queries"
YaxisLabel["tableFieldFuncdep"] = "#fields"
YaxisLabel["selectCondition"] = "#tables"
YaxisLabel["redundantField"] = "field size by byte"
YaxisLabel["longestQueryPath"] = "#queries"
YaxisLabel["orderFunction"] = "percentage of order/non_order queries in an action"
YaxisLabel["orderField"] = "#tables"
YaxisLabel["loopWhile"] = "Avg #loops in an action"
YaxisLabel["closureCarryDep"] = "Avg #queries in loop in an action"
YaxisLabel["assocQuery"] = "Avg #queries in an action"
YaxisLabel["affectedInControlflow"] = "Avg #queries in an action"
YaxisLabel["viewClosure"] = "Avg #view rendering in an action"
YaxisLabel["queryDependency"] = "Avg #queries in an action"
for s in stats:
print ""
print "=============="
print "printing %s..."%s
general_plot(s, YaxisLabel[s])
exit(0)
plot_field_stat("tableFieldStat")
plot_field_stat("nonFieldStat")
plot_table_stat("tableStat")
QP_stack = {}
QP_stack["longestQueryPathLoopCarry"] = 0
QP_stack["longestQueryPathLoopNoCarry"] = 0
QP_stack["longestQueryPathNotInLoop"] = 0
QP_stack["totalQueryNumber"] = 1
general_plot("longestQueryPath", YaxisLabel["longestQueryPath"], True, QP_stack)
general_plot("path", YaxisLabel["path"], True, {})
#NA stands for NextAction
roots = {}
#amahiPlatform communityengine jobsworth linuxfr piggybak rucksack wallgig
#boxroom communityengine_with_log jobsworth_with_log lobsters publify sharetribe
#boxroom_with_log diaspora jobsworth_with_logs lobsters.txt publify_with_log shoppe
#Brevidy enki kanban lobsters_with_log railscollab sugar
#calagator fulcrum kandan onebody railscollab_with_log tracks
#app_string="fulcrum rucksack boxroom publify brevidy tracks lobsters communityengine linuxfr railscollab shoppe amahiPlatform enki wallgig"
app_string = "boxroom lobsters wallgig linuxfr kandan brevidy onebody communityengine calagator kanban fulcrum rucksack railscollab jobsworth tracks shoppe amahiPlatform sharetribe publify enki"
applications = app_string.split(" ")
applications_on_figure = []
i=1
for a in applications:
temp_str = a[0:3].upper()
applications_on_figure.append(temp_str)
i += 1
for app in applications:
fname = "%s/nextaction_%s_stat.xml"%(result_path, app)
print fname
tree = ET.parse(fname)
roots[app] = tree.getroot()
general_plot("nextAction", "Average #Q in next action")
|
import datetime as dt
import json
import logging
import os
import boto3
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
# Retrieve region where Lambda is being executed
region_name = os.environ["AWS_REGION"] # "ap-southeast-1"
# Create a client for the AWS Analytical service to use
client = boto3.client("glue")
sagemaker = boto3.client("sagemaker")
def datetimeconverter(o):
if isinstance(o, dt.datetime):
return o.__str__()
def check_job_status(job_details):
# This function checks the status of the currently running job
job_response = client.get_job_run(
JobName=job_details["jobName"], RunId=job_details["jobRunId"]
)
json_data = json.loads(json.dumps(job_response, default=datetimeconverter))
# IMPORTANT update the status of the job based on the job_response (e.g RUNNING, SUCCEEDED, FAILED)
job_details["jobStatus"] = json_data.get("JobRun").get("JobRunState")
response = {"jobDetails": job_details}
return response
def send_pipeline_execution_success(token):
try:
sagemaker.send_pipeline_execution_step_success(
CallbackToken=token,
OutputParameters=[
{
"Name": "final_status",
"Value": "Glue Job finished.",
}
],
)
    except Exception:
        logger.info(
            (
                "An error occurred: Step GluePrepCallbackStep is already"
                " in a terminal status, so the success callback was not sent"
            )
        )
def lambda_handler(event, context):
"""Calls custom job waiter developed by user
Arguments:
event {dict} -- Dictionary with details on previous processing step
context {dict} -- Dictionary with details on Lambda context
Returns:
{dict} -- Dictionary with Processed Bucket, Key(s) and Job Details
"""
try:
logger.info("Lambda event is [{}]".format(event))
job_details = event["body"]["job"]["Payload"]["jobDetails"]
logger.info("Checking Job Status with user custom code")
# transform_handler = TransformHandler().stage_transform(team, dataset, stage)
response = check_job_status(job_details) # custom user code called
if response["jobDetails"]["jobStatus"] == "SUCCEEDED":
send_pipeline_execution_success(job_details["token"])
elif response["jobDetails"]["jobStatus"] == "FAILED":
sagemaker.send_pipeline_execution_step_failure(
CallbackToken=job_details["token"], FailureReason="unknown reason"
)
logger.info("Response is [{}]".format(response))
except Exception as e:
logger.error("Fatal error", exc_info=True)
sagemaker.send_pipeline_execution_step_failure(
CallbackToken=job_details["token"], FailureReason=str(e)
)
raise e
return response
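# --- Hedged example payload (not in the original handler) ---
# Illustrative shape of the event this handler expects; only the keys read
# above (body.job.Payload.jobDetails with jobName, jobRunId, jobStatus, token)
# come from the code, the concrete values are made-up placeholders.
#
# {
#   "body": {
#     "job": {
#       "Payload": {
#         "jobDetails": {
#           "jobName": "example-glue-job",
#           "jobRunId": "jr_0123456789abcdef",
#           "jobStatus": "RUNNING",
#           "token": "<sagemaker-callback-token>"
#         }
#       }
#     }
#   }
# }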
|
#!/usr/bin/python3
#sudo pm2 start led_manager.py --name led_manager --interpreter python3
import signal
import time
import sys
import paho.mqtt.client as mqtt
import json
import random
import subprocess
import re
import threading
mqttClient = None
canPublish = False
connected_to_wifi = False
charging_battery = False
charging_event = threading.Event()
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
global canPublish
print("Connected with result code "+str(rc))
canPublish = True
client.subscribe("pibot/power/state")
client.subscribe("pibot/wifi/state")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
global charging_event
global charging_battery
global connected_to_wifi
#print(msg.topic+" "+str(msg.payload))
try:
if msg.topic == "pibot/wifi/state":
json_data = json.loads(msg.payload)
connected_to_wifi = bool(json_data["connected"])
if msg.topic == "pibot/power/state":
json_data = json.loads(msg.payload)
charging_battery = bool(json_data["charging"])
charging_event.set()
except Exception as e:
print(f"error : {e}")
def wifi_led():
global connected_to_wifi
global mqttClient
global canPublish
while True:
try:
if canPublish:
if connected_to_wifi:
mqttClient.publish("pibot/leds/cmd", json.dumps({"2" : "0000FF"}))
else:
mqttClient.publish("pibot/leds/cmd", json.dumps({"2" : "000000"}))
time.sleep(1)
mqttClient.publish("pibot/leds/cmd", json.dumps({"2" : "0000FF"}))
time.sleep(1)
except Exception as e:
print(f"error : {e}")
def charging_led():
global charging_battery
global charging_event
global mqttClient
global canPublish
while True:
try:
print(f"charg : {charging_battery}")
if canPublish:
if charging_battery:
mqttClient.publish("pibot/leds/cmd", json.dumps({"3" : "FF0000"}))
else:
mqttClient.publish("pibot/leds/cmd", json.dumps({"3" : "000000"}))
charging_event.wait(2)
charging_event.clear()
except Exception as e:
print(f"error : {e}")
def main():
global mqttClient
global canPublish
global connected_to_wifi
#application initialization
mqttClient = mqtt.Client()
mqttClient.on_connect = on_connect
mqttClient.on_message = on_message
#mqttClient.username_pw_set("login", "password")
mqttClient.connect("127.0.0.1", 1883, 60)
mqttClient.loop_start()
threading.Thread(name='wifi_led', target=wifi_led).start()
threading.Thread(name='charging_led', target=charging_led).start()
while True:
time.sleep(1)
def exit_gracefully(signum, frame):
global mqttClient
# restore the original signal handler as otherwise evil things will happen
# in input when CTRL+C is pressed, and our signal handler is not re-entrant
signal.signal(signal.SIGINT, original_sigint)
try:
if mqttClient is not None:
mqttClient.disconnect()
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
# restore the exit gracefully handler here
signal.signal(signal.SIGINT, exit_gracefully)
if __name__ == '__main__':
# store the original SIGINT handler
original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, exit_gracefully)
main()
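# --- Hedged test sketch (not in the original script) ---
# Example commands for exercising the handlers above with mosquitto_pub,
# assuming a local broker on 127.0.0.1:1883 as configured in main(); the
# topics and JSON keys match what on_message() parses, the values are
# illustrative.
#
#   mosquitto_pub -h 127.0.0.1 -t pibot/wifi/state -m '{"connected": true}'
#   mosquitto_pub -h 127.0.0.1 -t pibot/power/state -m '{"charging": false}'
#
# The script then publishes LED colours on pibot/leds/cmd, e.g. {"2": "0000FF"}.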
|
#!/usr/bin/env python3
"""
NAME: lf_cleanup.py
PURPOSE: clean up stations, cross connects and endpoints
EXAMPLE: ./lf_cleanup.py --mgr <lanforge ip>
Copyright 2021 Candela Technologies Inc
License: Free to distribute and modify. LANforge systems must be licensed.
"""
import sys
import os
import importlib
import argparse
import time
if sys.version_info[0] != 3:
print("This script requires Python 3")
exit(1)
sys.path.append(os.path.join(os.path.abspath(__file__ + "../../../")))
LFUtils = importlib.import_module("py-json.LANforge.LFUtils")
realm = importlib.import_module("py-json.realm")
Realm = realm.Realm
class lf_clean(Realm):
def __init__(self,
host="localhost",
port=8080,
resource=1,
clean_cxs=None,
clean_endp=None,
                 clean_sta=None,
                 clean_br=None,
                 clean_misc=None):
        super().__init__(lfclient_host=host,
                         lfclient_port=port)
self.host = host
self.port = port
self.resource = resource
self.clean_cxs = clean_cxs
self.clean_endp = clean_endp
        self.clean_sta = clean_sta
        self.clean_br = clean_br
        self.clean_misc = clean_misc
self.cxs_done = False
self.endp_done = False
self.sta_done = False
self.br_done = False
self.misc_done = False
def cxs_clean(self):
still_looking_cxs = True
iterations_cxs = 1
while still_looking_cxs and iterations_cxs <= 10:
iterations_cxs += 1
print("cxs_clean: iterations_cxs: {iterations_cxs}".format(iterations_cxs=iterations_cxs))
cx_json = super().json_get("cx")
if cx_json is not None:
print("Removing old cross connects")
for name in list(cx_json):
if name != 'handler' and name != 'uri' and name != 'empty':
print(name)
req_url = "cli-json/rm_cx"
data = {
"test_mgr": "default_tm",
"cx_name": name
}
super().json_post(req_url, data)
time.sleep(.5)
time.sleep(1)
else:
print("No cross connects found to cleanup")
still_looking_cxs = False
print("clean_cxs still_looking_cxs {cxs_looking}".format(cxs_looking=still_looking_cxs))
if not still_looking_cxs:
self.cxs_done = True
return still_looking_cxs
def endp_clean(self):
still_looking_endp = True
iterations_endp = 0
while still_looking_endp and iterations_endp <= 10:
iterations_endp += 1
print("endp_clean: iterations_endp: {iterations_endp}".format(iterations_endp=iterations_endp))
# get and remove current endps
endp_json = super().json_get("endp")
if endp_json is not None:
print("Removing old endpoints")
for name in list(endp_json['endpoint']):
print(list(name)[0])
if name[list(name)[0]]["name"] == '':
continue
req_url = "cli-json/rm_endp"
data = {
"endp_name": list(name)[0]
}
print(data)
super().json_post(req_url, data)
time.sleep(.5)
time.sleep(1)
else:
print("No endpoints found to cleanup")
still_looking_endp = False
print("clean_endp still_looking_endp {ednp_looking}".format(ednp_looking=still_looking_endp))
if not still_looking_endp:
self.endp_done = True
return still_looking_endp
def sta_clean(self):
still_looking_sta = True
iterations_sta = 0
while still_looking_sta and iterations_sta <= 10:
iterations_sta += 1
print("sta_clean: iterations_sta: {iterations_sta}".format(iterations_sta=iterations_sta))
try:
sta_json = super().json_get(
"port/1/{resource}/list?field=alias".format(resource=self.resource))['interfaces']
except TypeError:
sta_json = None
print("sta_json set to None")
# get and remove current stations
if sta_json is not None:
# print(sta_json)
print("Removing old stations ")
for name in list(sta_json):
for alias in list(name):
# print("alias {alias}".format(alias=alias))
                        # 'sta', 'wlan', 'moni' and 'Unknown' ports are all
                        # removed the same way, so handle them in one branch.
                        if any(tag in alias for tag in ('sta', 'wlan', 'moni', 'Unknown')):
                            info = self.name_to_eid(alias)
                            req_url = "cli-json/rm_vlan"
                            data = {
                                "shelf": info[0],
                                "resource": info[1],
                                "port": info[2]
                            }
                            # print(data)
                            super().json_post(req_url, data)
                            time.sleep(.5)
time.sleep(1)
else:
print("No stations found to cleanup")
still_looking_sta = False
print("clean_sta still_looking_sta {sta_looking}".format(sta_looking=still_looking_sta))
if not still_looking_sta:
self.sta_done = True
return still_looking_sta
def bridge_clean(self):
still_looking_br = True
iterations_br = 0
while still_looking_br and iterations_br <= 10:
iterations_br += 1
print("bridge_clean: iterations_br: {iterations_br}".format(iterations_br=iterations_br))
try:
br_json = super().json_get(
"port/1/1/list?field=alias")['interfaces']
except TypeError:
br_json = None
# get and remove current stations
if br_json is not None:
# print(br_json)
print("Removing old bridges ")
for name in list(br_json):
for alias in list(name):
if 'br' in alias:
print(alias)
info = self.name_to_eid(alias)
req_url = "cli-json/rm_vlan"
data = {
"shelf": info[0],
"resource": info[1],
"port": info[2]
}
# print(data)
super().json_post(req_url, data)
time.sleep(.5)
time.sleep(1)
else:
print("No bridges found to cleanup")
still_looking_br = False
print("clean_bridge still_looking_br {br_looking}".format(br_looking=still_looking_br))
if not still_looking_br:
self.br_done = True
return still_looking_br
    # Some tests have various station names or a station named 1.1.eth2
def misc_clean(self):
still_looking_misc = True
iterations_misc = 0
while still_looking_misc and iterations_misc <= 10:
iterations_misc += 1
print("misc_clean: iterations_misc: {iterations_misc}".format(iterations_misc=iterations_misc))
try:
misc_json = super().json_get(
"port/1/1/list?field=alias")['interfaces']
except TypeError:
misc_json = None
# get and remove current stations
if misc_json is not None:
print(misc_json)
print("Removing misc station names phy, 1.1.eth (malformed station name) ")
for name in list(misc_json):
for alias in list(name):
if 'phy' in alias and 'wiphy' not in alias:
print(alias)
info = self.name_to_eid(alias)
req_url = "cli-json/rm_vlan"
data = {
"shelf": info[0],
"resource": info[1],
"port": info[2]
}
# print(data)
super().json_post(req_url, data)
time.sleep(.5)
if '1.1.1.1.eth' in alias:
print('alias 1.1.1.1.eth {alias}'.format(alias=alias))
# need to hand construct for delete.
info = alias.split('.')
print('info {info}'.format(info=info))
req_url = "cli-json/rm_vlan"
info_2 = "{info2}.{info3}.{info4}".format(info2=info[2], info3=info[3], info4=info[4])
data = {
"shelf": info[0],
"resource": info[1],
"port": info_2
}
print(data)
super().json_post(req_url, data)
time.sleep(.5)
time.sleep(1)
else:
print("No misc found to cleanup")
still_looking_misc = False
print("clean_misc still_looking_misc {misc_looking}".format(misc_looking=still_looking_misc))
if not still_looking_misc:
self.misc_done = True
return still_looking_misc
'''
1: delete cx
2: delete endp
3: delete sta
when deleting sta first, you will end up with phantom CX
'''
def cleanup(self):
if self.clean_cxs:
# also clean the endp when cleaning cxs
still_looking_cxs = self.cxs_clean()
still_looking_endp = self.endp_clean()
print("clean_cxs: still_looking_cxs {looking_cxs} still_looking_endp {looking_endp}".format(
looking_cxs=still_looking_cxs, looking_endp=still_looking_endp))
if self.clean_endp and not self.clean_cxs:
still_looking_endp = self.endp_clean()
print("clean_endp: still_looking_endp {looking_endp}".format(looking_endp=still_looking_endp))
if self.clean_sta:
still_looking_sta = self.sta_clean()
print("clean_sta: still_looking_sta {looking_sta}".format(looking_sta=still_looking_sta))
if self.clean_br:
still_looking_br = self.bridge_clean()
print("clean_br: still_looking_br {looking_br}".format(looking_br=still_looking_br))
if self.clean_misc:
still_looking_misc = self.misc_clean()
print("clean_misc: still_looking_misc {looking_misc}".format(looking_misc=still_looking_misc))
def main():
parser = argparse.ArgumentParser(
prog='lf_cleanup.py',
formatter_class=argparse.RawTextHelpFormatter,
epilog='''\
Clean up cxs and endpoints
''',
description='''\
lf_cleanup.py:
--------------------
Generic command layout:
python3 ./lf_clean.py --mgr MGR
default port is 8080
        clean up stations, cxs and endpoints.
            NOTE: will only clean up what is present in the GUI,
            so the script may need to be run multiple times.
''')
parser.add_argument(
'--mgr',
'--lfmgr',
help='--mgr <hostname for where LANforge GUI is running>',
default='localhost')
parser.add_argument(
'--resource',
'--res',
help='--resource <realm resource>',
default='1')
parser.add_argument(
'--cxs',
help="--cxs, this will clear all the endps and cxs",
action='store_true')
parser.add_argument(
'--endp',
help="--endp, this will clear all the endps",
action='store_true')
parser.add_argument(
'--sta',
help="--sta, this will clear all the stations",
action='store_true')
parser.add_argument(
'--br',
help="--br, this will clear all the bridges",
action='store_true')
parser.add_argument(
'--misc',
help="--misc, this will clear sta with names phy (not wiphy) and 1.1.eth stations",
action='store_true')
args = parser.parse_args()
if args.cxs or args.endp or args.sta or args.br or args.misc:
        clean = lf_clean(host=args.mgr, resource=int(args.resource), clean_cxs=args.cxs, clean_endp=args.endp, clean_sta=args.sta, clean_br=args.br, clean_misc=args.misc)
print("cleaning cxs: {cxs} endpoints: {endp} stations: {sta} start".format(cxs=args.cxs, endp=args.endp, sta=args.sta))
if args.cxs:
print("cleaning cxs will also clean endp")
clean.cxs_clean()
clean.endp_clean()
if args.endp and not args.cxs:
clean.endp_clean()
if args.sta:
clean.sta_clean()
if args.br:
clean.bridge_clean()
if args.misc:
clean.misc_clean()
print("Clean done")
# print("Clean cxs_done {cxs_done} endp_done {endp_done} sta_done {sta_done}"
# .format(cxs_done=clean.cxs_done,endp_done=clean.endp_done,sta_done=clean.sta_done))
else:
print("please add option of --cxs ,--endp, --sta , --br, --misc to clean")
if __name__ == "__main__":
main()
|
# Copyright 2020 Dylan Baker
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of an immutable Mapping."""
from __future__ import annotations
try:
from typing import Protocol
except ImportError:
from typing_extensions import Protocol # type: ignore
import typing
__all__ = [
'ConstMapping',
'ConstDict',
]
KT = typing.TypeVar('KT')
VT = typing.TypeVar('VT')
class ConstMapping(Protocol[KT, VT]):
"""A Protocol for an immutable Mapping.
    Provides no methods for mutating the mapping; the underlying object is
    still a mapping and can in fact be mutated, even though mypy reports
    such mutations as errors.
    This is different from typing.Mapping in that it is a Protocol, so any
    object that implements these methods will work correctly.
"""
def __contains__(self, key: KT) -> bool: ...
def __eq__(self, other: object) -> bool: ...
def __ne__(self, other: object) -> bool: ...
def __iter__(self) -> typing.Iterator[KT]: ...
def __len__(self) -> int: ...
def __reversed__(self) -> typing.Iterable[KT]: ...
# TODO: do we need a MutableMappingProtocol?
def copy(self) -> typing.MutableMapping[KT, VT]: ...
def get(self, key: KT, default: typing.Optional[VT]) -> typing.Optional[VT]: ...
def items(self) -> typing.ItemsView[KT, VT]: ...
def keys(self) -> typing.KeysView[KT]: ...
def values(self) -> typing.ValuesView[VT]: ...
class ConstDict(Protocol[KT, VT]):
"""A Protocol for an immutable Mapping.
    Provides no methods for mutating the mapping; the underlying object is
    still a mapping and can in fact be mutated, even though mypy reports
    such mutations as errors.
    This is different from typing.Mapping in that it is a Protocol, so any
    object that implements these methods will work correctly.
"""
def __contains__(self, key: KT) -> bool: ...
def __eq__(self, other: object) -> bool: ...
def __ne__(self, other: object) -> bool: ...
def __iter__(self) -> typing.Iterator[KT]: ...
def __len__(self) -> int: ...
def __reversed__(self) -> typing.Iterable[KT]: ...
def copy(self) -> typing.Dict[KT, VT]: ...
def get(self, key: KT, default: typing.Optional[VT]) -> typing.Optional[VT]: ...
def items(self) -> typing.ItemsView[KT, VT]: ...
def keys(self) -> typing.KeysView[KT]: ...
def values(self) -> typing.ValuesView[VT]: ...
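# --- Hedged usage sketch (not in the original module) ---
# Minimal illustration of annotating a read-only parameter with ConstMapping;
# the function and values below are illustrative assumptions, not part of
# this module's public API.
if __name__ == '__main__':
    def total(counts: ConstMapping[str, int]) -> int:
        # Only read-only access is used; mypy would reject `counts['x'] = 1` here.
        return sum(counts.values())

    print(total({'a': 1, 'b': 2}))  # a plain dict structurally satisfies the Protocol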
|
"""Main library entrypoint."""
import copy
import json
import io
import os
import sys
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.estimator.util import fn_args
from google.protobuf import text_format
from opennmt.utils import hooks, checkpoint, misc
from opennmt.utils.evaluator import external_evaluation_fn
from opennmt.utils.misc import format_translation_output, OrderRestorer
# These options require a value but we can fall back to a default one.
_CONFIG_FALLBACK = {
"params": {},
"train": {
"batch_type": "examples",
"bucket_width": 1,
"sample_buffer_size": 500000,
},
"eval": {
"batch_size": 32,
"eval_delay": 18000,
"exporters": "last"
},
"infer": {
"bucket_width": None,
"batch_size": 16
},
"score": {
"batch_size": 64
}
}
class Runner(object):
"""Class for managing training, inference, and export. It is mostly a
wrapper around ``tf.estimator.Estimator``.
"""
def __init__(self,
model,
config,
seed=None,
num_devices=1,
gpu_allow_growth=False,
session_config=None,
auto_config=False):
"""Initializes the runner parameters.
Args:
model: A :class:`opennmt.models.model.Model` instance to run.
config: The run configuration.
seed: The random seed to set.
num_devices: The number of devices (GPUs) to use for training.
gpu_allow_growth: Allow GPU memory to grow dynamically.
session_config: ``tf.ConfigProto`` overrides.
auto_config: If ``True``, use automatic configuration values defined by
:obj:`model`.
Raises:
NotImplementedError: If :obj:`auto_config` is ``True`` but :obj:`model`
does not define any automatic configuration values.
"""
self._model = model
self._num_devices = num_devices
# Configuration priority: user config > auto config > default config.
self._config = copy.deepcopy(_CONFIG_FALLBACK)
if auto_config:
model_config = self._model.auto_config(num_devices=num_devices)
if not model_config:
raise NotImplementedError("This model does not define any automatic configuration values")
misc.merge_dict(self._config, model_config)
misc.merge_dict(self._config, config)
tf.logging.info(
"Using parameters: %s", json.dumps(self._config, indent=2, sort_keys=True))
session_config_base = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
gpu_options=tf.GPUOptions(
allow_growth=gpu_allow_growth))
# Disable layout optimizer for better conv1d performance, see:
# https://github.com/tensorflow/tensorflow/issues/20309
# This field does not exist in TensorFlow 1.4, so guard against the
# exception.
try:
rewrite_options = text_format.Parse("""
graph_options {
rewrite_options {
layout_optimizer: OFF
}
}
""", tf.ConfigProto())
session_config_base.MergeFrom(rewrite_options)
except text_format.ParseError:
pass
if session_config is not None:
session_config_base.MergeFrom(session_config)
session_config = session_config_base
run_config = tf.estimator.RunConfig(
model_dir=self._config["model_dir"],
session_config=session_config,
tf_random_seed=seed)
# Create a first session to enforce GPU options.
# See https://github.com/OpenNMT/OpenNMT-tf/issues/80.
_ = tf.Session(config=session_config)
np.random.seed(seed)
random.seed(seed)
if "train" in self._config:
if "save_summary_steps" in self._config["train"]:
run_config = run_config.replace(
save_summary_steps=self._config["train"]["save_summary_steps"],
log_step_count_steps=self._config["train"]["save_summary_steps"])
if "save_checkpoints_steps" in self._config["train"]:
run_config = run_config.replace(
save_checkpoints_secs=None,
save_checkpoints_steps=self._config["train"]["save_checkpoints_steps"])
if "keep_checkpoint_max" in self._config["train"]:
run_config = run_config.replace(
keep_checkpoint_max=self._config["train"]["keep_checkpoint_max"])
self._estimator = tf.estimator.Estimator(
self._model.model_fn(
num_devices=self._num_devices,
eval_prediction_hooks_fn=self._make_eval_prediction_hooks_fn()),
config=run_config,
params=self._config["params"])
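  # --- Hedged example (not in the original class) ---
  # Illustrative minimum of the `config` dict consumed above; only keys that
  # this file actually reads are shown, and every value is an assumption
  # rather than a recommended setting.
  #
  #   config = {
  #       "model_dir": "run/baseline",
  #       "data": {
  #           "train_features_file": "src-train.txt",
  #           "train_labels_file": "tgt-train.txt",
  #           "eval_features_file": "src-val.txt",
  #           "eval_labels_file": "tgt-val.txt",
  #       },
  #       "train": {"batch_size": 64, "train_steps": 100000},
  #   }
  #   runner = Runner(model, config, num_devices=1, auto_config=True)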
def _make_eval_prediction_hooks_fn(self):
if "eval" not in self._config:
self._config["eval"] = {}
if (not self._config["eval"].get("save_eval_predictions", False)
and self._config["eval"].get("external_evaluators") is None):
return None
save_path = os.path.join(self._config["model_dir"], "eval")
if not tf.gfile.Exists(save_path):
tf.gfile.MakeDirs(save_path)
return lambda predictions: [
hooks.SaveEvaluationPredictionHook(
self._model,
os.path.join(save_path, "predictions.txt"),
post_evaluation_fn=external_evaluation_fn(
self._config["eval"].get("external_evaluators"),
self._config["data"]["eval_labels_file"],
output_dir=save_path),
predictions=predictions)]
def _build_train_spec(self, checkpoint_path):
train_hooks = [
hooks.LogParametersCountHook()]
if checkpoint_path is not None:
train_hooks.append(hooks.LoadWeightsFromCheckpointHook(checkpoint_path))
train_spec = tf.estimator.TrainSpec(
input_fn=self._model.input_fn(
tf.estimator.ModeKeys.TRAIN,
self._config["train"]["batch_size"],
self._config["data"],
self._config["data"]["train_features_file"],
labels_file=self._config["data"]["train_labels_file"],
batch_type=self._config["train"]["batch_type"],
batch_multiplier=self._num_devices,
bucket_width=self._config["train"]["bucket_width"],
single_pass=self._config["train"].get("single_pass", False),
num_threads=self._config["train"].get("num_threads"),
sample_buffer_size=self._config["train"]["sample_buffer_size"],
prefetch_buffer_size=self._config["train"].get("prefetch_buffer_size"),
maximum_features_length=self._config["train"].get("maximum_features_length"),
maximum_labels_length=self._config["train"].get("maximum_labels_length")),
max_steps=self._config["train"].get("train_steps"),
hooks=train_hooks)
return train_spec
def _build_eval_spec(self):
if "eval" not in self._config:
self._config["eval"] = {}
eval_spec = tf.estimator.EvalSpec(
input_fn=self._model.input_fn(
tf.estimator.ModeKeys.EVAL,
self._config["eval"]["batch_size"],
self._config["data"],
self._config["data"]["eval_features_file"],
num_threads=self._config["eval"].get("num_threads"),
prefetch_buffer_size=self._config["eval"].get("prefetch_buffer_size"),
labels_file=self._config["data"]["eval_labels_file"]),
steps=None,
exporters=_make_exporters(
self._config["eval"]["exporters"],
self._model.serving_input_fn(self._config["data"]),
assets_extra=self._get_model_assets()),
throttle_secs=self._config["eval"]["eval_delay"])
return eval_spec
def _get_model_assets(self):
generated_assets_path = os.path.join(self._estimator.model_dir, "assets")
if not tf.gfile.Exists(generated_assets_path):
tf.gfile.MakeDirs(generated_assets_path)
return self._model.get_assets(self._config["data"], asset_dir=generated_assets_path)
def train_and_evaluate(self, checkpoint_path=None):
"""Runs the training and evaluation loop.
Args:
      checkpoint_path: The checkpoint path to load the model weights from.
"""
if checkpoint_path is not None and tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
train_spec = self._build_train_spec(checkpoint_path)
eval_spec = self._build_eval_spec()
tf.estimator.train_and_evaluate(self._estimator, train_spec, eval_spec)
self._maybe_average_checkpoints()
def train(self, checkpoint_path=None):
"""Runs the training loop.
Args:
      checkpoint_path: The checkpoint path to load the model weights from.
"""
if checkpoint_path is not None and tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
train_spec = self._build_train_spec(checkpoint_path)
self._estimator.train(
train_spec.input_fn, hooks=train_spec.hooks, max_steps=train_spec.max_steps)
self._maybe_average_checkpoints()
def evaluate(self, checkpoint_path=None):
"""Runs evaluation."""
if checkpoint_path is not None and tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
eval_spec = self._build_eval_spec()
self._estimator.evaluate(
eval_spec.input_fn, hooks=eval_spec.hooks, checkpoint_path=checkpoint_path)
def _maybe_average_checkpoints(self, avg_subdirectory="avg"):
"""Averages checkpoints if enabled in the training configuration and if the
current training instance is the chief.
Args:
avg_subdirectory: The directory within the model directory that will
contain the averaged checkpoint.
Returns:
The path to the directory containing the averaged checkpoint or ``None``
if no checkpoints were averaged.
"""
average_last_checkpoints = self._config["train"].get("average_last_checkpoints", 0)
if average_last_checkpoints > 0 and self._estimator.config.is_chief:
return self.average_checkpoints(
os.path.join(self._estimator.model_dir, avg_subdirectory),
max_count=average_last_checkpoints)
return None
def average_checkpoints(self, output_dir, max_count=8):
"""Averages checkpoints.
Args:
output_dir: The directory that will contain the averaged checkpoint.
max_count: The maximum number of checkpoints to average.
Returns:
The path to the directory containing the averaged checkpoint.
"""
return checkpoint.average_checkpoints(
self._estimator.model_dir,
output_dir,
max_count=max_count,
session_config=self._estimator.config.session_config)
def infer(self,
features_file,
predictions_file=None,
checkpoint_path=None,
log_time=False):
"""Runs inference.
Args:
features_file: The file(s) to infer from.
predictions_file: If set, predictions are saved in this file.
checkpoint_path: Path of a specific checkpoint to predict. If ``None``,
the latest is used.
log_time: If ``True``, several time metrics will be printed in the logs at
the end of the inference loop.
"""
if "infer" not in self._config:
self._config["infer"] = {}
if checkpoint_path is not None and tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
input_fn = self._model.input_fn(
tf.estimator.ModeKeys.PREDICT,
self._config["infer"]["batch_size"],
self._config["data"],
features_file,
bucket_width=self._config["infer"]["bucket_width"],
num_threads=self._config["infer"].get("num_threads"),
prefetch_buffer_size=self._config["infer"].get("prefetch_buffer_size"))
if predictions_file:
stream = io.open(predictions_file, encoding="utf-8", mode="w")
else:
stream = sys.stdout
infer_hooks = []
if log_time:
infer_hooks.append(hooks.LogPredictionTimeHook())
ordered_writer = None
write_fn = lambda prediction: (
self._model.print_prediction(prediction, params=self._config["infer"], stream=stream))
for prediction in self._estimator.predict(
input_fn=input_fn,
checkpoint_path=checkpoint_path,
hooks=infer_hooks):
      # If the index is part of the prediction, outputs may arrive out of order.
if "index" in prediction:
if ordered_writer is None:
ordered_writer = OrderRestorer(
index_fn=lambda prediction: prediction["index"], callback_fn=write_fn)
ordered_writer.push(prediction)
else:
write_fn(prediction)
if predictions_file:
stream.close()
def export(self, checkpoint_path=None, export_dir_base=None):
"""Exports a model.
Args:
checkpoint_path: The checkpoint path to export. If ``None``, the latest is used.
export_dir_base: The base directory in which a timestamped subdirectory
containing the exported model will be created. Defaults to
``$MODEL_DIR/export/manual``.
Returns:
The string path to the exported directory.
"""
if checkpoint_path is not None and tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
if export_dir_base is None:
export_dir_base = os.path.join(self._estimator.model_dir, "export", "manual")
kwargs = {}
if hasattr(self._estimator, "export_saved_model"):
export_fn = self._estimator.export_saved_model
else:
export_fn = self._estimator.export_savedmodel
if "strip_default_attrs" in fn_args(self._estimator.export_savedmodel):
# Set strip_default_attrs to True for TensorFlow 1.6+ to stay consistent
# with the behavior of tf.estimator.Exporter.
kwargs["strip_default_attrs"] = True
return export_fn(
export_dir_base,
self._model.serving_input_fn(self._config["data"]),
assets_extra=self._get_model_assets(),
checkpoint_path=checkpoint_path,
**kwargs)
def score(self, features_file, predictions_file, checkpoint_path=None):
"""Scores existing predictions.
Args:
features_file: The input file.
predictions_file: The predictions file to score.
checkpoint_path: Path of a specific checkpoint to use. If ``None``,
the latest is used.
Raises:
      ValueError: if no checkpoint is found or if the model is not a sequence to
        sequence model.
"""
if not hasattr(self._model, "target_inputter"):
raise ValueError("scoring only works for sequence to sequence models")
if checkpoint_path is None:
checkpoint_path = tf.train.latest_checkpoint(self._estimator.model_dir)
elif tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
if checkpoint_path is None:
raise ValueError("could not find a trained model in %s" % self._estimator.model_dir)
if "score" not in self._config:
self._config["score"] = {}
input_fn = self._model.input_fn(
tf.estimator.ModeKeys.EVAL,
self._config["score"]["batch_size"],
self._config["data"],
features_file,
labels_file=predictions_file,
num_threads=self._config["score"].get("num_threads"),
prefetch_buffer_size=self._config["score"].get("prefetch_buffer_size"))
with tf.Graph().as_default() as g:
tf.train.create_global_step(g)
features, labels = input_fn()
labels["alignment"] = None # Add alignment key to force the model to return attention.
with tf.variable_scope(self._model.name):
outputs, _ = self._model(
features,
labels,
self._estimator.params,
tf.estimator.ModeKeys.EVAL)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=outputs["logits"], labels=labels["ids_out"])
weights = tf.sequence_mask(labels["length"], dtype=cross_entropy.dtype)
masked_cross_entropy = cross_entropy * weights
scores = tf.reduce_sum(masked_cross_entropy, axis=1)
results = {
"attention": outputs["attention"],
"cross_entropy": cross_entropy,
"score": scores,
"tokens": labels["tokens"],
"length": labels["length"] - 1 # For -1, see sequence_to_sequence.shift_target_sequence.
}
with tf.train.MonitoredSession(
session_creator=tf.train.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
config=self._estimator.config.session_config)) as sess:
while not sess.should_stop():
for batch in misc.extract_batches(sess.run(results)):
tokens = batch["tokens"][:batch["length"]]
sentence = self._model.target_inputter.tokenizer.detokenize(tokens)
token_level_scores = None
if self._config["score"].get("with_token_level"):
token_level_scores = batch["cross_entropy"][:batch["length"]]
alignment_type = self._config["score"].get("with_alignments")
sentence = format_translation_output(
sentence,
score=batch["score"],
token_level_scores=token_level_scores,
attention=batch["attention"][:batch["length"]],
alignment_type=alignment_type)
misc.print_bytes(tf.compat.as_bytes(sentence))
def _make_exporters(exporters_type, serving_input_fn, assets_extra=None):
if exporters_type is None:
return None
if not isinstance(exporters_type, list):
exporters_type = [exporters_type]
exporters = []
for exporter_type in exporters_type:
exporter_type = exporter_type.lower()
if exporter_type == "last":
exporters.append(tf.estimator.LatestExporter(
"latest", serving_input_fn, assets_extra=assets_extra))
elif exporter_type == "final":
exporters.append(tf.estimator.FinalExporter(
"final", serving_input_fn, assets_extra=assets_extra))
elif exporter_type == "best":
if not hasattr(tf.estimator, "BestExporter"):
raise ValueError("BestExporter is only available starting from TensorFlow 1.9")
exporters.append(tf.estimator.BestExporter(
name="best", serving_input_receiver_fn=serving_input_fn, assets_extra=assets_extra))
else:
raise ValueError("invalid exporter type: %s" % exporter_type)
if len(exporters) == 1:
return exporters[0]
return exporters
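# Hypothetical usage sketch (not part of the original file): the methods above are
# typically driven from a small script. The constructor of the enclosing runner
# class is not shown in this excerpt, so `runner` is assumed to be an already-built
# instance; file names below are placeholders.
def _example_runner_usage(runner, features_file="src-test.txt"):  # pragma: no cover
  # Train with periodic evaluation; checkpoints are averaged afterwards if enabled.
  runner.train_and_evaluate()
  # Export a SavedModel (defaults to $MODEL_DIR/export/manual) and run inference.
  export_dir = runner.export()
  runner.infer(features_file, predictions_file="predictions.txt", log_time=True)
  return export_dir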
|
from setuptools import setup, find_packages
from setuptools.extension import Extension
try:
from Cython.Distutils import build_ext
USE_CYTHON = True
except ImportError:
USE_CYTHON = False
import os
packages = ['lely_io']
package_data = {}
package_dir = {}
for pkg in packages:
package_data[pkg] = ['*.pxd']
package_dir[pkg] = os.path.join(*pkg.split('.'))
ext = '.pyx' if USE_CYTHON else '.c'
ext_modules = []
for pkg in packages:
ext_modules.append(Extension(
pkg + '.*',
[os.path.join(*[os.path.dirname(__file__), package_dir[pkg], '*' + ext])],
language='c', libraries=['lely-io']
))
if USE_CYTHON:
from Cython.Build import cythonize
ext_modules = cythonize(ext_modules, include_path=['../can'])
setup(
name='lely_io',
version='3.0.2',
description='Python bindings for the Lely I/O library.',
url='https://gitlab.com/lely_industries/lely-core',
author='J. S. Seldenthuis',
author_email='jseldenthuis@lely.com',
license='Apache-2.0',
packages=find_packages(),
package_data=package_data,
package_dir=package_dir,
ext_modules=ext_modules
)
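# Assumed build notes (a sketch, not from the original file): the extension links
# against the system lely-io C library, so its headers and shared library must be
# installed first. With Cython available the .pyx sources above are cythonized;
# otherwise the pre-generated .c files are compiled directly, e.g. via
# `python setup.py build_ext --inplace`.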
|
'''
Notes:
https://stackoverflow.com/questions/12193013/flask-python-trying-to-return-list-or-dict-to-ajax-call
'''
from flask import Flask, render_template
from flask import jsonify
import os
from flask import request
import time
app = Flask(__name__)
#app.register_blueprint()
import base64
import sys
import codecs
import MySQLdb
def get_db():
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="test", # your username
passwd="test", # your password
db="test") # name of the data base
return db
def get_db_cursor():
    # you must create a Cursor object. It will let
    # you execute all the queries you need
    db = get_db()
    return db.cursor()
'''
Add Github
'''
def add_github(db, github_link, admin_notes=None):
# you must create a Cursor object. It will let
# you execute all the queries you need
cur = db.cursor()
time_now = time.strftime('%Y-%m-%d %H:%M:%S')
#
sql = "INSERT INTO TP_GITHUB_ANALYZER (GITHUB_LINK, ADMIN_NOTES, ADDED_DATE) VALUES (%s, %s, %s)"
values = (github_link, admin_notes, time_now)
# Use all the SQL you like
cur.execute(sql, values)
db.commit()
print('Done : '+str(cur.rowcount)+" inserted")
'''
Read Github
'''
def read_github(db):
cur = db.cursor()
# Use all the SQL you like
cur.execute("SELECT * FROM TP_GITHUB_ANALYZER")
    # build one dict per row and collect them in a list
    counter = 0
github_list = []
for row in cur.fetchall():
try:
counter = counter + 1
github_dict = {}
github_dict['gid'] = str(row[0])
github_dict['github_link'] = str(row[1])
github_dict['added_date'] = str(row[2])
github_dict['updated_date'] = str(row[3])
github_dict['admin_notes'] = str(row[4])
github_list.append(github_dict)
print(github_dict)
except ValueError as error:
print('Error', format(error))
return github_list
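# Inferred layout of TP_GITHUB_ANALYZER (an assumption based on the INSERT above
# and the row indices used here, not a verified schema):
#   row[0] GID, row[1] GITHUB_LINK, row[2] ADDED_DATE,
#   row[3] UPDATED_DATE, row[4] ADMIN_NOTES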
@app.route("/")
def hello():
return render_template('index.html')
'''
Get Cities
possible urls:
http://localhost:5000/get/cities
'''
@app.route("/get/cities")
def get_cities():
db = get_db()
    c_list = read_city(db)  # NOTE: read_city is not defined in this module
return jsonify(c_list)
#return "Hello World 2"
'''
Add Github
possible urls:
http://localhost:5000/add/github
'''
@app.route("/add/github")
def add_github_rest():
db = get_db()
    g_link = request.args.get('link')  # GET route: read the link from the query string
add_github(db, g_link, None)
return "added"
'''
Add Github View
possible urls:
http://localhost:5000/add/github/view
'''
@app.route("/add/github/view", methods=['POST'])
def add_github_view():
db = get_db()
g_link = request.form.get('link')
add_github(db, g_link, None)
result = {
'apiresult' : 0,
'apimessage': 'ok'
}
return render_template('add-result.html', result=result)
'''
Get Github links
possible urls:
http://localhost:5000/get/github/links
'''
@app.route("/get/github/links")
def get_github_links():
db = get_db()
github_list = read_github(db)
return jsonify(github_list)
'''
Get Github links view
possible urls:
http://localhost:5000/get/github/links/view
'''
@app.route("/get/github/links/view")
def get_github_links_view():
db = get_db()
github_list = read_github(db)
    g_list_json = github_list
result = {
'apiresult' : 0,
'apimessage': 'ok',
'apivalue' : g_list_json
}
return render_template('view-links.html', result=result)
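'''
Hypothetical client sketch (not part of the original app): exercises the
endpoints documented above with the `requests` package. Host and port match
the defaults in the __main__ block; the example link is a placeholder.
'''
def example_client(base_url='http://127.0.0.1:5000'):
    import requests
    # submit a link through the form-backed view endpoint
    requests.post(base_url + '/add/github/view', data={'link': 'https://github.com/example/repo'})
    # fetch the stored links as JSON
    return requests.get(base_url + '/get/github/links').json()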
if __name__ == "__main__":
app.config['city'] = 'Toronto'
host = os.environ.get('IP', '127.0.0.1')
port = int(os.environ.get('PORT', 5000))
    app.run(host=host, port=port, use_reloader=False)
|
def default_config_path(agent_name, env_name):
return 'configs/{}_{}.json'.format(agent_name, env_name)
def default_save_folder(agent_name, env_name):
return '{}_{}_save_point'.format(agent_name, env_name)
__all__ = ['default_config_path', 'default_save_folder']
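# Examples (hypothetical agent/env names):
#   default_config_path('dqn', 'CartPole-v1') -> 'configs/dqn_CartPole-v1.json'
#   default_save_folder('dqn', 'CartPole-v1') -> 'dqn_CartPole-v1_save_point'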
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
from functools import partial
from pathlib import Path
import pytest
import torch.cuda
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import colossalai
from colossalai.builder import build_dataset, build_data_sampler
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
CONFIG = dict(
train_data=dict(
dataset=dict(
type='CIFAR10Dataset',
root=Path(os.environ['DATA']),
train=True,
download=True,
transform_pipeline=[
dict(type='ToTensor'),
dict(type='Normalize', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
]
),
dataloader=dict(
num_workers=2,
batch_size=8,
sampler=dict(
type='DataParallelSampler',
)
)
),
parallel=dict(
pipeline=dict(size=1),
tensor=dict(size=1, mode=None),
),
seed=1024,
)
def run_data_sampler(local_rank, world_size):
dist_args = dict(
config=CONFIG,
local_rank=local_rank,
world_size=world_size,
backend='gloo',
port='29503',
host='localhost'
)
colossalai.init_dist(**dist_args)
print('finished initialization')
dataset = build_dataset(gpc.config.train_data.dataset)
sampler_cfg = gpc.config.train_data.dataloader.pop('sampler')
sampler = build_data_sampler(sampler_cfg, dataset)
dataloader = DataLoader(dataset=dataset, sampler=sampler, **gpc.config.train_data.dataloader)
data_iter = iter(dataloader)
    img, label = next(data_iter)
img = img[0]
if gpc.get_local_rank(ParallelMode.DATA) != 0:
img_to_compare = img.clone()
else:
img_to_compare = img
dist.broadcast(img_to_compare, src=0, group=gpc.get_group(ParallelMode.DATA))
if gpc.get_local_rank(ParallelMode.DATA) != 0:
        assert not torch.equal(img, img_to_compare), \
            'Same image was distributed across ranks but expected it to be different'
@pytest.mark.cpu
def test_data_sampler():
world_size = 4
test_func = partial(run_data_sampler, world_size=world_size)
mp.spawn(test_func, nprocs=world_size)
if __name__ == '__main__':
test_data_sampler()
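# Assumed invocation (a sketch, not from the original file): the config above reads
# the dataset root from the DATA environment variable and the test is marked `cpu`,
# so something like `DATA=/path/to/datasets pytest -m cpu test_data_sampler.py`
# should run it; it spawns 4 gloo-backed processes on port 29503.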
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests import utils
class SimpleAbsModule(torch.nn.Module):
def __init__(self):
super(SimpleAbsModule, self).__init__()
def forward(self, a):
return torch.abs(a + a)
class TestAbs(utils.TorchGlowTestCase):
def test_abs_basic(self):
"""Basic test of the PyTorch Abs Node on Glow."""
x = torch.randn(10)
utils.compare_tracing_methods(
SimpleAbsModule(),
x,
fusible_ops={"aten::abs"},
)
def test_abs_3d(self):
"""Test multidimensional tensor for the PyTorch Abs Node on Glow."""
x = torch.randn(2, 3, 5)
utils.compare_tracing_methods(
SimpleAbsModule(),
x,
fusible_ops={"aten::abs"},
)
|
# create a simple dictionary
myDict = {"key1": 1, "key2": 2, "key3": 3}
print(myDict)
print(myDict["key1"])
print(" ")
# add a key:value pair -- notice that the values can be any data type
myDict["newkey"] = "new"
print(myDict)
# and so can the keys
myDict[5] = 5
print(myDict)
print(" ")
# loop over the elements
for k, v in myDict.items():
    print("key = {!r}, value = {!r}".format(k, v))
print(" ")
# just get the keys
keys = list(myDict.keys())
print(keys)
print(" ")
# check whether a key exists
print("key1" in keys)
print("dummykey" not in keys)
|