import datetime
import io
import pickle
from whylogs.app import Session
from whylogs.core.datasetprofile import DatasetProfile
def profiles_eq(profile1: DatasetProfile, profile2: DatasetProfile):
assert set(profile1.columns) == set(profile2.columns)
assert profile1.constraints == profile2.constraints
assert profile1.dataset_timestamp == profile2.dataset_timestamp
assert profile1.session_id == profile2.session_id
# TODO this fails on mac for some reason. Need to figure out why.
# assert str(profile1.to_summary()) == str(profile2.to_summary())
assert profile1.name == profile2.name
def test_pickle_with_dataset_timestamp():
session = Session("project", "pipeline", writers=[])
dt = datetime.datetime.fromtimestamp(1634939335, tz=datetime.timezone.utc)
logger = session.logger("", dataset_timestamp=dt)
logger.log_csv(
io.StringIO(
"""a,b,c
1,1,1
1,1,2
4,4,3
"""
)
)
profile = logger.profile
pickled_profile = pickle.dumps(profile)
unpickled_profile: DatasetProfile = pickle.loads(pickled_profile)
profiles_eq(profile, unpickled_profile)
def test_serde_with_dataset_timezone():
session = Session("project", "pipeline", writers=[])
dt = datetime.datetime.fromtimestamp(1634939335, tz=datetime.timezone.utc)
logger = session.logger("", dataset_timestamp=dt)
logger.log_csv(
io.StringIO(
"""a,b,c
1,1,1
1,1,2
4,4,3
"""
)
)
profile = logger.profile
deserialized_profile = DatasetProfile.parse_delimited_single(profile.serialize_delimited())[1]
profiles_eq(profile, deserialized_profile)
def test_serde_without_dataset_timezone():
session = Session("project", "pipeline", writers=[])
dt = datetime.datetime.fromtimestamp(1634939335, tz=None)
logger = session.logger("", dataset_timestamp=dt)
logger.log_csv(
io.StringIO(
"""a,b,c
1,1,1
1,1,2
4,4,3
"""
)
)
profile = logger.profile
deserialized_profile = DatasetProfile.parse_delimited_single(profile.serialize_delimited())[1]
profiles_eq(profile, deserialized_profile)
|
#!/usr/bin/env python3
import os
import sys
import argparse
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from codegen import fblas_codegen
from codegen import json_parser
from codegen import fblas_types
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("json_file")
parser.add_argument("-output_dir", type=str, default="/tmp/")
args = parser.parse_args()
jd = json_parser.JSONParser(fblas_types.FblasCodegen.HostCodegen)
r = jd.parse_json(args.json_file)
codegen = fblas_codegen.FBLASCodegen(args.output_dir, fblas_types.FblasCodegen.HostCodegen)
codegen.generateRoutines(r)
|
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pd
#snp1=np.loadtxt('S&P2001-2010.csv',dtype='float',delimiter=',',usecols=(4),unpack=True)
#snp11=np.loadtxt('S&P2011-2020.csv',dtype='float',delimiter=',',usecols=(4),unpack=True)
dji1=np.loadtxt('DJI2001-2010.csv',dtype='float',delimiter=',',usecols=(4),unpack=True)
dji11=np.loadtxt('DJI2011-2020.csv',dtype='float',delimiter=',',usecols=(4),unpack=True)
dax1=np.loadtxt('DAX2001.csv',dtype='float',delimiter=',',usecols=(4),unpack=True)
dax11=np.loadtxt('DAX2011.csv',dtype='float',delimiter=',',usecols=(4),unpack=True)
jse7=np.loadtxt('JSE2007.csv',dtype='float',delimiter=',',usecols=(4),unpack=True)
jse11=np.loadtxt('JSE2011.csv',dtype='float',delimiter=',',usecols=(4),unpack=True)
time1 = pd.read_csv("DJI2001-2010.csv",parse_dates=["#Date"],usecols=[0]).values
time11 = pd.read_csv("DJI2011-2020.csv",parse_dates=["#Date"],usecols=[0]).values
timedax1 = pd.read_csv("DAX2001.csv",parse_dates=["#Date"],usecols=[0]).values
timedax11 = pd.read_csv("DAX2011.csv",parse_dates=["#Date"],usecols=[0]).values
timejse7 = pd.read_csv("JSE2007.csv",parse_dates=["#Date"],usecols=[0]).values
timejse11 = pd.read_csv("JSE2011.csv",parse_dates=["#Date"],usecols=[0]).values
plt.figure()
plt.plot(time1,dji1,'b',label='DOW Index')
plt.plot(timedax1,dax1,'r',label='DAX Index')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('Dow Jones and DAX Stock Price 2001 - 2010')
plt.legend()
plt.annotate("September 11 Attacks",xy=(pd.Timestamp('2001-9-11'), 9473.73),xytext=(pd.Timestamp('2001-2-11'), 12000),arrowprops=dict(arrowstyle='-|>'))
plt.annotate("Lehman Brothers Collapse",xy=(pd.Timestamp('2008-9-15'), 10926.2),xytext=(pd.Timestamp('2007-2-15'), 10000),arrowprops=dict(arrowstyle='-|>'))
plt.grid()
plt.figure()
plt.plot(time11,dji11,'b',label='DOW Index')
plt.plot(timedax11,dax11,'r',label='DAX Index')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('Dow Jones and DAX Stock Price 2011 - April 7, 2020')
plt.legend()
plt.grid()
plt.figure()
plt.plot(timejse7,jse7,'g',label='JSE Index')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('JSE Stock Price 2007 - 2010')
plt.legend()
plt.annotate("FIFA World Cup",xy=(pd.Timestamp('2010-6-11'), 8.5),xytext=(pd.Timestamp('2010-1-11'), 10),arrowprops=dict(arrowstyle='-|>'))
plt.grid()
plt.figure()
plt.plot(timejse11,jse11,'g',label='JSE Index')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('JSE Stock Price 2011 - 2020')
plt.legend()
plt.grid()
dji1fft = np.fft.rfft(dji1) # Use the real FFT (rfft): the price series is real-valued, so the negative-frequency half of a full FFT is redundant
dji11fft = np.fft.rfft(dji11)
dax1fft = np.fft.rfft(dax1)
dax11fft = np.fft.rfft(dax11)
jse7fft = np.fft.rfft(jse7)
threshfreq1 = 1/90 # Tri-monthly
threshfreq2 = 1/180 # Half-year
threshfreq3 = 1/60 # Bi monthly
threshfreq4 = 1/5 # Weekly, because stock market closed on weekends
threshfreq5 = 1/120 # Per quarter
def lowfreq1(x):
for i in range(len(x)):
if freq1[i]>threshfreq5:
x[i]=0
return x
def highfreq1(x):
for i in range(len(x)):
if freq1[i]<threshfreq4:
x[i]=0
return x
def lowfreq11(x):
for i in range(len(x)):
if freq11[i]>threshfreq5:
x[i]=0
return x
def highfreq11(x):
for i in range(len(x)):
if freq11[i]<threshfreq4:
x[i]=0
return x
def lowfreqdax1(x):
for i in range(len(x)):
if freqdax1[i]>threshfreq5:
x[i]=0
return x
def highfreqdax1(x):
for i in range(len(x)):
if freqdax1[i]<threshfreq4:
x[i]=0
return x
def lowfreqdax11(x):
for i in range(len(x)):
if freqdax11[i]>threshfreq5:
x[i]=0
return x
def highfreqdax11(x):
for i in range(len(x)):
if freqdax11[i]<threshfreq4:
x[i]=0
return x
def lowfreq7(x):
for i in range(len(x)):
if freqjse7[i]>threshfreq5:
x[i]=0
return x
def highfreq7(x):
for i in range(len(x)):
if freqjse7[i]<threshfreq4:
x[i]=0
return x
freq1=np.fft.rfftfreq(len(dji1))
freq11=np.fft.rfftfreq(len(dji11))
freqdax1=np.fft.rfftfreq(len(dax1))
freqdax11=np.fft.rfftfreq(len(dax11))
freqjse7=np.fft.rfftfreq(len(jse7))
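# The nearly identical helper functions above zero out FFT bins whose frequency lies
# above or below a threshold. A more general sketch (illustrative only, not used below)
# that covers both the low-pass and high-pass cases; unlike the helpers, it copies its
# input instead of zeroing bins in place:
def filter_fft(fft_vals, freqs, threshold, keep='low'):
    """Zero the bins above (keep='low') or below (keep='high') `threshold` cycles per sample."""
    out = fft_vals.copy()
    if keep == 'low':
        out[freqs > threshold] = 0
    else:
        out[freqs < threshold] = 0
    return out
# e.g. filter_fft(dji1fft, freq1, threshfreq5) returns the same values as
# lowfreq1(dji1fft) but leaves dji1fft untouched.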
lowdji1 = lowfreq1(dji1fft)
lowdax1 = lowfreqdax1(dax1fft)
plt.figure()
plt.plot(time1[1::],np.fft.irfft(lowdji1),'b')
plt.plot(timedax1[1::],np.fft.irfft(lowdax1),'r')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('Dow Jones and DAX Stock Price 2001-2010 (Smoothed out)')
plt.grid()
lowdji11 = lowfreq11(dji11fft)
lowdax11 = lowfreqdax11(dax11fft)
plt.figure()
plt.plot(time11[1::],np.fft.irfft(lowdji11),'b')
plt.plot(timedax11,np.fft.irfft(lowdax11),'r')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('Dow Jones and DAX Stock Price 2011-2020 (Smoothed out)')
plt.grid()
dji1fft = np.fft.rfft(dji1) # Recompute the FFTs: the low-pass helpers above zeroed bins of these arrays in place, so the originals are no longer intact
dji11fft = np.fft.rfft(dji11)
dax1fft = np.fft.rfft(dax1)
dax11fft = np.fft.rfft(dax11)
highdji1 = highfreq1(dji1fft)
highdax1 = highfreqdax1(dax1fft)
plt.figure()
plt.plot(time1[1::],np.fft.irfft(highdji1),'b')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('Dow Jones Stock Price 2001-2010 (Stability)')
plt.grid()
plt.figure()
plt.plot(timedax1[1::],np.fft.irfft(highdax1),'r')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('DAX Stock Price 2001-2010 (Stability)')
plt.grid()
highdji11 = highfreq11(dji11fft)
highdax11 = highfreqdax11(dax11fft)
plt.figure()
plt.plot(time11[2:-1],np.fft.irfft(highdji11)[1:-1],'b')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('Dow Jones Stock Price 2011-2020 (Stability)')
plt.grid()
plt.figure()
plt.plot(timedax11[1:-1],np.fft.irfft(highdax11)[1:-1],'r')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('DAX Stock Price 2011-2020 (Stability)')
plt.grid()
jse7fft = np.fft.rfft(jse7)
lowjse7 = lowfreq7(jse7fft)
plt.figure()
plt.plot(timejse7,np.fft.irfft(lowjse7),'g')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('JSE Stock Price 2007-2010 (Smoothed out)')
plt.grid()
jse7fft = np.fft.rfft(jse7)
highjse7 = highfreq7(jse7fft)
plt.figure()
plt.plot(timejse7[1:-1],np.fft.irfft(highjse7)[1:-1],'g')
plt.xlabel('Year')
plt.ylabel('Stock Price')
plt.title('JSE Stock Price 2007-2010 (Stability)')
plt.grid()
corr1 = np.convolve(dax11,dji11) # note: np.convolve computes a convolution; np.correlate would give the cross-correlation the variable name suggests
plt.figure()
plt.plot(corr1)
plt.xlabel('Lag (samples)')
plt.ylabel('Convolution value')
plt.title('Convolution of DAX and Dow Jones Prices 2011-2020')
plt.grid()
|
import brownie
import pytest
import math
@pytest.mark.parametrize("amount", [1, 100, 10**18])
def test_multiple_withdraw_mock(amount, alice, bob, charlie, dave, mock_vault, alcx, mock_ss_compounder):
prior_alcx_balance_alice = alcx.balanceOf(alice)
prior_alcx_balance_bob = alcx.balanceOf(bob)
prior_alcx_balance_charlie = alcx.balanceOf(charlie)
prior_alcx_balance_dave = alcx.balanceOf(dave)
for account in [alice, bob, charlie, dave]:
alcx.approve(mock_vault, alcx.totalSupply(), {'from': account})
mock_vault.deposit(amount, {'from': account})
for account in [bob, charlie, dave]:
assert mock_vault.balanceOf(account) == mock_vault.balanceOf(alice)
mock_vault.withdraw(amount, {'from': alice})
alice_fee = amount * 250 // 10000
assert alcx.balanceOf(alice) == prior_alcx_balance_alice - alice_fee
mock_vault.withdraw(amount, {'from': bob})
bob_gain = (alice_fee // 3)
bob_fee = (amount + bob_gain) * 250 // 10000
assert alcx.balanceOf(bob) == prior_alcx_balance_bob + bob_gain - bob_fee
mock_vault.withdraw(amount, {'from': charlie})
charlie_gain = bob_gain + (bob_fee // 2)
charlie_fee = (amount + charlie_gain) * 250 // 10000
assert math.isclose(alcx.balanceOf(charlie), prior_alcx_balance_charlie + charlie_gain - charlie_fee, rel_tol=1)
pool_balance = mock_ss_compounder.totalPoolBalance()
mock_vault.withdraw(amount, {'from': dave})
dave_gain = charlie_gain + charlie_fee
assert math.isclose(alcx.balanceOf(dave), prior_alcx_balance_dave + pool_balance - amount, rel_tol=1)
assert math.isclose(alcx.balanceOf(dave), prior_alcx_balance_dave + dave_gain, rel_tol=1)
assert mock_ss_compounder.totalPoolBalance() == 0
assert mock_vault.totalSupply() == 0
balances = 0
for account in [alice, bob, charlie, dave]:
balances += alcx.balanceOf(account)
assert mock_vault.balanceOf(account) == 0
assert balances == (prior_alcx_balance_alice + prior_alcx_balance_bob +
prior_alcx_balance_charlie + prior_alcx_balance_dave)
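# Illustrative only (not used by the tests above or below): the expected-value arithmetic
# in these tests assumes the mock vault charges a 250-basis-point (2.5%) withdrawal fee on
# the withdrawn amount plus any accrued gain, with the fee left behind for the remaining
# depositors. A minimal sketch of that calculation:
def _expected_withdrawal_fee(amount, gain=0, fee_bps=250):
    """Return the fee taken on a withdrawal of `amount` plus `gain` (integer math, as above)."""
    return (amount + gain) * fee_bps // 10000
# e.g. _expected_withdrawal_fee(10**18) reproduces `alice_fee` for the 10**18 case above.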
def test_with_simulated_harvest_mock(alice, bob, charlie, dave, mock_vault, alcx, mock_ss_compounder, mock_pool, owner):
amount = 1000
harvest = 400
prior_alcx_balance_alice = alcx.balanceOf(alice)
prior_alcx_balance_bob = alcx.balanceOf(bob)
prior_alcx_balance_charlie = alcx.balanceOf(charlie)
prior_alcx_balance_dave = alcx.balanceOf(dave)
for account in [alice, bob, charlie, dave]:
alcx.approve(mock_vault, alcx.totalSupply(), {'from': account})
mock_vault.deposit(amount, {'from': account})
for account in [bob, charlie, dave]:
assert mock_vault.balanceOf(account) == mock_vault.balanceOf(alice)
alcx.approve(mock_pool, harvest, {'from': owner})
mock_pool.deposit(0, harvest, {'from': owner})
mock_vault.withdraw(amount, {'from': alice})
harvest_gain = harvest // 4
alice_fee = (amount + harvest_gain) * 250 // 10000
assert alcx.balanceOf(alice) == prior_alcx_balance_alice + harvest_gain - alice_fee
mock_vault.withdraw(amount, {'from': bob})
bob_gain = (alice_fee // 3) + harvest_gain
bob_fee = (amount + bob_gain) * 250 // 10000
assert alcx.balanceOf(bob) == prior_alcx_balance_bob + bob_gain - bob_fee
mock_vault.withdraw(amount, {'from': charlie})
charlie_gain = bob_gain + (bob_fee // 2) + harvest_gain
charlie_fee = (amount + charlie_gain) * 250 // 10000
assert math.isclose(alcx.balanceOf(charlie), prior_alcx_balance_charlie + charlie_gain - charlie_fee, rel_tol=1)
pool_balance = mock_ss_compounder.totalPoolBalance()
mock_vault.withdraw(amount, {'from': dave})
dave_gain = charlie_gain + charlie_fee + harvest_gain
assert math.isclose(alcx.balanceOf(dave), prior_alcx_balance_dave + pool_balance - amount, rel_tol=1)
assert math.isclose(alcx.balanceOf(dave), prior_alcx_balance_dave + dave_gain, rel_tol=1)
assert mock_ss_compounder.totalPoolBalance() == 0
assert mock_vault.totalSupply() == 0
balances = 0
for account in [alice, bob, charlie, dave]:
balances += alcx.balanceOf(account)
assert mock_vault.balanceOf(account) == 0
|
from django.conf.urls import include, url
from django.urls import path
from . import views
app_name = 'notes'
urlpatterns = [
path('',views.WorkToNote, name='notes'),
# url(r'^$', views.WorkToNote, name='notes'),
]
|
# -*- coding: utf-8 -*-
from plugpy.ext.skype import SimpleSkypePlugin
import itertools
import mimetools
import mimetypes
from cStringIO import StringIO
import urllib2
import os
import feedparser
from pyquery import PyQuery as pq
import random
import tempfile
import Image
url = "http://pinkimg.blog57.fc2.com/?xml"
class MultiPartForm(object):
"""Accumulate the data to be used when posting a form."""
def __init__(self):
self.form_fields = []
self.files = []
self.boundary = mimetools.choose_boundary()
return
def get_content_type(self):
return 'multipart/form-data; boundary=%s' % self.boundary
def add_field(self, name, value):
"""Add a simple field to the form data."""
self.form_fields.append((name, value))
return
def add_file(self, fieldname, filename, fileHandle, mimetype=None):
"""Add a file to be uploaded."""
body = fileHandle.read()
if mimetype is None:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
self.files.append((fieldname, filename, mimetype, body))
return
def __str__(self):
"""Return a string representing the form data, including attached files."""
# Build a list of lists, each containing "lines" of the
# request. Each part is separated by a boundary string.
# Once the list is built, return a string where each
# line is separated by '\r\n'.
parts = []
part_boundary = '--' + self.boundary
# Add the form fields
parts.extend(
[ part_boundary,
'Content-Disposition: form-data; name="%s"' % name,
'',
value,
]
for name, value in self.form_fields
)
# Add the files to upload
parts.extend(
[ part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % \
(field_name, filename),
'Content-Type: %s' % content_type,
'',
body,
]
for field_name, filename, content_type, body in self.files
)
# Flatten the list and add closing boundary marker,
# then return CR+LF separated data
flattened = list(itertools.chain(*parts))
flattened.append('--' + self.boundary + '--')
flattened.append('')
return '\r\n'.join(flattened)
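# A minimal usage sketch (not part of the original script) showing how a MultiPartForm
# body and its matching Content-Type header are produced; the field/file names here are
# illustrative only.
def _example_multipart_form():
    form = MultiPartForm()
    form.add_field('id', '')
    form.add_file('imagedata', 'example.png', fileHandle=StringIO('fake png bytes'))
    # str(form) renders the full multipart body; the Content-Type header (with boundary)
    # comes from form.get_content_type().
    return form.get_content_type(), str(form)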
def get_entries():
urls = []
d = feedparser.parse(url)
for a in d.entries:
urls.append(a.link)
return urls
def get_image_list(url):
urls = []
conn = urllib2.urlopen(url)
page = conn.read()
d = pq(page)
for a in d(".entry_text img"):
src = pq(a).attr.src
if src.endswith("s.jpg"):
urls.append(src)
return urls
def download(urls):
imgs = []
for url in urls:
conn = urllib2.urlopen(url)
data = conn.read()
f = tempfile.TemporaryFile()
f.write(data)
f.flush()
f.seek(0)
img = Image.open(f)
imgs.append(img)
return concat_img(imgs)
def concat_img(imgs):
w = max(i.size[0] for i in imgs)
h = sum(i.size[1] for i in imgs)
result = Image.new("RGBA", (w, h))
y = 0
for i in imgs:
result.paste(i, (0, y))
y += i.size[1]
return result
def get_concat_image():
urls = get_entries()
url = random.choice(urls)
image_urls = get_image_list(url)
return download(image_urls)
def pink2gyazo():
image = get_concat_image()
import tempfile
tmp = tempfile.TemporaryFile()
image.save(tmp, "png")
tmp.seek(0)
form = MultiPartForm()
form.add_field('id', '')
form.add_file('imagedata', 'gyazo.com',
fileHandle=tmp)
request = urllib2.Request('http://gyazo.com/upload.cgi')
request.add_header('User-agent', 'Gyazo/1.0')
body = str(form)
request.add_header('Content-type', form.get_content_type())
request.add_header('Content-length', len(body))
request.add_data(body)
return urllib2.urlopen(request).read()
class PinkPlugin(SimpleSkypePlugin):
alias = "#pink"
def on_message(self, *args):
return u"これでヌイてね %s" % pink2gyazo()
|
# Python - 3.6.0
stray = lambda arr: [n for n in set(arr) if arr.count(n) == 1][0]
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2015 (ita)
"""
Tool Description
================
This tool helps with finding Qt5 tools and libraries,
and also provides syntactic sugar for using Qt5 tools.
The following snippet illustrates the tool usage::
def options(opt):
opt.load('compiler_cxx qt5')
def configure(conf):
conf.load('compiler_cxx qt5')
def build(bld):
bld(
features = 'qt5 cxx cxxprogram',
uselib = 'QTCORE QTGUI QTOPENGL QTSVG',
source = 'main.cpp textures.qrc aboutDialog.ui',
target = 'window',
)
Here, the UI description and resource files will be processed
to generate code.
Usage
=====
Load the "qt5" tool.
You also need to edit your sources accordingly:
- the normal way of doing things is to have your C++ files
include the .moc file.
This is regarded as the best practice (and provides much faster
compilations).
It also implies that the include paths have been set properly.
- to have the include paths added automatically, use the following::
from waflib.TaskGen import feature, before_method, after_method
@feature('cxx')
@after_method('process_source')
@before_method('apply_incpaths')
def add_includes_paths(self):
incs = set(self.to_list(getattr(self, 'includes', '')))
for x in self.compiled_tasks:
incs.add(x.inputs[0].parent.path_from(self.path))
self.includes = list(incs)
Note: another tool provides Qt processing that does not require
.moc includes, see 'playground/slow_qt/'.
A few options (--qt{dir,bin,...}) and environment variables
(QT5_{ROOT,DIR,MOC,UIC,XCOMPILE}) allow finer tuning of the tool,
tool path selection, etc; please read the source for more info.
"""
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml = False
ContentHandler = object
else:
has_xml = True
import os, sys
from waflib.Tools import cxx
from waflib import Task, Utils, Options, Errors, Context
from waflib.TaskGen import feature, after_method, extension
from waflib.Configure import conf
from waflib import Logs
MOC_H = ['.h', '.hpp', '.hxx', '.hh']
"""
File extensions associated to the .moc files
"""
EXT_RCC = ['.qrc']
"""
File extension for the resource (.qrc) files
"""
EXT_UI = ['.ui']
"""
File extension for the user interface (.ui) files
"""
EXT_QT5 = ['.cpp', '.cc', '.cxx', '.C']
"""
File extensions of C++ files that may require a .moc processing
"""
QT5_LIBS = '''
qtmain
Qt5Bluetooth
Qt5CLucene
Qt5Concurrent
Qt5Core
Qt5DBus
Qt5Declarative
Qt5DesignerComponents
Qt5Designer
Qt5Gui
Qt5Help
Qt5MultimediaQuick_p
Qt5Multimedia
Qt5MultimediaWidgets
Qt5Network
Qt5Nfc
Qt5OpenGL
Qt5Positioning
Qt5PrintSupport
Qt5Qml
Qt5QuickParticles
Qt5Quick
Qt5QuickTest
Qt5Script
Qt5ScriptTools
Qt5Sensors
Qt5SerialPort
Qt5Sql
Qt5Svg
Qt5Test
Qt5WebKit
Qt5WebKitWidgets
Qt5Widgets
Qt5WinExtras
Qt5X11Extras
Qt5XmlPatterns
Qt5Xml'''
class qxx(Task.classes['cxx']):
"""
Each C++ file can have zero or several .moc files to create.
They are known only when the files are scanned (preprocessor)
To avoid scanning the c++ files each time (parsing C/C++), the results
are retrieved from the task cache (bld.node_deps/bld.raw_deps).
The moc tasks are also created *dynamically* during the build.
"""
def __init__(self, *k, **kw):
Task.Task.__init__(self, *k, **kw)
self.moc_done = 0
def runnable_status(self):
"""
Compute the task signature to make sure the scanner was executed. Create the
moc tasks by using :py:meth:`waflib.Tools.qt5.qxx.add_moc_tasks` (if necessary),
then postpone the task execution (there is no need to recompute the task signature).
"""
if self.moc_done:
return Task.Task.runnable_status(self)
else:
for t in self.run_after:
if not t.hasrun:
return Task.ASK_LATER
self.add_moc_tasks()
return Task.Task.runnable_status(self)
def create_moc_task(self, h_node, m_node):
"""
If several libraries use the same classes, it is possible that moc will run several times (Issue 1318)
It is not possible to change the file names, but we can assume that the moc transformation will be identical,
and the moc tasks can be shared in a global cache.
The defines passed to moc will then depend on task generator order. If this is not acceptable, then
use the tool slow_qt5 instead (and enjoy the slow builds... :-( )
"""
try:
moc_cache = self.generator.bld.moc_cache
except AttributeError:
moc_cache = self.generator.bld.moc_cache = {}
try:
return moc_cache[h_node]
except KeyError:
tsk = moc_cache[h_node] = Task.classes['moc'](env=self.env, generator=self.generator)
tsk.set_inputs(h_node)
tsk.set_outputs(m_node)
if self.generator:
self.generator.tasks.append(tsk)
# direct injection in the build phase (safe because called from the main thread)
gen = self.generator.bld.producer
gen.outstanding.insert(0, tsk)
gen.total += 1
return tsk
else:
# remove the signature, it must be recomputed with the moc task
delattr(self, 'cache_sig')
def moc_h_ext(self):
ext = []
try:
ext = Options.options.qt_header_ext.split()
except AttributeError:
pass
if not ext:
ext = MOC_H
return ext
def add_moc_tasks(self):
"""
Create the moc tasks by looking in ``bld.raw_deps[self.uid()]``
"""
node = self.inputs[0]
bld = self.generator.bld
try:
# compute the signature once to know if there is a moc file to create
self.signature()
except KeyError:
# the moc file may be referenced somewhere else
pass
else:
# remove the signature, it must be recomputed with the moc task
delattr(self, 'cache_sig')
include_nodes = [node.parent] + self.generator.includes_nodes
moctasks = []
mocfiles = set([])
for d in bld.raw_deps.get(self.uid(), []):
if not d.endswith('.moc'):
continue
# process that base.moc only once
if d in mocfiles:
continue
mocfiles.add(d)
# find the source associated with the moc file
h_node = None
base2 = d[:-4]
for x in include_nodes:
for e in self.moc_h_ext():
h_node = x.find_node(base2 + e)
if h_node:
break
if h_node:
m_node = h_node.change_ext('.moc')
break
else:
# foo.cpp -> foo.cpp.moc
for k in EXT_QT5:
if base2.endswith(k):
for x in include_nodes:
h_node = x.find_node(base2)
if h_node:
break
if h_node:
m_node = h_node.change_ext(k + '.moc')
break
if not h_node:
raise Errors.WafError('No source found for %r which is a moc file' % d)
# create the moc task
task = self.create_moc_task(h_node, m_node)
moctasks.append(task)
# simple scheduler dependency: run the moc task before others
self.run_after.update(set(moctasks))
self.moc_done = 1
class trans_update(Task.Task):
"""Update a .ts files from a list of C++ files"""
run_str = '${QT_LUPDATE} ${SRC} -ts ${TGT}'
color = 'BLUE'
Task.update_outputs(trans_update)
class XMLHandler(ContentHandler):
"""
Parser for *.qrc* files
"""
def __init__(self):
self.buf = []
self.files = []
def startElement(self, name, attrs):
if name == 'file':
self.buf = []
def endElement(self, name):
if name == 'file':
self.files.append(str(''.join(self.buf)))
def characters(self, cars):
self.buf.append(cars)
@extension(*EXT_RCC)
def create_rcc_task(self, node):
"Create rcc and cxx tasks for *.qrc* files"
rcnode = node.change_ext('_rc.cpp')
self.create_task('rcc', node, rcnode)
cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o'))
try:
self.compiled_tasks.append(cpptask)
except AttributeError:
self.compiled_tasks = [cpptask]
return cpptask
@extension(*EXT_UI)
def create_uic_task(self, node):
"hook for uic tasks"
uictask = self.create_task('ui5', node)
uictask.outputs = [self.path.find_or_declare(self.env['ui_PATTERN'] % node.name[:-3])]
@extension('.ts')
def add_lang(self, node):
"""add all the .ts file into self.lang"""
self.lang = self.to_list(getattr(self, 'lang', [])) + [node]
@feature('qt5')
@after_method('apply_link')
def apply_qt5(self):
"""
Add MOC_FLAGS which may be necessary for moc::
def build(bld):
bld.program(features='qt5', source='main.cpp', target='app', use='QTCORE')
The additional parameters are:
:param lang: list of translation files (\*.ts) to process
:type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension
:param update: whether to process the C++ files to update the \*.ts files (use **waf --translate**)
:type update: bool
:param langname: if given, transform the \*.ts files into a .qrc files to include in the binary file
:type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension
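A translation setup might look like this (illustrative; the target and .ts base names are placeholders)::
    bld.program(features='qt5', source='main.cpp', target='app', use='QTCORE',
        lang=['app_fr', 'app_de'], langname='translations')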
"""
if getattr(self, 'lang', None):
qmtasks = []
for x in self.to_list(self.lang):
if isinstance(x, str):
x = self.path.find_resource(x + '.ts')
qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.qm')))
if getattr(self, 'update', None) and Options.options.trans_qt5:
cxxnodes = [a.inputs[0] for a in self.compiled_tasks] + [
a.inputs[0] for a in self.tasks if getattr(a, 'inputs', None) and a.inputs[0].name.endswith('.ui')]
for x in qmtasks:
self.create_task('trans_update', cxxnodes, x.inputs)
if getattr(self, 'langname', None):
qmnodes = [x.outputs[0] for x in qmtasks]
rcnode = self.langname
if isinstance(rcnode, str):
rcnode = self.path.find_or_declare(rcnode + '.qrc')
t = self.create_task('qm2rcc', qmnodes, rcnode)
k = create_rcc_task(self, t.outputs[0])
self.link_task.inputs.append(k.outputs[0])
lst = []
for flag in self.to_list(self.env['CXXFLAGS']):
if len(flag) < 2: continue
f = flag[0:2]
if f in ('-D', '-I', '/D', '/I'):
if (f[0] == '/'):
lst.append('-' + flag[1:])
else:
lst.append(flag)
self.env.append_value('MOC_FLAGS', lst)
@extension(*EXT_QT5)
def cxx_hook(self, node):
"""
Re-map C++ file extensions to the :py:class:`waflib.Tools.qt5.qxx` task.
"""
return self.create_compiled_task('qxx', node)
class rcc(Task.Task):
"""
Process *.qrc* files
"""
color = 'BLUE'
run_str = '${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}'
ext_out = ['.h']
def rcname(self):
return os.path.splitext(self.inputs[0].name)[0]
def scan(self):
"""Parse the *.qrc* files"""
if not has_xml:
Logs.error('no xml support was found, the rcc dependencies will be incomplete!')
return ([], [])
parser = make_parser()
curHandler = XMLHandler()
parser.setContentHandler(curHandler)
fi = open(self.inputs[0].abspath(), 'r')
try:
parser.parse(fi)
finally:
fi.close()
nodes = []
names = []
root = self.inputs[0].parent
for x in curHandler.files:
nd = root.find_resource(x)
if nd: nodes.append(nd)
else: names.append(x)
return (nodes, names)
class moc(Task.Task):
"""
Create *.moc* files
"""
color = 'BLUE'
run_str = '${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}'
class ui5(Task.Task):
"""
Process *.ui* files
"""
color = 'BLUE'
run_str = '${QT_UIC} ${SRC} -o ${TGT}'
ext_out = ['.h']
class ts2qm(Task.Task):
"""
Create *.qm* files from *.ts* files
"""
color = 'BLUE'
run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}'
class qm2rcc(Task.Task):
"""
Transform *.qm* files into *.rc* files
"""
color = 'BLUE'
after = 'ts2qm'
def run(self):
"""Create a qrc file including the inputs"""
txt = '\n'.join(['<file>%s</file>' % k.path_from(self.outputs[0].parent) for k in self.inputs])
code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt
self.outputs[0].write(code)
def configure(self):
"""
Besides the configuration options, the environment variable QT5_ROOT may be used
to give the location of the qt5 libraries (absolute path).
The detection will use the program *pkg-config* through :py:func:`waflib.Tools.config_c.check_cfg`
"""
self.find_qt5_binaries()
self.set_qt5_libs_to_check()
self.set_qt5_defines()
self.find_qt5_libraries()
self.add_qt5_rpath()
self.simplify_qt5_libs()
@conf
def find_qt5_binaries(self):
env = self.env
opt = Options.options
qtdir = getattr(opt, 'qtdir', '')
qtbin = getattr(opt, 'qtbin', '')
paths = []
if qtdir:
qtbin = os.path.join(qtdir, 'bin')
# the qt directory has been given from QT5_ROOT - deduce the qt binary path
if not qtdir:
qtdir = os.environ.get('QT5_ROOT', '')
qtbin = os.environ.get('QT5_BIN', None) or os.path.join(qtdir, 'bin')
if qtbin:
paths = [qtbin]
# no qtdir, look in the path and in /usr/local/Trolltech
if not qtdir:
paths = os.environ.get('PATH', '').split(os.pathsep)
paths.append('/usr/share/qt5/bin/')
try:
lst = Utils.listdir('/usr/local/Trolltech/')
except OSError:
pass
else:
if lst:
lst.sort()
lst.reverse()
# keep the highest version
qtdir = '/usr/local/Trolltech/%s/' % lst[0]
qtbin = os.path.join(qtdir, 'bin')
paths.append(qtbin)
# at the end, try to find qmake in the paths given
# keep the one with the highest version
cand = None
prev_ver = ['5', '0', '0']
for qmk in ('qmake-qt5', 'qmake5', 'qmake'):
try:
qmake = self.find_program(qmk, path_list=paths)
except self.errors.ConfigurationError:
pass
else:
try:
version = self.cmd_and_log(qmake + ['-query', 'QT_VERSION']).strip()
except self.errors.WafError:
pass
else:
if version:
new_ver = version.split('.')
if new_ver > prev_ver:
cand = qmake
prev_ver = new_ver
# qmake could not be found easily, rely on qtchooser
if not cand:
try:
self.find_program('qtchooser')
except self.errors.ConfigurationError:
pass
else:
cmd = self.env.QTCHOOSER + ['-qt=5', '-run-tool=qmake']
try:
version = self.cmd_and_log(cmd + ['-query', 'QT_VERSION'])
except self.errors.WafError:
pass
else:
cand = cmd
if cand:
self.env.QMAKE = cand
else:
self.fatal('Could not find qmake for qt5')
self.env.QT_INSTALL_BINS = qtbin = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_BINS']).strip() + os.sep
paths.insert(0, qtbin)
def find_bin(lst, var):
if var in env:
return
for f in lst:
try:
ret = self.find_program(f, path_list=paths)
except self.errors.ConfigurationError:
pass
else:
env[var]=ret
break
find_bin(['uic-qt5', 'uic'], 'QT_UIC')
if not env.QT_UIC:
self.fatal('cannot find the uic compiler for qt5')
self.start_msg('Checking for uic version')
uicver = self.cmd_and_log(env.QT_UIC + ['-version'], output=Context.BOTH)
uicver = ''.join(uicver).strip()
uicver = uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt', '')
self.end_msg(uicver)
if uicver.find(' 3.') != -1 or uicver.find(' 4.') != -1:
self.fatal('this uic compiler is for qt3 or qt4, add uic for qt5 to your path')
find_bin(['moc-qt5', 'moc'], 'QT_MOC')
find_bin(['rcc-qt5', 'rcc'], 'QT_RCC')
find_bin(['lrelease-qt5', 'lrelease'], 'QT_LRELEASE')
find_bin(['lupdate-qt5', 'lupdate'], 'QT_LUPDATE')
env['UIC_ST'] = '%s -o %s'
env['MOC_ST'] = '-o'
env['ui_PATTERN'] = 'ui_%s.h'
env['QT_LRELEASE_FLAGS'] = ['-silent']
env.MOCCPPPATH_ST = '-I%s'
env.MOCDEFINES_ST = '-D%s'
@conf
def find_qt5_libraries(self):
qtlibs = getattr(Options.options, 'qtlibs', None) or os.environ.get("QT5_LIBDIR", None)
if not qtlibs:
try:
qtlibs = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_LIBS']).strip()
except Errors.WafError:
qtdir = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_PREFIX']).strip() + os.sep
qtlibs = os.path.join(qtdir, 'lib')
self.msg('Found the Qt5 libraries in', qtlibs)
qtincludes = os.environ.get("QT5_INCLUDES", None) or self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_HEADERS']).strip()
env = self.env
if not 'PKG_CONFIG_PATH' in os.environ:
os.environ['PKG_CONFIG_PATH'] = '%s:%s/pkgconfig:/usr/lib/qt5/lib/pkgconfig:/opt/qt5/lib/pkgconfig:/usr/lib/qt5/lib:/opt/qt5/lib' % (qtlibs, qtlibs)
try:
if os.environ.get("QT5_XCOMPILE", None):
raise self.errors.ConfigurationError()
self.check_cfg(atleast_pkgconfig_version='0.1')
except self.errors.ConfigurationError:
for i in self.qt5_vars:
uselib = i.upper()
if Utils.unversioned_sys_platform() == "darwin":
# Since at least Qt 4.7.3, each library is located in a separate directory
frameworkName = i + ".framework"
qtDynamicLib = os.path.join(qtlibs, frameworkName, i)
if os.path.exists(qtDynamicLib):
env.append_unique('FRAMEWORK_' + uselib, i)
self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN')
else:
self.msg('Checking for %s' % i, False, 'YELLOW')
env.append_unique('INCLUDES_' + uselib, os.path.join(qtlibs, frameworkName, 'Headers'))
elif env.DEST_OS != "win32":
qtDynamicLib = os.path.join(qtlibs, "lib" + i + ".so")
qtStaticLib = os.path.join(qtlibs, "lib" + i + ".a")
if os.path.exists(qtDynamicLib):
env.append_unique('LIB_' + uselib, i)
self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN')
elif os.path.exists(qtStaticLib):
env.append_unique('LIB_' + uselib, i)
self.msg('Checking for %s' % i, qtStaticLib, 'GREEN')
else:
self.msg('Checking for %s' % i, False, 'YELLOW')
env.append_unique('LIBPATH_' + uselib, qtlibs)
env.append_unique('INCLUDES_' + uselib, qtincludes)
env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i))
else:
# Release library names are like QtCore5
for k in ("lib%s.a", "lib%s5.a", "%s.lib", "%s5.lib"):
lib = os.path.join(qtlibs, k % i)
if os.path.exists(lib):
env.append_unique('LIB_' + uselib, i + k[k.find("%s") + 2 : k.find('.')])
self.msg('Checking for %s' % i, lib, 'GREEN')
break
else:
self.msg('Checking for %s' % i, False, 'YELLOW')
env.append_unique('LIBPATH_' + uselib, qtlibs)
env.append_unique('INCLUDES_' + uselib, qtincludes)
env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i.replace('Qt5', 'Qt')))
# Debug library names are like QtCore5d
uselib = i.upper() + "_debug"
for k in ("lib%sd.a", "lib%sd5.a", "%sd.lib", "%sd5.lib"):
lib = os.path.join(qtlibs, k % i)
if os.path.exists(lib):
env.append_unique('LIB_' + uselib, i + k[k.find("%s") + 2 : k.find('.')])
self.msg('Checking for %s' % i, lib, 'GREEN')
break
else:
self.msg('Checking for %s' % i, False, 'YELLOW')
env.append_unique('LIBPATH_' + uselib, qtlibs)
env.append_unique('INCLUDES_' + uselib, qtincludes)
env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i.replace('Qt5', 'Qt')))
else:
for i in self.qt5_vars_debug + self.qt5_vars:
self.check_cfg(package=i, args='--cflags --libs', mandatory=False)
@conf
def simplify_qt5_libs(self):
# the libpaths make really long command-lines
# remove the qtcore ones from qtgui, etc
env = self.env
def process_lib(vars_, coreval):
for d in vars_:
var = d.upper()
if var == 'QTCORE':
continue
value = env['LIBPATH_'+var]
if value:
core = env[coreval]
accu = []
for lib in value:
if lib in core:
continue
accu.append(lib)
env['LIBPATH_'+var] = accu
process_lib(self.qt5_vars, 'LIBPATH_QTCORE')
process_lib(self.qt5_vars_debug, 'LIBPATH_QTCORE_DEBUG')
@conf
def add_qt5_rpath(self):
# rpath if wanted
env = self.env
if getattr(Options.options, 'want_rpath', False):
def process_rpath(vars_, coreval):
for d in vars_:
var = d.upper()
value = env['LIBPATH_'+var]
if value:
core = env[coreval]
accu = []
for lib in value:
if var != 'QTCORE':
if lib in core:
continue
accu.append('-Wl,--rpath='+lib)
env['RPATH_'+var] = accu
process_rpath(self.qt5_vars, 'LIBPATH_QTCORE')
process_rpath(self.qt5_vars_debug, 'LIBPATH_QTCORE_DEBUG')
@conf
def set_qt5_libs_to_check(self):
if not hasattr(self, 'qt5_vars'):
self.qt5_vars = QT5_LIBS
self.qt5_vars = Utils.to_list(self.qt5_vars)
if not hasattr(self, 'qt5_vars_debug'):
self.qt5_vars_debug = [a + '_debug' for a in self.qt5_vars]
self.qt5_vars_debug = Utils.to_list(self.qt5_vars_debug)
@conf
def set_qt5_defines(self):
if sys.platform != 'win32':
return
for x in self.qt5_vars:
y=x.replace('Qt5', 'Qt')[2:].upper()
self.env.append_unique('DEFINES_%s' % x.upper(), 'QT_%s_LIB' % y)
self.env.append_unique('DEFINES_%s_DEBUG' % x.upper(), 'QT_%s_LIB' % y)
def options(opt):
"""
Command-line options
"""
opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries')
opt.add_option('--header-ext',
type='string',
default='',
help='header extension for moc files',
dest='qt_header_ext')
for i in 'qtdir qtbin qtlibs'.split():
opt.add_option('--'+i, type='string', default='', dest=i)
opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt5", default=False)
|
from macropy.core.test.macros.quote_macro import macros, my_macro
def run(x):
pass
pass
with my_macro:
pass
pass
return x
|
__author__ = 'yinpengcheng'
|
import numpy as np
#np.set_printoptions(threshold=np.nan)
import os
from models_pointnet import QueryAndGroup, PointNetMLP
from config import Config as cfg
import torch.utils.data
from utils import create_dir
torch.set_printoptions(precision=8)
path_to_pcl_data = "/mnt/raid/pablo/data/FlyingThings3D/pointcloud_voxelgrid_15_15_10_10_5_40/pointcloud_voxelgrid"
path_to_voxels_xyz_features = "/mnt/raid/pablo/data/FlyingThings3D/pointcloud_voxelgrid_15_15_10_10_5_40/voxels_xyz_features"
path_to_voxels_xyz_normals_features = "/mnt/raid/pablo/data/FlyingThings3D/pointcloud_voxelgrid_15_15_10_10_5_40/voxels_xyz_normals_features"
path_to_voxels_features = "/mnt/raid/pablo/data/FlyingThings3D/pointcloud_voxelgrid_15_15_10_10_5_40/voxels_features"
path_to_voxels_features_normals = "/mnt/raid/pablo/data/FlyingThings3D/pointcloud_voxelgrid_15_15_10_10_5_40/voxels_features_normals"
# data_splits = ["TEST", "TRAIN"]
# letters = ["A", "B", "C"]
data_splits = ["TRAIN"]
letters = ["B"]
nsample = 16
model = QueryAndGroup(npoints=cfg.T, radius=0.4, nsample=nsample)
mlp = PointNetMLP(mlp_spec=[6, cfg.nfeat // 2, cfg.nfeat // 2, cfg.nfeat])
SPLIT = "TRAIN"
LETTER = "B"
NUMBER = "0594"
just_one_sample = True
print("hallo")
with torch.no_grad():
model.eval()
model.cuda()
# mlp.eval()
# mlp.cuda()
for data_split in data_splits:
if data_split != SPLIT and just_one_sample:
continue
for letter in letters:
if letter != LETTER and just_one_sample:
continue
for number in sorted(os.listdir(os.path.join(path_to_pcl_data, data_split, letter))):
if number != NUMBER and just_one_sample:
continue
if int(number) < 585 or int(number) > 599:
continue
path_to_pcl_sequence = os.path.join(path_to_pcl_data, data_split, letter, number)
print(path_to_pcl_sequence)
path_to_voxels_xyz_features_sequence = os.path.join(path_to_voxels_xyz_features, data_split, letter, number)
path_to_voxels_xyz_normals_features_sequence = os.path.join(path_to_voxels_xyz_normals_features, data_split, letter, number)
path_to_voxels_features_sequence = os.path.join(path_to_voxels_features, data_split, letter, number)
path_to_voxels_features_normals_sequence = os.path.join(path_to_voxels_features_normals, data_split, letter, number)
create_dir(path_to_voxels_xyz_features_sequence)
create_dir(path_to_voxels_xyz_normals_features_sequence)
create_dir(path_to_voxels_features_sequence)
create_dir(path_to_voxels_features_normals_sequence)
######################################################
######################################################
###################################################
# start = torch.cuda.Event(enable_timing=True)
# end = torch.cuda.Event(enable_timing=True)
# start.record()
for sample in sorted(os.listdir(os.path.join(path_to_pcl_sequence))):
path_to_pcl_sample = os.path.join(path_to_pcl_sequence, sample)
path_to_voxels_xyz_features_sample = os.path.join(path_to_voxels_xyz_features_sequence, sample)
path_to_voxels_xyz_normals_features_sample = os.path.join(path_to_voxels_xyz_normals_features_sequence, sample)
path_to_voxels_features_sample = os.path.join(path_to_voxels_features_sequence, sample)
path_to_voxels_features_normals_sample = os.path.join(path_to_voxels_features_normals_sequence, sample)
#print()
# print(path_to_voxels_xyz_features_sample)
# print(path_to_voxels_xyz_normals_features_sample)
# print(path_to_voxels_features_sample)
# print(path_to_voxels_features_normals_sample)
pcl_data = np.load(path_to_pcl_sample)
points = pcl_data['points']
normals = pcl_data['normals']
voxel_coords = pcl_data['voxel_coords']
inv_ind = pcl_data['inv_ind']
voxels_xyz_features = []
voxels_xyz_normals_features = []
voxels_features = []
voxels_features_normals = []
points_cuda = torch.cuda.FloatTensor(points).unsqueeze(0)
normals_cuda = torch.cuda.FloatTensor(normals).unsqueeze(0)
for i in range(len(voxel_coords)):
mask = inv_ind == i
pts = points[mask]
num_pts = pts.shape[0]
pts = torch.cuda.FloatTensor(pts)
pts = pts.unsqueeze(0)
n = normals[mask]
n = torch.cuda.FloatTensor(n)
n = n.unsqueeze(0)
## Compute centroids and features
new_xyz, new_normals, new_features, new_features_normals = \
model((points_cuda, normals_cuda, pts, n))
print(new_xyz.shape)
print(new_normals.shape)
print(new_features.shape)
print(new_features_normals.shape)
# new_xyz w/o normals
new_xyz_centered = new_xyz - new_xyz.mean(dim=1)
new_xyz_concat = torch.cat((new_xyz, new_xyz_centered), 2)
new_xyz_feature = new_xyz_concat.unsqueeze(3).permute(0, 2, 1, 3)
# new_xyz w/ normals
new_xyz_normals_concat = torch.cat((new_xyz, new_xyz_centered, new_normals), 2)
new_xyz_normals_feature = new_xyz_normals_concat.unsqueeze(3).permute(0, 2, 1, 3)
print(new_xyz_feature.shape, new_xyz_normals_feature.shape)
new_xyz_feature = new_xyz_feature.permute(0, 2, 3, 1)
new_xyz_normals_feature = new_xyz_normals_feature.permute(0, 2, 3, 1)
new_features = new_features.permute(0, 2, 3, 1)
new_features_normals = new_features_normals.permute(0, 2, 3, 1)
#print(new_xyz_feature.shape, new_xyz_normals_feature.shape, new_features.shape, new_features_normals.shape)
## Create mask
mask_repeated = np.zeros(cfg.T)
mask_repeated[:num_pts] = 1
mask_repeated = torch.cuda.FloatTensor(mask_repeated)
mask_new_xyz_feature = mask_repeated.unsqueeze(0).unsqueeze(2).unsqueeze(3).repeat(1, 1, 1, 6)
mask_new_xyz_normals_feature = mask_repeated.unsqueeze(0).unsqueeze(2).unsqueeze(3).repeat(1, 1, 1, 9)
mask_new_features = mask_repeated.unsqueeze(0).unsqueeze(2).unsqueeze(3).repeat(1, 1, nsample, 3)
mask_new_features_normals = mask_repeated.unsqueeze(0).unsqueeze(2).unsqueeze(3).repeat(1, 1, nsample, 6)
# print(mask_new_xyz_feature.shape, mask_new_xyz_normals_feature.shape, mask_new_features.shape, mask_new_features_normals.shape)
# print(mask_new_xyz_feature, mask_new_xyz_normals_feature, mask_new_features, mask_new_features_normals)
## Mask points and features
new_xyz_feature *= mask_new_xyz_feature
new_xyz_normals_feature *= mask_new_xyz_normals_feature
new_features *= mask_new_features
new_features_normals *= mask_new_features_normals
new_xyz_feature = new_xyz_feature.permute(0, 3, 1, 2)
new_xyz_normals_feature = new_xyz_normals_feature.permute(0, 3, 1, 2)
new_features = new_features.permute(0, 3, 1, 2)
new_features_normals = new_features_normals.permute(0, 3, 1, 2)
print(new_xyz_feature.shape, new_xyz_normals_feature.shape, new_features.shape, new_features_normals.shape)
voxels_xyz_features.append(new_xyz_feature.cpu().numpy())
voxels_xyz_normals_features.append(new_xyz_normals_feature.cpu().numpy())
voxels_features.append(new_features.cpu().numpy())
voxels_features_normals.append(new_features_normals.cpu().numpy())
voxels_xyz_features = np.concatenate(voxels_xyz_features)
voxels_xyz_normals_features = np.concatenate(voxels_xyz_normals_features)
voxels_features = np.concatenate(voxels_features)
voxels_features_normals = np.concatenate(voxels_features_normals)
##################################################
##################################################
# voxels_features_normals = torch.cuda.FloatTensor(voxels_features_normals)
# print(voxels_features_normals.shape)
# print(voxels_features_normals[0])
#
# # voxels_features_normals = voxels_features_normals.unsqueeze(0)
# # print(voxels_features_normals.shape)
#
# voxels_features_normals = mlp(voxels_features_normals)
# print(voxels_features_normals.shape)
# print(voxels_features_normals[0])
##################################################
#####################################################
# np.savez(path_to_voxels_xyz_features_sample,
# voxels_features=voxels_xyz_features,
# voxels_coords=voxel_coords,
# allow_pickle=False)
#
# np.savez(path_to_voxels_xyz_normals_features_sample,
# voxels_features=voxels_xyz_normals_features,
# voxels_coords=voxel_coords,
# allow_pickle=False)
#
# np.savez(path_to_voxels_features_sample,
# voxels_features=voxels_features,
# voxels_coords=voxel_coords,
# allow_pickle=False)
#
# np.savez(path_to_voxels_features_normals_sample,
# voxels_features=voxels_features_normals,
# voxels_coords=voxel_coords,
# allow_pickle=False)
###################################################
# end.record()
# torch.cuda.synchronize()
# print("timing voxelNet: ", start.elapsed_time(end))
# ###################################################
|
from cloudshell.shell.core.driver_context import ResourceCommandContext, AutoLoadDetails, AutoLoadAttribute, \
AutoLoadResource
from collections import defaultdict
class LegacyUtils(object):
def __init__(self):
self._datamodel_clss_dict = self.__generate_datamodel_classes_dict()
def migrate_autoload_details(self, autoload_details, context):
model_name = context.resource.model
root_name = context.resource.name
root = self.__create_resource_from_datamodel(model_name, root_name)
attributes = self.__create_attributes_dict(autoload_details.attributes)
self.__attach_attributes_to_resource(attributes, '', root)
self.__build_sub_resources_hierarchy(root, autoload_details.resources, attributes)
return root
def __create_resource_from_datamodel(self, model_name, res_name):
return self._datamodel_clss_dict[model_name](res_name)
def __create_attributes_dict(self, attributes_lst):
d = defaultdict(list)
for attribute in attributes_lst:
d[attribute.relative_address].append(attribute)
return d
def __build_sub_resources_hierarchy(self, root, sub_resources, attributes):
d = defaultdict(list)
for resource in sub_resources:
splitted = resource.relative_address.split('/')
parent = '' if len(splitted) == 1 else resource.relative_address.rsplit('/', 1)[0]
rank = len(splitted)
d[rank].append((parent, resource))
self.__set_models_hierarchy_recursively(d, 1, root, '', attributes)
def __set_models_hierarchy_recursively(self, dict, rank, manipulated_resource, resource_relative_addr, attributes):
if rank not in dict: # validate if key exists
pass
for (parent, resource) in dict[rank]:
if parent == resource_relative_addr:
sub_resource = self.__create_resource_from_datamodel(
resource.model.replace(' ', ''),
resource.name)
self.__attach_attributes_to_resource(attributes, resource.relative_address, sub_resource)
manipulated_resource.add_sub_resource(
self.__slice_parent_from_relative_path(parent, resource.relative_address), sub_resource)
self.__set_models_hierarchy_recursively(
dict,
rank + 1,
sub_resource,
resource.relative_address,
attributes)
def __attach_attributes_to_resource(self, attributes, curr_relative_addr, resource):
for attribute in attributes[curr_relative_addr]:
setattr(resource, attribute.attribute_name.lower().replace(' ', '_'), attribute.attribute_value)
del attributes[curr_relative_addr]
def __slice_parent_from_relative_path(self, parent, relative_addr):
if parent == '':
return relative_addr
return relative_addr[len(parent) + 1:] # + 1 because we want to remove the separator also
def __generate_datamodel_classes_dict(self):
return dict(self.__collect_generated_classes())
def __collect_generated_classes(self):
import sys, inspect
return inspect.getmembers(sys.modules[__name__], inspect.isclass)
class ApV2(object):
def __init__(self, name):
"""
"""
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'ApV2'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
"""
Creates an instance of ApV2 from the given context
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype ApV2
"""
result = ApV2(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
"""
:param relative_path:
:type relative_path: str
:return
"""
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
"""
Combines relative path
:param child_path: Path of a model within its parent model, e.g. 1
:type child_path: str
:param parent_path: Full path of the parent model, e.g. 1/1. Might be empty for the root model
:type parent_path: str
:return: Combined path
:rtype str
"""
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
"""
Merges two instances of AutoLoadDetails into the first one
:param autoload_details1:
:type autoload_details1: AutoLoadDetails
:param autoload_details2:
:type autoload_details2: AutoLoadDetails
:return:
:rtype AutoLoadDetails
"""
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
"""
Returns the name of the Cloudshell model
:return:
"""
return 'ApV2'
@property
def bands(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Bands'] if 'ApV2.Bands' in self.attributes else None
@bands.setter
def bands(self, value='dual-band'):
"""
:type value: str
"""
self.attributes['ApV2.Bands'] = value
@property
def radios(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Radios'] if 'ApV2.Radios' in self.attributes else None
@radios.setter
def radios(self, value='2.4Ghz (2x2)'):
"""
:type value: str
"""
self.attributes['ApV2.Radios'] = value
@property
def radio_2dot4ghz(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Radio 2dot4Ghz'] if 'ApV2.Radio 2dot4Ghz' in self.attributes else None
@radio_2dot4ghz.setter
def radio_2dot4ghz(self, value='(2x2)'):
"""
:type value: str
"""
self.attributes['ApV2.Radio 2dot4Ghz'] = value
@property
def radio_5ghz_1(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Radio 5Ghz 1'] if 'ApV2.Radio 5Ghz 1' in self.attributes else None
@radio_5ghz_1.setter
def radio_5ghz_1(self, value='(2x2)'):
"""
:type value: str
"""
self.attributes['ApV2.Radio 5Ghz 1'] = value
@property
def radio_5ghz_2(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Radio 5Ghz 2'] if 'ApV2.Radio 5Ghz 2' in self.attributes else None
@radio_5ghz_2.setter
def radio_5ghz_2(self, value='N/A'):
"""
:type value: str
"""
self.attributes['ApV2.Radio 5Ghz 2'] = value
@property
def model(self):
"""
:rtype: str
"""
return self.attributes['ApV2.model'] if 'ApV2.model' in self.attributes else None
@model.setter
def model(self, value):
"""
:type value: str
"""
self.attributes['ApV2.model'] = value
@property
def mode(self):
"""
:rtype: str
"""
return self.attributes['ApV2.mode'] if 'ApV2.mode' in self.attributes else None
@mode.setter
def mode(self, value='Wifi5'):
"""
:type value: str
"""
self.attributes['ApV2.mode'] = value
@property
def serial(self):
"""
:rtype: str
"""
return self.attributes['ApV2.serial'] if 'ApV2.serial' in self.attributes else None
@serial.setter
def serial(self, value):
"""
:type value: str
"""
self.attributes['ApV2.serial'] = value
@property
def jumphost(self):
"""
:rtype: bool
"""
return self.attributes['ApV2.jumphost'] if 'ApV2.jumphost' in self.attributes else None
@jumphost.setter
def jumphost(self, value):
"""
:type value: bool
"""
self.attributes['ApV2.jumphost'] = value
@property
def ip(self):
"""
:rtype: str
"""
return self.attributes['ApV2.ip'] if 'ApV2.ip' in self.attributes else None
@ip.setter
def ip(self, value):
"""
:type value: str
"""
self.attributes['ApV2.ip'] = value
@property
def jumphost_tty(self):
"""
:rtype: str
"""
return self.attributes['ApV2.jumphost_tty'] if 'ApV2.jumphost_tty' in self.attributes else None
@jumphost_tty.setter
def jumphost_tty(self, value='/dev/ttyAP1'):
"""
:type value: str
"""
self.attributes['ApV2.jumphost_tty'] = value
@property
def version(self):
"""
:rtype: str
"""
return self.attributes['ApV2.version'] if 'ApV2.version' in self.attributes else None
@version.setter
def version(self, value):
"""
:type value: str
"""
self.attributes['ApV2.version'] = value
@property
def port(self):
"""
:rtype: float
"""
return self.attributes['ApV2.port'] if 'ApV2.port' in self.attributes else None
@port.setter
def port(self, value='22'):
"""
:type value: float
"""
self.attributes['ApV2.port'] = value
@property
def uname(self):
"""
:rtype: str
"""
return self.attributes['ApV2.uname'] if 'ApV2.uname' in self.attributes else None
@uname.setter
def uname(self, value):
"""
:type value: str
"""
self.attributes['ApV2.uname'] = value
@property
def passkey(self):
"""
:rtype: string
"""
return self.attributes['ApV2.passkey'] if 'ApV2.passkey' in self.attributes else None
@passkey.setter
def passkey(self, value):
"""
:type value: string
"""
self.attributes['ApV2.passkey'] = value
@property
def pdu_host(self):
"""
:rtype: str
"""
return self.attributes['ApV2.PDU Host'] if 'ApV2.PDU Host' in self.attributes else None
@pdu_host.setter
def pdu_host(self, value):
"""
:type value: str
"""
self.attributes['ApV2.PDU Host'] = value
@property
def pdu_user(self):
"""
:rtype: str
"""
return self.attributes['ApV2.PDU User'] if 'ApV2.PDU User' in self.attributes else None
@pdu_user.setter
def pdu_user(self, value):
"""
:type value: str
"""
self.attributes['ApV2.PDU User'] = value
@property
def pdu_password(self):
"""
:rtype: string
"""
return self.attributes['ApV2.PDU Password'] if 'ApV2.PDU Password' in self.attributes else None
@pdu_password.setter
def pdu_password(self, value):
"""
:type value: string
"""
self.attributes['ApV2.PDU Password'] = value
@property
def pdu_port(self):
"""
:rtype: str
"""
return self.attributes['ApV2.PDU Port'] if 'ApV2.PDU Port' in self.attributes else None
@pdu_port.setter
def pdu_port(self, value):
"""
:type value: str
"""
self.attributes['ApV2.PDU Port'] = value
@property
def user(self):
"""
:rtype: str
"""
return self.attributes['ApV2.User'] if 'ApV2.User' in self.attributes else None
@user.setter
def user(self, value):
"""
User with administrative privileges
:type value: str
"""
self.attributes['ApV2.User'] = value
@property
def password(self):
"""
:rtype: string
"""
return self.attributes['ApV2.Password'] if 'ApV2.Password' in self.attributes else None
@password.setter
def password(self, value):
"""
:type value: string
"""
self.attributes['ApV2.Password'] = value
@property
def enable_password(self):
"""
:rtype: string
"""
return self.attributes['ApV2.Enable Password'] if 'ApV2.Enable Password' in self.attributes else None
@enable_password.setter
def enable_password(self, value):
"""
The enable password is used by some CLI protocols, such as Telnet, when the device configuration requires it.
:type value: string
"""
self.attributes['ApV2.Enable Password'] = value
@property
def power_management(self):
"""
:rtype: bool
"""
return self.attributes['ApV2.Power Management'] if 'ApV2.Power Management' in self.attributes else None
@power_management.setter
def power_management(self, value=True):
"""
Used by the power management orchestration, if enabled, to determine whether to automatically manage the device power status. Enabled by default.
:type value: bool
"""
self.attributes['ApV2.Power Management'] = value
@property
def sessions_concurrency_limit(self):
"""
:rtype: float
"""
return self.attributes['ApV2.Sessions Concurrency Limit'] if 'ApV2.Sessions Concurrency Limit' in self.attributes else None
@sessions_concurrency_limit.setter
def sessions_concurrency_limit(self, value='1'):
"""
The maximum number of concurrent sessions that the driver will open to the device. Default is 1 (no concurrency).
:type value: float
"""
self.attributes['ApV2.Sessions Concurrency Limit'] = value
@property
def snmp_read_community(self):
"""
:rtype: string
"""
return self.attributes['ApV2.SNMP Read Community'] if 'ApV2.SNMP Read Community' in self.attributes else None
@snmp_read_community.setter
def snmp_read_community(self, value):
"""
The SNMP Read-Only Community String is like a password. It is sent along with each SNMP Get-Request and allows (or denies) access to device.
:type value: string
"""
self.attributes['ApV2.SNMP Read Community'] = value
@property
def snmp_write_community(self):
"""
:rtype: string
"""
return self.attributes['ApV2.SNMP Write Community'] if 'ApV2.SNMP Write Community' in self.attributes else None
@snmp_write_community.setter
def snmp_write_community(self, value):
"""
The SNMP Write Community String is like a password. It is sent along with each SNMP Set-Request and allows (or denies) changing MIB values.
:type value: string
"""
self.attributes['ApV2.SNMP Write Community'] = value
@property
def snmp_v3_user(self):
"""
:rtype: str
"""
return self.attributes['ApV2.SNMP V3 User'] if 'ApV2.SNMP V3 User' in self.attributes else None
@snmp_v3_user.setter
def snmp_v3_user(self, value):
"""
Relevant only in case SNMP V3 is in use.
:type value: str
"""
self.attributes['ApV2.SNMP V3 User'] = value
@property
def snmp_v3_password(self):
"""
:rtype: string
"""
return self.attributes['ApV2.SNMP V3 Password'] if 'ApV2.SNMP V3 Password' in self.attributes else None
@snmp_v3_password.setter
def snmp_v3_password(self, value):
"""
Relevant only in case SNMP V3 is in use.
:type value: string
"""
self.attributes['ApV2.SNMP V3 Password'] = value
@property
def snmp_v3_private_key(self):
"""
:rtype: str
"""
return self.attributes['ApV2.SNMP V3 Private Key'] if 'ApV2.SNMP V3 Private Key' in self.attributes else None
@snmp_v3_private_key.setter
def snmp_v3_private_key(self, value):
"""
Relevant only in case SNMP V3 is in use.
:type value: str
"""
self.attributes['ApV2.SNMP V3 Private Key'] = value
@property
def snmp_v3_authentication_protocol(self):
"""
:rtype: str
"""
return self.attributes['ApV2.SNMP V3 Authentication Protocol'] if 'ApV2.SNMP V3 Authentication Protocol' in self.attributes else None
@snmp_v3_authentication_protocol.setter
def snmp_v3_authentication_protocol(self, value='No Authentication Protocol'):
"""
Relevant only in case SNMP V3 is in use.
:type value: str
"""
self.attributes['ApV2.SNMP V3 Authentication Protocol'] = value
@property
def snmp_v3_privacy_protocol(self):
"""
:rtype: str
"""
return self.attributes['ApV2.SNMP V3 Privacy Protocol'] if 'ApV2.SNMP V3 Privacy Protocol' in self.attributes else None
@snmp_v3_privacy_protocol.setter
def snmp_v3_privacy_protocol(self, value='No Privacy Protocol'):
"""
Relevant only in case SNMP V3 is in use.
:type value: str
"""
self.attributes['ApV2.SNMP V3 Privacy Protocol'] = value
@property
def snmp_version(self):
"""
:rtype: str
"""
return self.attributes['ApV2.SNMP Version'] if 'ApV2.SNMP Version' in self.attributes else None
@snmp_version.setter
def snmp_version(self, value=''):
"""
The version of SNMP to use. Possible values are v1, v2c and v3.
:type value: str
"""
self.attributes['ApV2.SNMP Version'] = value
@property
def enable_snmp(self):
"""
:rtype: bool
"""
return self.attributes['ApV2.Enable SNMP'] if 'ApV2.Enable SNMP' in self.attributes else None
@enable_snmp.setter
def enable_snmp(self, value=True):
"""
If set to True and SNMP isn't enabled yet in the device the Shell will automatically enable SNMP in the device when Autoload command is called. SNMP must be enabled on the device for the Autoload command to run successfully. True by default.
:type value: bool
"""
self.attributes['ApV2.Enable SNMP'] = value
@property
def disable_snmp(self):
"""
:rtype: bool
"""
return self.attributes['ApV2.Disable SNMP'] if 'ApV2.Disable SNMP' in self.attributes else None
@disable_snmp.setter
def disable_snmp(self, value=False):
"""
If set to True SNMP will be disabled automatically by the Shell after the Autoload command execution is completed. False by default.
:type value: bool
"""
self.attributes['ApV2.Disable SNMP'] = value
@property
def console_server_ip_address(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Console Server IP Address'] if 'ApV2.Console Server IP Address' in self.attributes else None
@console_server_ip_address.setter
def console_server_ip_address(self, value):
"""
The IP address of the console server, in IPv4 format.
:type value: str
"""
self.attributes['ApV2.Console Server IP Address'] = value
@property
def console_user(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Console User'] if 'ApV2.Console User' in self.attributes else None
@console_user.setter
def console_user(self, value):
"""
:type value: str
"""
self.attributes['ApV2.Console User'] = value
@property
def console_port(self):
"""
:rtype: float
"""
return self.attributes['ApV2.Console Port'] if 'ApV2.Console Port' in self.attributes else None
@console_port.setter
def console_port(self, value):
"""
The port on the console server, usually a TCP port, with which the device is associated.
:type value: float
"""
self.attributes['ApV2.Console Port'] = value
@property
def console_password(self):
"""
:rtype: string
"""
return self.attributes['ApV2.Console Password'] if 'ApV2.Console Password' in self.attributes else None
@console_password.setter
def console_password(self, value):
"""
:type value: string
"""
self.attributes['ApV2.Console Password'] = value
@property
def cli_connection_type(self):
"""
:rtype: str
"""
return self.attributes['ApV2.CLI Connection Type'] if 'ApV2.CLI Connection Type' in self.attributes else None
@cli_connection_type.setter
def cli_connection_type(self, value='Auto'):
"""
The CLI connection type that will be used by the driver. Possible values are Auto, Console, SSH, Telnet and TCP. If Auto is selected the driver will choose the available connection type automatically. Default value is Auto.
:type value: str
"""
self.attributes['ApV2.CLI Connection Type'] = value
@property
def cli_tcp_port(self):
"""
:rtype: float
"""
return self.attributes['ApV2.CLI TCP Port'] if 'ApV2.CLI TCP Port' in self.attributes else None
@cli_tcp_port.setter
def cli_tcp_port(self, value):
"""
TCP port to use for the CLI connection. If kept empty, a default CLI port will be used based on the chosen protocol, for example Telnet will use port 23.
:type value: float
"""
self.attributes['ApV2.CLI TCP Port'] = value
@property
def backup_location(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Backup Location'] if 'ApV2.Backup Location' in self.attributes else None
@backup_location.setter
def backup_location(self, value):
"""
Used by the save/restore orchestration to determine where backups should be saved.
:type value: str
"""
self.attributes['ApV2.Backup Location'] = value
@property
def backup_type(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Backup Type'] if 'ApV2.Backup Type' in self.attributes else None
@backup_type.setter
def backup_type(self, value='File System'):
"""
Supported protocols for saving and restoring of configuration and firmware files. Possible values are 'File System' 'FTP' and 'TFTP'. Default value is 'File System'.
:type value: str
"""
self.attributes['ApV2.Backup Type'] = value
@property
def backup_user(self):
"""
:rtype: str
"""
return self.attributes['ApV2.Backup User'] if 'ApV2.Backup User' in self.attributes else None
@backup_user.setter
def backup_user(self, value):
"""
Username for the storage server used for saving and restoring of configuration and firmware files.
:type value: str
"""
self.attributes['ApV2.Backup User'] = value
@property
def backup_password(self):
"""
:rtype: string
"""
return self.attributes['ApV2.Backup Password'] if 'ApV2.Backup Password' in self.attributes else None
@backup_password.setter
def backup_password(self, value):
"""
Password for the storage server used for saving and restoring of configuration and firmware files.
:type value: string
"""
self.attributes['ApV2.Backup Password'] = value
@property
def name(self):
"""
:rtype: str
"""
return self._name
@name.setter
def name(self, value):
"""
:type value: str
"""
self._name = value
@property
def cloudshell_model_name(self):
"""
:rtype: str
"""
return self._cloudshell_model_name
@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
"""
:type value: str
"""
self._cloudshell_model_name = value
@property
def system_name(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.System Name'] if 'CS_GenericResource.System Name' in self.attributes else None
@system_name.setter
def system_name(self, value):
"""
A unique identifier for the device, if one exists in the device terminal/OS.
:type value: str
"""
self.attributes['CS_GenericResource.System Name'] = value
@property
def vendor(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.Vendor'] if 'CS_GenericResource.Vendor' in self.attributes else None
@vendor.setter
def vendor(self, value=''):
"""
The name of the device manufacturer.
:type value: str
"""
self.attributes['CS_GenericResource.Vendor'] = value
@property
def contact_name(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.Contact Name'] if 'CS_GenericResource.Contact Name' in self.attributes else None
@contact_name.setter
def contact_name(self, value):
"""
The name of a contact registered in the device.
:type value: str
"""
self.attributes['CS_GenericResource.Contact Name'] = value
@property
def location(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.Location'] if 'CS_GenericResource.Location' in self.attributes else None
@location.setter
def location(self, value=''):
"""
The device physical location identifier. For example Lab1/Floor2/Row5/Slot4.
:type value: str
"""
self.attributes['CS_GenericResource.Location'] = value
@property
def model(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.Model'] if 'CS_GenericResource.Model' in self.attributes else None
@model.setter
def model(self, value=''):
"""
The device model. This information is typically used for abstract resource filtering.
:type value: str
"""
self.attributes['CS_GenericResource.Model'] = value
@property
def model_name(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.Model Name'] if 'CS_GenericResource.Model Name' in self.attributes else None
@model_name.setter
def model_name(self, value=''):
"""
The catalog name of the device model. This attribute will be displayed in CloudShell instead of the CloudShell model.
:type value: str
"""
self.attributes['CS_GenericResource.Model Name'] = value
class ResourcePort(object):
def __init__(self, name):
"""
"""
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'ApV2.ResourcePort'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
"""
Creates an instance of ResourcePort from the given context
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype ResourcePort
"""
result = ResourcePort(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
"""
:param relative_path:
:type relative_path: str
:return
"""
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
"""
Combines relative path
:param child_path: Path of a model within its parent model, e.g. 1
:type child_path: str
:param parent_path: Full path of the parent model, e.g. 1/1. Might be empty for the root model
:type parent_path: str
:return: Combined path
:rtype str
"""
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
"""
Merges two instances of AutoLoadDetails into the first one
:param autoload_details1:
:type autoload_details1: AutoLoadDetails
:param autoload_details2:
:type autoload_details2: AutoLoadDetails
:return:
:rtype AutoLoadDetails
"""
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
"""
Returns the name of the Cloudshell model
:return:
"""
return 'ResourcePort'
@property
def mac_address(self):
"""
:rtype: str
"""
return self.attributes['ApV2.ResourcePort.MAC Address'] if 'ApV2.ResourcePort.MAC Address' in self.attributes else None
@mac_address.setter
def mac_address(self, value=''):
"""
:type value: str
"""
self.attributes['ApV2.ResourcePort.MAC Address'] = value
@property
def ipv4_address(self):
"""
:rtype: str
"""
return self.attributes['ApV2.ResourcePort.IPv4 Address'] if 'ApV2.ResourcePort.IPv4 Address' in self.attributes else None
@ipv4_address.setter
def ipv4_address(self, value):
"""
:type value: str
"""
self.attributes['ApV2.ResourcePort.IPv4 Address'] = value
@property
def ipv6_address(self):
"""
:rtype: str
"""
return self.attributes['ApV2.ResourcePort.IPv6 Address'] if 'ApV2.ResourcePort.IPv6 Address' in self.attributes else None
@ipv6_address.setter
def ipv6_address(self, value):
"""
:type value: str
"""
self.attributes['ApV2.ResourcePort.IPv6 Address'] = value
@property
def port_speed(self):
"""
:rtype: str
"""
return self.attributes['ApV2.ResourcePort.Port Speed'] if 'ApV2.ResourcePort.Port Speed' in self.attributes else None
@port_speed.setter
def port_speed(self, value):
"""
The port speed (e.g. 10Gb/s, 40Gb/s, 100Mb/s).
:type value: str
"""
self.attributes['ApV2.ResourcePort.Port Speed'] = value
@property
def name(self):
"""
:rtype: str
"""
return self._name
@name.setter
def name(self, value):
"""
:type value: str
"""
self._name = value
@property
def cloudshell_model_name(self):
"""
:rtype: str
"""
return self._cloudshell_model_name
@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
"""
:type value: str
"""
self._cloudshell_model_name = value
@property
def model_name(self):
"""
:rtype: str
"""
return self.attributes['CS_Port.Model Name'] if 'CS_Port.Model Name' in self.attributes else None
@model_name.setter
def model_name(self, value=''):
"""
The catalog name of the device model. This attribute will be displayed in CloudShell instead of the CloudShell model.
:type value: str
"""
self.attributes['CS_Port.Model Name'] = value
class GenericPowerPort(object):
def __init__(self, name):
"""
"""
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'ApV2.GenericPowerPort'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
"""
Creates an instance of GenericPowerPort from the given context
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype GenericPowerPort
"""
result = GenericPowerPort(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
"""
:param relative_path:
:type relative_path: str
:return
"""
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
"""
Combines relative path
:param child_path: Path of a model within its parent model, e.g. 1
:type child_path: str
:param parent_path: Full path of the parent model, e.g. 1/1. Might be empty for the root model
:type parent_path: str
:return: Combined path
:rtype str
"""
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
"""
Merges two instances of AutoLoadDetails into the first one
:param autoload_details1:
:type autoload_details1: AutoLoadDetails
:param autoload_details2:
:type autoload_details2: AutoLoadDetails
:return:
:rtype AutoLoadDetails
"""
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
"""
Returns the name of the Cloudshell model
:return:
"""
return 'GenericPowerPort'
@property
def model(self):
"""
:rtype: str
"""
return self.attributes['ApV2.GenericPowerPort.Model'] if 'ApV2.GenericPowerPort.Model' in self.attributes else None
@model.setter
def model(self, value):
"""
The device model. This information is typically used for abstract resource filtering.
:type value: str
"""
self.attributes['ApV2.GenericPowerPort.Model'] = value
@property
def serial_number(self):
"""
:rtype: str
"""
return self.attributes['ApV2.GenericPowerPort.Serial Number'] if 'ApV2.GenericPowerPort.Serial Number' in self.attributes else None
@serial_number.setter
def serial_number(self, value):
"""
:type value: str
"""
self.attributes['ApV2.GenericPowerPort.Serial Number'] = value
@property
def version(self):
"""
:rtype: str
"""
return self.attributes['ApV2.GenericPowerPort.Version'] if 'ApV2.GenericPowerPort.Version' in self.attributes else None
@version.setter
def version(self, value):
"""
The firmware version of the resource.
:type value: str
"""
self.attributes['ApV2.GenericPowerPort.Version'] = value
@property
def port_description(self):
"""
:rtype: str
"""
return self.attributes['ApV2.GenericPowerPort.Port Description'] if 'ApV2.GenericPowerPort.Port Description' in self.attributes else None
@port_description.setter
def port_description(self, value):
"""
The description of the port as configured in the device.
:type value: str
"""
self.attributes['ApV2.GenericPowerPort.Port Description'] = value
@property
def name(self):
"""
:rtype: str
"""
return self._name
@name.setter
def name(self, value):
"""
:type value: str
"""
self._name = value
@property
def cloudshell_model_name(self):
"""
:rtype: str
"""
return self._cloudshell_model_name
@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
"""
:type value: str
"""
self._cloudshell_model_name = value
@property
def model_name(self):
"""
:rtype: str
"""
return self.attributes['CS_PowerPort.Model Name'] if 'CS_PowerPort.Model Name' in self.attributes else None
@model_name.setter
def model_name(self, value=''):
"""
The catalog name of the device model. This attribute will be displayed in CloudShell instead of the CloudShell model.
:type value: str
"""
self.attributes['CS_PowerPort.Model Name'] = value
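# --- Usage sketch (illustrative, not part of the generated shell code) ---
# A typical autoload flow builds the model from sub-resources and lets
# create_autoload_details() flatten them; the names and relative addresses
# below are examples only, and 'root' stands for the root resource class
# defined earlier in this module.
# port = ResourcePort(name='Port 1')
# port.mac_address = '00:11:22:33:44:55'
# power_port = GenericPowerPort(name='PWR 1')
# power_port.port_description = 'Primary feed'
# root.add_sub_resource('P1', port)
# root.add_sub_resource('PWR1', power_port)
# autoload_details = root.create_autoload_details()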
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from app.core.models import *
class RegistroDesocupado(UserCreationForm):
dni = forms.CharField(required=True)
fecha_nacimiento = forms.DateField(required=True)
profesion = forms.CharField(max_length=200, required=False)
experiencia_laboral = forms.CharField(widget=forms.Textarea, max_length=700, required=False)
formacion = forms.CharField(widget=forms.Textarea, max_length=500, required=False)
habilidades = forms.CharField(widget=forms.Textarea, max_length=500, required=False)
trabajo_realizable = forms.CharField(max_length=500, required=False)
localidad = forms.CharField(max_length=500, required=False)
class Meta:
model = User
# Targets the User model because we want it to save the user;
# profile creation is handled in the save() method below
fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2')
def save(self):
# Call the save() already defined on the form; this automatically
# creates the Empresa/Desocupado profile linked to the user
user = super(RegistroDesocupado, self).save()
# Reload the user so the related profile data is available
user.refresh_from_db()
# Finally, populate the Desocupado fields from the form data
user.desocupado.dni = self.cleaned_data.get('dni')
user.desocupado.nombre = self.cleaned_data.get('first_name')
user.desocupado.apellido = self.cleaned_data.get('last_name')
user.desocupado.fecha_nacimiento = self.cleaned_data.get('fecha_nacimiento')
user.desocupado.profesion = self.cleaned_data.get('profesion')
user.desocupado.experiencia_laboral = self.cleaned_data.get('experiencia_laboral')
user.desocupado.formacion = self.cleaned_data.get('formacion')
user.desocupado.habilidades = self.cleaned_data.get('habilidades')
user.desocupado.trabajo_realizable = self.cleaned_data.get('trabajo_realizable')
user.desocupado.localidad = self.cleaned_data.get('localidad')
# Finally, save the user with the Desocupado profile filled in
user.save()
# And return it
return user
class RegistroEmpresa(UserCreationForm):
cuit = forms.CharField(max_length=10)
razon_social = forms.CharField()
rubro = forms.CharField()
class Meta:
model = User
# Targets the User model because we want it to save the user;
# profile creation is handled in the save() method below
fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2')
def save(self):
# Call the save() already defined on the form; this automatically
# creates the Empresa/Desocupado profile linked to the user
user = super(RegistroEmpresa, self).save()
# Reload the user so the related profile data is available
user.refresh_from_db()
# Finally, populate the Empresa fields from the form data
user.empresa.cuit = self.cleaned_data.get('cuit')
user.empresa.razon_social = self.cleaned_data.get('razon_social')
user.empresa.rubro = self.cleaned_data.get('rubro')
# Finally, save the user with the Empresa profile filled in
user.save()
# And return it
return user
class EditarDesocupado(forms.ModelForm):
class Meta:
model = Desocupado
fields = ['nombre', 'apellido','fecha_nacimiento','localidad', 'experiencia_laboral','formacion', 'habilidades', 'trabajo_realizable', 'dni']
class EditarEmpresa(forms.ModelForm):
class Meta:
model = Empresa
fields = ['cuit', 'rubro', 'razon_social']
class OfertaForm(forms.ModelForm):
class Meta:
model = Oferta
fields = ['cargo','trabajo','horarios','profesion']
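# --- Sketch (assumption, not part of this file) ---
# The save() methods above rely on the profile objects already existing after
# super().save(); app.core.models presumably wires that up with a post_save
# signal on User, roughly along these lines (names here are hypothetical):
#
# from django.db.models.signals import post_save
# from django.dispatch import receiver
#
# @receiver(post_save, sender=User)
# def crear_perfil(sender, instance, created, **kwargs):
#     if created:
#         Desocupado.objects.create(user=instance)  # or Empresa, as appropriate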
|
get_distance_inches(short_range=False)
Gets the measured distance in inches.

Parameters
    short_range : boolean
        Whether or not to use short range mode. Short range mode increases
        accuracy, but it can only detect nearby objects.
        Values: True or False. Default: False.

Returns
    float (decimal number)
        The measured distance, or "none" if the distance can't be measured.
        Values: any value between 0 and 79.

Errors
    TypeError
        short_range is not a boolean.
    RuntimeError
        The sensor has been disconnected from the Port.
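A minimal usage sketch, assuming this documents the LEGO SPIKE DistanceSensor Python API; the import path and the port letter 'A' below are assumptions, not taken from the text above.

from spike import DistanceSensor

sensor = DistanceSensor('A')  # hypothetical: sensor plugged into port A
distance = sensor.get_distance_inches(short_range=True)
if distance is not None:  # the sensor reports none when nothing is in range
    print('Object at', distance, 'inches')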
|
from django.contrib import admin
from .models import SubShop
admin.site.register(SubShop)
|
from __future__ import annotations
import logging
import numpy as np
from collections import defaultdict
from typing import Dict, Optional, List
from .common import Vec2, dist2
from .events import AnimationEndedEvent, Event, PlayerShootEvent
from .graphics import Canvas, Line, LineType, Sprite, AnimatedSprite
from .textures import TextureManager, Texture
logger = logging.getLogger(__name__)
class Hitbox:
def __init__(self, width: int, height: int, data: Optional[np.array] = None) -> None:
self.width = width
self.height = height
if data is not None:
self.data = data
else:
self.data = np.zeros((height, width), dtype=np.uint8)
@classmethod
def from_circle(cls, diameter: int) -> Hitbox:
radius = diameter // 2
center = Vec2(diameter // 2, diameter // 2)
hitbox = Hitbox(diameter, diameter)
for row in range(diameter):
for col in range(diameter):
if dist2(Vec2(col, row), center) < radius ** 2:
hitbox.data[row, col] = 1
return hitbox
@classmethod
def from_rectangle(cls, width: int, height: int) -> Hitbox:
# data is indexed as [row, col], so the mask shape must be (height, width)
return Hitbox(width, height, np.ones((height, width), dtype=np.uint8))
@classmethod
def from_texture(cls, texture: Texture) -> Hitbox:
hitbox_mask = texture.mask > 0
if texture.mask.ndim > 2:
hitbox_mask = texture.mask.sum(axis = 2) > 0
logger.info(f'Hitbox mask shape: {hitbox_mask.shape}')
# the constructor takes (width, height, data); the mask is already (height, width)
return Hitbox(texture.width, texture.height, hitbox_mask)
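# Illustration (comments only, not part of the original module): every factory
# above yields a mask indexed as data[row, col], e.g.
#   Hitbox.from_rectangle(4, 2).data.shape == (2, 4)
#   Hitbox.from_circle(5).data[2, 2] == 1   # the centre cell lies inside the disc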
class Object():
kind = 'Object'
def __init__(self, x, y) -> None:
self.xc = x
self.yc = y
self.sprite = Sprite(texture=TextureManager.blank())
self.hitbox = Hitbox(0, 0)
@property
def x(self) -> int:
return self.xc - self.sprite.width // 2
@property
def y(self) -> int:
return self.yc - self.sprite.height // 2
def draw(self, canvas: Canvas):
self.sprite.draw(canvas, self.x, self.y)
class Bullet(Object):
kind = 'Bullet'
def __init__(self, x: int, y: int, speed: int, size: int, sprite: str) -> None:
super().__init__(x, y)
self.speed = speed
texture = TextureManager.get(sprite)
self.sprite = Sprite(texture)
self.hitbox = Hitbox.from_circle(size)
def update(self, canvas: Canvas, delta: float) -> Optional[Event]:
self.xc += round(self.speed * delta)
self.draw(canvas)
class BulletFactory:
def __init__(self, config: Dict):
self.config = config
self.bullet_size = config['size']
def create(self, pos: Vec2):
return Bullet(pos.x - self.bullet_size // 2, pos.y, **self.config)
class Player(Object):
kind = 'Player'
def __init__(self, **config) -> None:
x, y = config['start_pos']
super().__init__(x, y)
texture = TextureManager.get(config['sprite'])
self.speed = config['speed']
self.sprite = Sprite(texture)
self.hitbox = Hitbox.from_texture(texture)
self.ymax = config['ymax'] - self.sprite.height // 2
self.ymin = self.sprite.height // 2
def process_input(self, key: int) -> List[Event]:
events = []
if key == ord('w') and self.in_bounds(self.yc - 1):
self.yc -= 1
elif key == ord('s') and self.in_bounds(self.yc + 1):
self.yc += 1
elif key == ord(' '):
events.append(PlayerShootEvent(sender=self))
return events
def in_bounds(self, yc):
return self.ymin <= yc < self.ymax
def update(self, canvas: Canvas, delta: float) -> Optional[Event]:
self.xc += round(self.speed * delta)
self.draw(canvas)
def bullet_spawn_pos(self) -> Vec2:
return Vec2(self.x + self.sprite.width, self.yc)
class Block(Object):
kind = 'Block'
def __init__(self, config: Dict) -> None:
x, y = config['start_pos']
super().__init__(x, y)
texture = TextureManager.get(config['sprite'])
self.sprite = Sprite(texture)
self.hitbox = Hitbox.from_rectangle(self.sprite.width, self.sprite.height)
def update(self, canvas: Canvas, delta: float) -> Optional[Event]:
self.draw(canvas)
class Enemy(Object):
kind = 'Enemy'
MAX_LENGTH = 200
def __init__(self, config: Dict) -> None:
x, y = config.pop('start_pos')
super().__init__(x, y)
name = config.pop('sprite')
texture = TextureManager.get(name)
self.hangs = config.pop('hangs')
self.line = None
if self.hangs:
self.line = Line(xs=self.xc, ys=0, xe=self.xc, ye=self.yc,
width=1, type=LineType.DASHED, data=np.array([]))
self.line.generate_data(self.MAX_LENGTH, 1, LineType.DASHED, 1)
self.sprite = AnimatedSprite(texture=texture, **config)
self.hitbox = Hitbox.from_texture(texture)
def draw(self, canvas: Canvas):
super().draw(canvas)
if self.hangs:
self.line.draw(canvas)
def update(self, canvas: Canvas, delta: float) -> Optional[Event]:
e = self.sprite.update(delta)
if not e:
logger.info('Enemy animation ended')
return AnimationEndedEvent(sender=self)
self.draw(canvas)
class ExplosionFactory:
def __init__(self, config: Dict):
self.config = config
def create(self, pos: Vec2) -> Explosion:
return Explosion(pos.x, pos.y, **self.config)
class Explosion(Object):
kind = 'Explosion'
def __init__(self, x: int = 0, y: int = 0, **kwargs) -> None:
super().__init__(x, y)
name = kwargs.pop('sprite')
texture = TextureManager.get(name)
self.sprite = AnimatedSprite(texture=texture, **kwargs)
def update(self, canvas: Canvas, delta: float):
e = self.sprite.update(delta)
if not e:
logger.info('Explosion animation ended')
return AnimationEndedEvent(sender=self)
self.draw(canvas)
class Goal(Object):
kind = 'Goal'
def __init__(self, config: Dict) -> None:
x, y = config['start_pos']
super().__init__(x, y)
texture = TextureManager.get(config['sprite'])
self.sprite = Sprite(texture)
self.hitbox = Hitbox.from_rectangle(self.sprite.width, self.sprite.height)
def update(self, canvas: Canvas, delta: float) -> Optional[Event]:
self.draw(canvas)
class ObjectManager:
def __init__(self) -> None:
self.objects = defaultdict(list)
self.remove_queue = []
def traverse(self):
for objects in self.objects.values():
for object in objects:
yield object
def add_object(self, object):
kind = object.kind
if kind == Player.kind:
assert len(self.objects[kind]) == 0, "Player must be unique"
self.objects[kind].append(object)
logger.info(f'Adding object to the game: {kind} - {object}')
def remove_object(self, object):
kind = object.kind
self.objects[kind].remove(object)
logger.info(f'Removing object from the game: {kind} - {object}')
def process_input(self, key: int) -> List[Event]:
events = []
for object in self.traverse():
if hasattr(object, 'process_input'):
events.extend(object.process_input(key))
return events
def update(self, canvas: Canvas, delta: float):
events = []
for object in self.traverse():
e = object.update(canvas, delta)
if e:
events.append(e)
return events
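# --- Usage sketch (illustrative, outside the original module) ---
# A game loop would typically drive the manager roughly like this:
#   manager = ObjectManager()
#   manager.add_object(player)                 # objects built from the level config
#   manager.add_object(enemy)
#   events = manager.process_input(key)
#   events += manager.update(canvas, delta)
#   ...dispatch the events and call manager.remove_object() for finished objects...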
|
from functools import wraps
try: # Assume we're a sub-module in a package.
from utils import arguments as arg
from loggers.logger_interface import LoggerInterface
from loggers.extended_logger_interface import ExtendedLoggerInterface, LoggingLevel
from loggers.selection_logger_interface import SelectionLoggerInterface
from loggers.extended_logger import ExtendedLogger, SingletonLogger, DEFAULT_LOGGER_NAME, DEFAULT_FORMATTER
from loggers.progress_interface import ProgressInterface, OperationStatus
from loggers.progress import Progress
from loggers.detailed_message import DetailedMessage, SelectionError
from loggers.message_collector import MessageCollector, SelectionMessageCollector, CommonMessageCollector
from loggers.logging_context_stub import LoggingContextStub
from loggers.fallback_logger import FallbackLogger
except ImportError: # Apparently no higher-level package has been imported, fall back to a local import.
from ..utils import arguments as arg
from .logger_interface import LoggerInterface
from .extended_logger_interface import ExtendedLoggerInterface, LoggingLevel
from .selection_logger_interface import SelectionLoggerInterface
from .extended_logger import ExtendedLogger, SingletonLogger, DEFAULT_LOGGER_NAME, DEFAULT_FORMATTER
from .progress_interface import ProgressInterface, OperationStatus
from .progress import Progress
from .detailed_message import DetailedMessage, SelectionError
from .message_collector import MessageCollector, SelectionMessageCollector, CommonMessageCollector
from .logging_context_stub import LoggingContextStub
from .fallback_logger import FallbackLogger
DEFAULT_LOGGING_LEVEL = LoggingLevel.get_default()
def get_method_name(level: LoggingLevel = LoggingLevel.Info):
if not isinstance(level, LoggingLevel):
level = LoggingLevel(level)
return level.get_method_name()
def get_logger(name=DEFAULT_LOGGER_NAME, level=DEFAULT_LOGGING_LEVEL, context=None):
if name == DEFAULT_LOGGER_NAME:
return SingletonLogger(name=name, level=level, context=context)
else:
return ExtendedLogger(name=name, level=level, context=context)
def get_base_logger(name=DEFAULT_LOGGER_NAME, level=DEFAULT_LOGGING_LEVEL, formatter=DEFAULT_FORMATTER):
return ExtendedLogger.build_base_logger(name=name, level=level, formatter=formatter)
def get_selection_logger(**kwargs):
return get_logger().get_selection_logger(**kwargs)
def is_logger(obj, by_methods=False):
if isinstance(obj, LoggerInterface):
return True
elif 'Logger' in obj.__class__.__name__:
return True
elif by_methods and hasattr(obj, 'log') and hasattr(obj, 'warning'):
return True
else:
return False
def deprecated(func):
@wraps(func)
def new_func(*args, **kwargs):
message = 'Method {}.{}() is deprecated.'
get_logger().warning(message.format(func.__module__, func.__name__))
return func(*args, **kwargs)
return new_func
def deprecated_with_alternative(alternative):
def _deprecated(func):
@wraps(func)
def new_func(*args, **kwargs):
message = 'Method {}.{}() is deprecated, use {} instead.'
get_logger().warning(message.format(func.__module__, func.__name__, alternative))
return func(*args, **kwargs)
return new_func
return _deprecated
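# --- Usage sketch (illustrative, not part of the package) ---
# Applying the decorator logs a warning through the package logger before
# delegating to the wrapped function, e.g.:
#
# @deprecated_with_alternative('new_helper()')
# def old_helper(*args, **kwargs):
#     return new_helper(*args, **kwargs)
#
# Calling old_helper(...) then emits
# "Method <module>.old_helper() is deprecated, use new_helper() instead."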
|
#!/usr/bin/python
# Copyright (C) 2010-2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DB migration for Model Update on 2012-04-25.
Intended to be run through the remote API:
remote_api_shell.py -s persistent-cal.appspot.com
s~persistent-cal> import sys
s~persistent-cal> sys.path.append('/path/to/persistent-cal')
s~persistent-cal> from db_migration_2012_04_25 import UpdateEvents
s~persistent-cal> UpdateEvents()
Note:
We may move gcal_edit into event_data as the 'id' key, but not here.
"""
__author__ = 'daniel.j.hermes@gmail.com (Daniel Hermes)'
# General libraries
import json
# App engine specific libraries
from google.appengine.ext import db
# App specific libraries
from library import JsonAscii
from models import Event
def TransformEventData(event_data):
"""Takes Event object to new specification."""
new_event_data = {}
new_event_data['summary'] = event_data['summary']
new_event_data['description'] = event_data['description']
# Where
new_event_data['location'] = event_data['location']
# When
start = event_data['when:from']
if start.endswith('Z'):
new_event_data['start'] = {'dateTime': start}
else:
new_event_data['start'] = {'date': start}
end = event_data['when:to']
if end.endswith('Z'):
new_event_data['end'] = {'dateTime': end}
else:
new_event_data['end'] = {'date': end}
return new_event_data
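# Example (comments only): date-only endpoints map to {'date': ...}, while
# endpoints ending in 'Z' map to {'dateTime': ...}. For instance
#   {'summary': 'S', 'description': 'D', 'location': 'L',
#    'when:from': '2012-04-25', 'when:to': '2012-04-26'}
# becomes the same summary/description/location keys plus
#   'start': {'date': '2012-04-25'}, 'end': {'date': '2012-04-26'}.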
def UpdateEvents():
events = Event.all()
for event in events:
event_data = json.loads(event.event_data)
new_event_data = TransformEventData(event_data)
event.event_data_old = db.Text(JsonAscii(event_data))
event.event_data = db.Text(JsonAscii(new_event_data))
event.put()
|
from typing import Any
class ContainsLoopError(Exception):
pass
class Node:
def __init__(self, data: Any) -> None:
self.data = data
self.next_node = None
def __iter__(self):
node = self
visited = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(node)
yield node.data
node = node.next_node
@property
def has_loop(self) -> bool:
"""
A loop is when the exact same Node appears more than once in a linked list.
>>> root_node = Node(1)
>>> root_node.next_node = Node(2)
>>> root_node.next_node.next_node = Node(3)
>>> root_node.next_node.next_node.next_node = Node(4)
>>> root_node.has_loop
False
>>> root_node.next_node.next_node.next_node = root_node.next_node
>>> root_node.has_loop
True
"""
try:
list(self)
return False
except ContainsLoopError:
return True
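def has_loop_floyd(head: Node) -> bool:
    """Alternative sketch (not part of the original module): Floyd's two-pointer
    cycle check, O(n) time and O(1) extra space versus the visited list above.
    >>> a = Node(1); a.next_node = Node(2); a.next_node.next_node = a
    >>> has_loop_floyd(a)
    True
    """
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False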
if __name__ == "__main__":
root_node = Node(1)
root_node.next_node = Node(2)
root_node.next_node.next_node = Node(3)
root_node.next_node.next_node.next_node = Node(4)
print(root_node.has_loop) # False
root_node.next_node.next_node.next_node = root_node.next_node
print(root_node.has_loop) # True
root_node = Node(5)
root_node.next_node = Node(6)
root_node.next_node.next_node = Node(5)
root_node.next_node.next_node.next_node = Node(6)
print(root_node.has_loop) # False
root_node = Node(1)
print(root_node.has_loop) # False
|
"""
Functionality related to updating dynamic update packages and
engine upgrades
"""
from smc.base.model import SubElement
from smc.administration.tasks import Task
class PackageMixin(object):
"""
Manages downloads and activations of update packages and software
upgrades
"""
def download(self, timeout=5, wait_for_finish=False):
"""
Download Package or Engine Update
:param int timeout: timeout between queries
:raises TaskRunFailed: failure during task status
:rtype: TaskOperationPoller
"""
return Task.execute(self, 'download', timeout=timeout,
wait_for_finish=wait_for_finish)
def activate(self, resource=None, timeout=3, wait_for_finish=False):
"""
Activate this package on the SMC
:param list resource: node hrefs to activate on. Resource is only
required for software upgrades
:param int timeout: timeout between queries
:raises TaskRunFailed: failure during activation (downloading, etc)
:rtype: TaskOperationPoller
"""
return Task.execute(self, 'activate', json={'resource': resource},
timeout=timeout, wait_for_finish=wait_for_finish)
@property
def release_notes(self):
"""
HTTP location of the release notes
"""
return self.data.get('release_notes')
class EngineUpgrade(PackageMixin, SubElement):
"""
Engine Upgrade package management
For example, to check engine upgrades and find a specific
one, then download for installation::
system = System()
upgrades = system.engine_upgrade()
package = upgrades.get_contains('6.2')
poller = package.download(wait_for_finish=True)
while not poller.done():
print(poller.result(3))
print("Finished download: %s" % poller.result())
package.activate()
"""
@property
def release_date(self):
"""
Release date for this engine upgrade
"""
return self.data.get('release_date')
@property
def version(self):
"""
Engine upgrade version
"""
return self.data.get('version')
@property
def platform(self):
"""
Platform for this engine upgrade
"""
return self.data.get('platform')
class UpdatePackage(PackageMixin, SubElement):
"""
Container for managing update packages on SMC
Download and activate a package::
system = System()
packages = system.update_package()
dynup = packages.get_contains('1007')
poller = dynup.download(wait_for_finish=True)
while not poller.done():
print(poller.result(3))
print("Finished download: %s" % poller.result())
dynup.activate()
"""
@property
def activation_date(self):
"""
Date this update was activated, if any
:rtype: str
"""
return self.data.get('activation_date')
@property
def package_id(self):
"""
ID of the package. These will increment as new versions
are released.
:rtype: str
"""
return self.data.get('package_id')
@property
def release_date(self):
"""
Date of release
:rtype: str
"""
return self.data.get('release_date')
@property
def state(self):
"""
State of this package as string. Valid states are available, imported, active.
If the package is available, you can execute a download. If the package is
imported, you can activate.
:rtype: str
"""
return self.data.get('state')
|
"""Contains class for model training."""
import os
import itertools
import time
import numpy as np
import imageio
import torch
import torch.optim as optim
from torch import nn
from torch.utils.data import DataLoader
from ..utils.visualize import (
plot_boxes, plot_grad_flow, plot_patches, plot_bg, get_reward_annotation)
from ..utils.utils import ExperimentLogger, bw_transform
import wandb
class AbstractTrainer:
"""Abstract trainer class.
Exists, s.t. Trainer can share code between STOVE and supervised approach.
"""
def __init__(self, config, stove, train_dataset, test_dataset):
"""Set up abstract trainer."""
self.stove = stove
self.params = stove.parameters()
if config.debug_test_mode:
config.print_every = 1
config.plot_every = 1
self.c = config
# implemented as property, s.t. train_dataset can easily be overwritten
# from the outside for mcts loop training
self.dataloader = train_dataset
self.test_dataset = test_dataset
self.test_dataloader = DataLoader(
test_dataset,
batch_size=self.c.batch_size,
shuffle=True,
num_workers=4,
drop_last=True)
self.optimizer = optim.Adam(
self.stove.parameters(),
lr=self.c.learning_rate,
amsgrad=self.c.debug_amsgrad)
if self.c.load_encoder is not None:
print('Pre-loading encoder from checkpoint!')
self.load_encoder()
if not self.c.supair_grad:
self.disable_supair_grad()
# if we restore from checkpoint, also restore epoch and step
self.epoch_start, self.step_start = 0, 0
if self.c.checkpoint_path is not None:
self.load()
@property
def dataloader(self):
"""Return train_dataset if set already."""
return self._train_dataset
@dataloader.setter
def dataloader(self, train_dataset):
"""Set train_dataset by wrapping DataLoader."""
self._train_dataset = DataLoader(
train_dataset,
batch_size=self.c.batch_size,
shuffle=True,
num_workers=self.c.num_workers,
drop_last=True)
def save(self, epoch, step):
"""Save model dict, optimizer and progress indicator."""
path = os.path.join(self.logger.checkpoint_dir, 'ckpt')
torch.save({
'epoch': epoch,
'step': step,
'model_state_dict': self.stove.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
}, path + '_{:05d}'.format(step))
torch.save({
'epoch': epoch,
'step': step,
'model_state_dict': self.stove.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
}, path)
print('Parameters saved to {}'.format(self.logger.exp_dir))
def load(self):
"""Load model dict from checkpoint."""
checkpoint = torch.load(
self.c.checkpoint_path, map_location=self.c.device)
# stay compatible with old loading
if 'model_state_dict' in checkpoint.keys():
self.stove.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
step = checkpoint['step']
self.epoch_start = epoch
self.step_start = step
else:
self.stove.load_state_dict(checkpoint)
print('Parameters loaded from {}.'.format(self.c.checkpoint_path))
def load_encoder(self):
"""Load weights of encoder.
Adapted from discuss.pytorch.org/t/23962.
"""
pretrained_dict = torch.load(
self.c.load_encoder, map_location=self.c.device)
pretrained_dict = pretrained_dict['model_state_dict']
model_dict = self.stove.state_dict()
# 1. filter out unnecessary keys, only load spn
pretrained_dict = {
k: v for k, v in pretrained_dict.items()
if k in model_dict and 'encoder' in k}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
self.stove.load_state_dict(model_dict)
print('Loaded the following supair parameters from {}:'.format(
self.c.load_encoder))
print(pretrained_dict.keys())
def disable_supair_grad(self):
"""Disable gradient for SuPAIR if desired."""
for p in self.stove.sup.parameters():
p.requires_grad = False
for n, p in self.stove.named_parameters():
print('Gradients for {} enabled: {}'.format(n, p.requires_grad))
def init_t(self, tensor):
"""Move tensor to self.c.device and cast to self.c.dtype."""
return tensor.type(self.c.dtype).to(device=self.c.device)
def adjust_learning_rate(self, optimizer, value, step):
"""Adjust learning rate during training."""
lr = self.c.learning_rate * np.exp(-step / value)
lr = max(lr, self.c.min_learning_rate)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
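# Worked example (comments only): with learning_rate=1e-3, value=2e4 and
# step=2e4 this gives lr = 1e-3 * exp(-1) ~= 3.7e-4, floored at
# min_learning_rate.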
def prediction_error(self):
"""Abstract method."""
raise ValueError('Needs to be overwritten by derived class.')
def plot_results(self):
"""Abstract method."""
raise ValueError('Needs to be overwritten by derived class.')
def test(self):
"""Abstract method."""
raise ValueError('Needs to be overwritten by derived class.')
def train(self):
"""Abstract method."""
raise ValueError('Needs to be overwritten by derived class.')
def long_rollout(self):
"""Abstract method."""
raise ValueError('Needs to be overwritten by derived class.')
class Trainer(AbstractTrainer):
"""Trainer for model optimization.
Fully compatible with STOVE as well as action-conditioned STOVE.
"""
def __init__(self, config, stove, train_dataset, test_dataset):
"""Set up trainer.
This is conveniently called with main.py. Given a valid config, main.py
takes care of initialising the trainer, model and dataset.
Do not modify the config object.
"""
super().__init__(config, stove, train_dataset, test_dataset)
self.logger = ExperimentLogger(self.c)
# differentiate between z from dynamics , z from supair, and
# combined z
self.z_types = ['z', 'z_sup', 'z_dyn'] if not self.c.supair_only else ['z']
if self.c.action_conditioned:
if not self.c.debug_mse:
self.reward_loss = nn.BCELoss()
else:
self.reward_loss = nn.MSELoss()
def prediction_error(self, predicted, true,
return_velocity=True, return_id_swaps=True,
return_full=False, return_matched=False,
level='sequence'):
"""Error of predicted positions and velocities against ground truth.
Args:
predicted (torch.Tensor), (n, T, o, 4): Stoves positions and
velocities.
true (torch.Tensor), (n, T, o, 4): Positions and velocities from env.
return_velocity (bool): Return velocity errors.
return_id_swaps (bool): Return percentage of id swaps over sequence.
return_full (bool): Return errors over T dimension.
return_matched (bool): Return permuted positions and velocities.
level (str): Set to 'sequence' or 'image'. Object orderings for a
sequence are not aligned, b/c the model is unsupervised. They need to be
matched. Specify the level at which to match the true and predicted
object ordering. For physics prediction, we should only allow
one global permutation of the predicted object ordering over the
sequence, since we want id swaps to affect the error. For SuPAIR
(only) training, we want to get the error per image.
Returns:
res (dict): Results dictionary containing errors, as set by the
above flags.
"""
if self.c.supair_only:
return_velocity = False
level = 'image'
# use at most the first 4 time values for assigning object ordering
T = min(4, predicted.shape[1])
pos_pred = predicted[..., :2]
pos_true = true[..., :2]
errors = []
permutations = list(itertools.permutations(range(0, self.c.num_obj)))
if level == 'sequence':
# unsupervised learning cannot be punished if it gets object order wrong
# therefore get all n! object combinations and take lowest error for
# a given sequence!
for perm in permutations:
error = ((pos_pred[:, :T, perm] - pos_true[:, :T])**2).sum(-1)
error = torch.sqrt(error).mean((1, 2))
errors += [error]
"""sum_k/T(sum_j/o(root(sum_i((x_i0-x_i1)**2))))
sum_i over x and y coordinates -> root(sum squared) is
distance of objects for that permutation. sum j is then over
all objects in image and sum_k over all images in sequence.
that way we do 1 assignment of objects over whole sequence!
this loss will now punish id swaps over sequence. sum_j and
_k are mean. st. we get the mean distance to true position
"""
# shape (n, o!)
errors = torch.stack(errors, 1)
# sum to get error per image
_, idx = errors.min(1)
# idx now contains a single winning permutation per sequence!
selector = list(zip(range(idx.shape[0]), idx.cpu().tolist()))
pos_matched = [pos_pred[i, :, permutations[j]] for i, j in selector]
pos_matched = torch.stack(pos_matched, 0)
elif level == 'image':
pos_pred_f = pos_pred.flatten(end_dim=1)
pos_true_f = pos_true.flatten(end_dim=1)
for perm in permutations:
errors += [torch.sqrt(
((pos_pred_f[:, perm] - pos_true_f)**2).sum(-1)).mean((1))]
errors = torch.stack(errors, 1)
_, idx = errors.min(1)
selector = list(zip(range(idx.shape[0]), idx.cpu().tolist()))
pos_matched = [pos_pred_f[i, permutations[j]] for i, j in selector]
pos_matched = torch.stack(pos_matched, 0)
pos_matched = pos_matched.reshape(predicted[..., :2].shape)
else:
raise ValueError
res = {}
if not return_full:
min_errors = torch.sqrt(
((pos_matched - pos_true)**2).sum(-1)).mean((1, 2))
res['error'] = min_errors.mean().cpu()
res['std_error'] = min_errors.std().cpu()
else:
# return error over sequence!
# get correctly matched sequence
error_over_sequence = torch.sqrt(
((pos_matched - pos_true)**2).sum(-1)).mean(-1)
res['error'] = error_over_sequence.mean(0).cpu()
res['std_error'] = error_over_sequence.std(0).cpu()
if return_velocity:
# get velocities and transform
vel_pred = predicted[..., 2:4]
# get correctly matched velocities
vel_matched = [vel_pred[i, :, permutations[j]] for i, j in selector]
vel_matched = torch.stack(vel_matched, 0)
# again. root of sum of squared distances per object. mean over image.
vel_true = true[..., 2:]
v_errors = torch.sqrt(((vel_true - vel_matched)**2).sum(-1)).mean(-1)
if not return_full:
res['v_error'] = v_errors.mean().cpu()
res['std_v_error'] = v_errors.std().cpu()
else:
# do mean along all images in sequence
res['v_error'] = v_errors.mean(0).cpu()
res['std_v_error'] = v_errors.std(0).cpu()
if return_matched:
res['pos_matched'] = pos_matched
res['vel_matched'] = vel_matched
if return_id_swaps:
# Do min over img instead of over sequence. This is equiv to old
# way of calculating error.
errors = []
for perm in permutations:
# this is distance of object pairing
predicted_f = pos_pred.flatten(end_dim=1)
true_f = pos_true.flatten(end_dim=1)
errors += [torch.sqrt(((predicted_f[:, perm] - true_f)**2).sum(-1))]
errors = torch.stack(errors, 1)
# mean per img then min along stack axis
_, idx = errors.mean(-1).min(1)
# how many sequences contain more than one object ordering
idx = idx.reshape(true.shape[:2])[:, :]
id_swaps = torch.chunk(idx, idx.shape[1], 1)
id_swaps = [i.squeeze() for i in id_swaps]
# compare each t to t+1 in terms of minimum indices, returns n times 0 or 1
# for each comparison
id_swaps = [id_swaps[i] == id_swaps[i+1] for i in range(len(id_swaps)-1)]
id_swaps = torch.stack(id_swaps, 1)
# if each is the same as its neighbor, the whole seq is the same (1)
# if not, there are zeros in prod
id_swaps = torch.prod(id_swaps, 1)
# sum to get number instead of 0, 1 list
id_swaps = id_swaps.sum()
id_swap_percentage = (idx.shape[0] - id_swaps.cpu().double()) / idx.shape[0]
res['swaps'] = id_swap_percentage
return res
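# Illustration (comments only): with num_obj == 2 the permutations are (0, 1)
# and (1, 0). Taking the minimum over both means a globally swapped object
# ordering is never penalised, while with level='sequence' an id swap inside a
# sequence still inflates the error; the separate 'swaps' entry reports the
# fraction of sequences whose per-image best ordering changes.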
def plot_results(self, step_counter, images, prop_dict, future=False):
"""Plot images of sequences and predicted bounding boxes.
Currently not in use.
"""
# just give first sequences
n_seq = self.c.n_plot_sequences
plot_images = images[:n_seq].detach().cpu().numpy()
# ignore velocities if available
plot_z = prop_dict['z'].flatten(end_dim=1)[..., :4].cpu().numpy()
if not self.c.nolog:
add = '_rollout' if future else ''
save_path = os.path.join(
self.logger.img_dir, '{:05d}{}.png'.format(step_counter, add))
else:
save_path = None
plot_boxes(
plot_images, plot_z,
self.c.width,
self.c.height,
n_sequences=n_seq,
future=future,
save_path=save_path,
)
if self.c.debug_extend_plots:
overlap = prop_dict['overlap_ratios'].cpu().numpy()
marg_patch = prop_dict['marginalise_flat'].detach().reshape(
(-1, self.c.patch_width, self.c.patch_height)).cpu().numpy()
marginalise_bg = prop_dict['marginalise_bg'].cpu().numpy()
bg_loglik = prop_dict['bg_loglik'].cpu().numpy()
plot_bg(marginalise_bg, bg_loglik, n_sequences=n_seq)
patches = prop_dict['patches'].cpu().numpy()
patches_ll = prop_dict['patches_loglik'].cpu().numpy()
plot_patches(patches, marg_patch, overlap, patches_ll, self.c)
def train(self, num_epochs=None):
"""Run training loop.
Also takes care of intermediate testing and logging.
"""
print('Starting training for {}'.format(self.c.description))
print('Only pretraining.' if self.c.supair_only else 'Full inference.')
start_epoch = self.epoch_start
step_counter = self.step_start
start = time.time()
if not self.c.supair_only:
self.test(step_counter, start)
if num_epochs is None:
num_epochs = self.c.num_epochs
for epoch in range(start_epoch, num_epochs):
for data in self.dataloader:
now = time.time() - start
step_counter += 1
if self.c.debug_anneal_lr:
self.adjust_learning_rate(
self.optimizer, self.c.debug_anneal_lr, step_counter)
# Load data
images = self.init_t(data['present_images'])
if self.c.action_conditioned:
actions = self.init_t(data['present_actions'])
else:
actions = None
# Model optimization
self.optimizer.zero_grad()
elbo, prop_dict, rewards = self.stove(
images,
step_counter,
actions,
self.c.supair_only)
min_ll = -1.0 * elbo
if self.c.action_conditioned:
target_rewards = data['present_rewards'][:, self.c.skip:]
target_rewards = self.init_t(target_rewards)
mse_rewards = self.reward_loss(
rewards.flatten(), target_rewards.flatten())
if self.c.debug_reward_rampup is not False:
reward_weight = min(
1, step_counter/self.c.debug_reward_rampup)
else:
reward_weight = 1
reward_factor = self.c.debug_reward_factor
min_ll = min_ll + reward_factor * reward_weight * mse_rewards
else:
mse_rewards = torch.Tensor([0])
min_ll.backward()
if self.c.debug_gradient_clip:
torch.nn.utils.clip_grad_norm_(self.stove.parameters(), 1)
self.optimizer.step()
# Plot examples
if step_counter % self.c.plot_every == 0:
plot_grad_flow(self.stove.named_parameters())
# full states only available after some steps
plot_images = images[:, self.c.skip:]
self.plot_results(step_counter, plot_images, prop_dict)
# Print and log performance
if step_counter % self.c.print_every == 0:
self.error_and_log(
elbo.item(), mse_rewards.item(), min_ll.item(),
prop_dict, data, step_counter, now)
wandb.log({'elbo': elbo.item(), 'mse_reward': mse_rewards.item(), 'min_ll': min_ll.item(),
'step_counter': step_counter, 'epoch': epoch})
# Save parameters
if step_counter % self.c.save_every == 0:
self.save(epoch, step_counter)
if step_counter % self.c.long_rollout_every == 0:
self.long_rollout(idx=[0, 1])
if self.c.debug_test_mode and not self.c.supair_only:
self.save(0, 0)
self.test(step_counter, now)
break
# Test each epoch
if not self.c.supair_only:
self.test(step_counter, start)
print("Epoch: ", epoch, " finished.")
if self.c.debug_test_mode:
break
# Create some more rollouts at the end of training
if not self.c.debug_test_mode and not self.c.supair_only:
self.long_rollout(step_counter=step_counter)
# Save model in final state
if not self.c.nolog:
self.save(epoch, step_counter)
succ = os.path.join(self.logger.exp_dir, 'success')
open(succ, 'w').close()
print('Finished Training!')
def error_and_log(self, elbo, reward, min_ll, prop_dict, data, step_counter,
now, add=''):
"""Format performance metrics and pass them to logger.
Args:
elbo (float): Elbo value.
reward (float): Mean reward value.
min_ll (float): Total loss.
prop_dict (dict): Dict from model containing further metrics.
data (dict): Current data dict. Needed to compute errors.
step_counter (int): Current step.
now (int): Time elapsed.
add (str): Identifier for log entries. Used if, e.g. this function
is called from test() rather than train().
"""
skip = self.c.skip
# perf_dict contains performance values and will be passed to logger
perf_dict = {
'step': step_counter,
'time': now,
'elbo': elbo,
'reward': reward,
'min_ll': min_ll}
# non z entries
other_keys = list(filter(lambda x: x[0] != 'z', list(prop_dict.keys())))
other_dict = {key: prop_dict[key] for key in other_keys}
perf_dict.update(other_dict)
# get errors for each of the z types
z_true = data['present_labels'][:, skip:]
z_true = self.init_t(z_true)
for z in self.z_types:
if z in ['z', 'z_sup']:
# have scales and need to ignore for prediction_error
predicted = prop_dict[z][..., 2:]
scales = prop_dict[z].flatten(end_dim=2)[:, :2].mean(0)
perf_dict['scale_x'] = scales[0]
perf_dict['scale_y'] = scales[1]
else:
predicted = prop_dict[z]
perf_dict['scale_x'] = float('nan')
perf_dict['scale_y'] = float('nan')
error_dict = self.prediction_error(
predicted, z_true)
perf_dict.update(error_dict)
z_std = prop_dict[z+'_std']
for i, std in enumerate(z_std):
perf_dict[z+'_std_{}'.format(i)] = std
perf_dict['type'] = z + add
self.logger.performance(
perf_dict)
@torch.no_grad()
def test(self, step_counter, start):
"""Evaluate performance on test data.
Additionally
- tests performance of generative model, i.e. rollout performance,
- creates a rollout gif.
Args:
step_counter (int): Current step.
start (int): Time at beginning of training
"""
self.stove.eval()
skip = self.c.skip
for i, data in enumerate(self.test_dataloader, 0):
now = time.time() - start
# Load Data
present = self.init_t(data['present_images'])
if self.c.action_conditioned:
actions = self.init_t(data['present_actions'])
future_actions = self.init_t(data['future_actions'])
future_rewards = self.init_t(data['future_rewards'])
else:
actions, future_actions, future_rewards = None, None, None
# Propagate through model
elbo, prop_dict, rewards = self.stove(
present,
self.c.plot_every,
actions,
self.c.supair_only
)
min_ll = -1.0 * elbo
if self.c.action_conditioned:
target_rewards = self.init_t(data['present_rewards'][:, skip:])
mse_rewards = self.reward_loss(
rewards.flatten(), target_rewards.flatten())
if self.c.debug_reward_rampup is not False:
reward_weight = min(
1, step_counter/self.c.debug_reward_rampup)
else:
reward_weight = 1
reward_factor = self.c.debug_reward_factor
min_ll = min_ll + reward_factor * reward_weight * mse_rewards
else:
mse_rewards = torch.Tensor([0])
# Log Errors
self.error_and_log(
elbo.item(), mse_rewards.item(), min_ll.item(), prop_dict, data,
step_counter, now, add='_roll')
if self.c.debug_core_appearance:
appearances = prop_dict['obj_appearances'][:, -1]
else:
appearances = None
z_pred, rewards_pred = self.stove.rollout(
prop_dict['z'][:, -1], actions=future_actions,
appearance=appearances)
if self.c.action_conditioned:
future_reward_loss = self.reward_loss(
rewards_pred.flatten(), future_rewards.flatten())
else:
future_reward_loss = 0
z_true = self.init_t(data['future_labels'])
error_dict = self.prediction_error(
z_pred[..., 2:], z_true)
perf_dict = {
'step': step_counter, 'time': now, 'elbo': elbo,
'reward': future_reward_loss}
perf_dict.update(error_dict)
other_keys = list(filter(lambda x: x[0] != 'z', list(prop_dict.keys())))
other_dict = {key: prop_dict[key] for key in other_keys}
perf_dict.update(other_dict)
perf_dict['type'] = 'rollout'
self.logger.performance(perf_dict)
if self.c.debug_test_mode:
break
if i > 7:
break
self.stove.train()
@torch.no_grad()
def long_rollout(self, idx=None, actions=None, step_counter=None):
"""Create one long rollout and save it as an animated GIF.
Args:
idx (list): Indexes of sequence in test data set.
            actions (n, T): Pass actions different from those in the test
                set to see if the model has understood actions.
            step_counter (int): Current step; used to tag saved rollout state files.
        """
self.stove.eval()
step = self.c.frame_step
vis = self.c.num_visible
batch_size = self.test_dataset.total_img.shape[0]
run_len = self.test_dataset.total_img.shape[1]
# repeat actions for action conditioned setting for long rollout
states_save_len = 500
skip = self.c.skip
max_rollout = self.c.num_frames // step - vis
if states_save_len > max_rollout:
print("Wrapping around actions for long rollouts.")
# idx of items for gifs
if idx is None:
idx = list(range(20))
np_total_images = self.test_dataset.total_img
np_total_labels = self.test_dataset.total_data
# apply step and batch size once
total_images = self.init_t(torch.tensor(
np_total_images[:batch_size, ::step]))
total_labels = self.init_t(torch.tensor(
np_total_labels[:batch_size, ::step]))
if self.c.action_conditioned:
if actions is None:
total_actions = self.test_dataset.total_actions
else:
total_actions = actions
total_actions = self.init_t(torch.tensor(
total_actions[:batch_size, ::step]))
action_input = total_actions[:, :vis]
total_rewards = self.test_dataset.total_rewards
total_rewards = total_rewards[:batch_size, ::step]
real_rewards = total_rewards[:, self.c.skip:, 0]
# need some actions for rollout
true_future_actions = total_actions[
:, vis:(vis+max_rollout)]
action_recon = total_actions[:, :(vis+max_rollout)]
else:
action_input = None
true_future_actions = None
action_recon = None
# first obtain reconstruction of input.
stove_input = total_images[:, :vis]
_, prop_dict2, rewards_recon = self.stove(
stove_input, self.c.plot_every, action_input)
z_recon = prop_dict2['z']
# use last state to do rollout
if self.c.debug_core_appearance:
appearances = prop_dict2['obj_appearances'][:, -1]
else:
appearances = None
z_pred, rewards_pred = self.stove.rollout(
z_recon[:, -1], num=states_save_len, actions=true_future_actions,
appearance=appearances)
# assemble complete sequences as concat of reconstruction and prediction
simu_recon = z_recon.detach()
simu_rollout = z_pred.detach()
simu = torch.cat([simu_recon, simu_rollout], 1)
        # get prediction error over long sequence for logging
real_labels = total_labels[:, skip:(vis+max_rollout)]
predicted_labels = simu[:, :(vis+max_rollout)-skip, :, 2:6]
error_dict = self.prediction_error(
predicted_labels, real_labels,
return_velocity=True, return_full=True, return_id_swaps=False)
for name, data in error_dict.items():
file = os.path.join(self.logger.exp_dir, '{}.csv'.format(name))
with open(file, 'a') as f:
f.write(','.join(['{:.6f}'.format(i) for i in data])+'\n')
# also get a reconstruction of z along entire sequence
stove_input = total_images[:, :(vis+max_rollout)]
elbo, prop_dict3, recon_reward_total = self.stove(
stove_input, self.c.plot_every, actions=action_recon)
recon = prop_dict3['z'].detach()
recon_reward_total = recon_reward_total.cpu().numpy()
if self.c.action_conditioned:
# add rewards to gif
rewards_model = torch.cat(
[rewards_recon, rewards_pred], 1).squeeze()
rewards_model = rewards_model.detach().cpu().numpy()
# log states from recon and reward
if step_counter is not None:
add = ['', '_{:05d}'.format(step_counter)]
else:
add = ['']
states_dir = self.logger.rollout_states_dir
save = lambda name, data, a: np.save(
os.path.join(
states_dir, '{}{}'.format(name, a)),
data.cpu().numpy() if isinstance(data, torch.Tensor) else data)
for a in add:
save('rollout_states', simu, a)
save('recon_states', recon, a)
if self.c.action_conditioned:
save('rollout_rewards', rewards_model, a)
save('recon_rewards', recon_reward_total, a)
save('real_states', real_labels, '')
if self.c.action_conditioned:
save('real_rewards', real_rewards, '')
# Make Gifs
gif_path = self.logger.rollout_gifs_dir
print("Make GIFs in {}".format(gif_path))
names = ['real', 'rollout', 'recon']
if self.c.channels != 3:
stove_input = bw_transform(stove_input)
gifs = [
stove_input[idx, skip:],
self.stove.reconstruct_from_z(simu[idx, :(vis+max_rollout)-skip]),
self.stove.reconstruct_from_z(recon[idx]),
]
gifs = [gif.detach().cpu().numpy() for gif in gifs]
if self.c.action_conditioned:
rewards = [
real_rewards[idx],
rewards_model[idx, :(vis+max_rollout)-skip],
recon_reward_total[idx]]
else:
rewards = len(gifs) * [None]
for gif, name, reward in zip(gifs, names, rewards):
gif = (255 * gif).reshape(
-1, self.c.channels, self.c.width, self.c.width).astype('uint8')
gif = np.squeeze(gif)
if reward is not None:
reward = reward.reshape(-1)
gif[reward < 0.5] = 255 - gif[reward < 0.5]
res = self.c.width
reward_annotations = get_reward_annotation(
reward, res=res, color=False)
reward_annotations = reward_annotations.astype('uint8')
gif = np.concatenate([reward_annotations, gif], 1)
gif = gif.astype('uint8')
imageio.mimsave(
os.path.join(gif_path, name+'.gif'), gif, fps=24)
print("Done")
# Set stove to train mode again.
self.stove.train()
|
# vim:fileencoding=utf-8:noet
import sys
if sys.version_info < (2, 7):
from unittest2 import TestCase, main # NOQA
from unittest2.case import SkipTest # NOQA
else:
from unittest import TestCase, main # NOQA
from unittest.case import SkipTest # NOQA
|
"""
:file: tracker.py
:author(s): Louis Cruz, Frank Chan, Orens Xhagolli
:description: Module that contains functions that poll the LeapMotion device
"""
import Leap #LeapMotion module
import math
def get_hand(controller, hand, log=False):
""" Hand handler.
    :param controller: (Leap.Controller()) A Leap.Controller() instance to be passed.
    :param hand: (str) Which hand to track, either 'Right' or 'Left'.
    :param log: (Boolean) Do you wish to keep a log of the function?
:return: (int) An integer from 1-12 representing the height of the hand detected.
"""
hands = controller.frame().hands
    if hands.rightmost.is_right and hand == 'Right':
        position = math.floor(hands.rightmost.palm_position.y//25)
    elif hands.leftmost.is_left and hand == 'Left':
        position = math.floor(hands.leftmost.palm_position.y//25)
else:
if log:
print(hand + " hand not detected, normal speed assumed.")
return 6
if log:
print(hand + " detected, height: "+ str(position)+'.')
if position > 12:
return 12
elif position < 1:
return 6
else:
return position
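# Hypothetical usage sketch (not part of the original module; assumes the Leap
# SDK is installed and a device is connected): poll the right hand a few times
# and print the mapped height level returned by get_hand().
if __name__ == '__main__':
    import time
    _controller = Leap.Controller()
    for _ in range(10):
        print(get_hand(_controller, 'Right', log=True))
        time.sleep(0.5)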
|
##
# Copyright 2021 IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
##
from .neuron import _NeuronActivation
"""
Static neuron activation function
"""
class _StaticActivation(_NeuronActivation):
pass
|
"""
GaleraCnf - file ``/etc/my.cnf.d/galera.cnf``
=============================================
This module provides parsing for the galera configuration of
MySQL. The input is the contents of the file
`/etc/my.cnf.d/galera.cnf`. Typical contents of the `galera.cnf`
file looks like this::
[client]
port = 3306
socket = /var/lib/mysql/mysql.sock
[isamchk]
key_buffer_size = 16M
[mysqld]
basedir = /usr
binlog_format = ROW
datadir = /var/lib/mysql
default-storage-engine = innodb
expire_logs_days = 10
innodb_autoinc_lock_mode = 2
innodb_locks_unsafe_for_binlog = 1
key_buffer_size = 16M
log-error = /var/log/mariadb/mariadb.log
max_allowed_packet = 16M
max_binlog_size = 100M
max_connections = 8192
wsrep_max_ws_rows = 131072
wsrep_max_ws_size = 1073741824
[mysqld_safe]
log-error = /var/log/mariadb/mariadb.log
nice = 0
socket = /var/lib/mysql/mysql.sock
[mysqldump]
max_allowed_packet = 16M
quick
quote-names
See the ``IniConfigFile`` base class for examples.
"""
from .. import parser, IniConfigFile
from insights.specs import Specs
@parser(Specs.galera_cnf)
class GaleraCnf(IniConfigFile):
"""Parses the content of `/etc/my.cnf.d/galera.cnf`."""
def parse_content(self, content, allow_no_value=True):
"""Calls parent method to parse contents but overrides parameters.
The galera config file may have keys with no value. This class
implements ``parse_content`` in order to pass the flag
``allow_no_value`` to the parent parser in order to allow parsing
of the no-value keys.
"""
super(GaleraCnf, self).parse_content(content, allow_no_value=allow_no_value)
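# Hypothetical usage sketch (not from the original module; assumes the standard
# insights-core ``IniConfigFile`` accessors and the ``context_wrap`` test helper):
#
#     conf = GaleraCnf(context_wrap(GALERA_CNF_CONTENT))
#     conf.get('mysqld', 'wsrep_max_ws_rows')    # -> '131072'
#     conf.has_option('mysqldump', 'quick')      # -> True (a no-value key)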
|
from flask import Blueprint
main = Blueprint('main', __name__,
template_folder='templates',
static_folder='static',
static_url_path='/main')
from . import routes
|
import strax
import straxen
from straxen.get_corrections import get_correction_from_cmt
import numpy as np
import numba
from straxen.numbafied_scipy import numba_gammaln, numba_betainc
from scipy.special import loggamma
import tarfile
import tempfile
export, __all__ = strax.exporter()
@export
@strax.takes_config(
strax.Option('s1_optical_map', help='S1 (x, y, z) optical/pattern map.', infer_type=False,
default='XENONnT_s1_xyz_patterns_LCE_corrected_qes_MCva43fa9b_wires.pkl'),
strax.Option('s2_optical_map', help='S2 (x, y) optical/pattern map.', infer_type=False,
default='XENONnT_s2_xy_patterns_LCE_corrected_qes_MCva43fa9b_wires.pkl'),
strax.Option('s2_tf_model', help='S2 (x, y) optical data-driven model', infer_type=False,
default='XENONnT_s2_optical_map_data_driven_ML_v0_2021_11_25.tar.gz'),
    strax.Option('s1_aft_map', help='Data-driven S1 area fraction top map.', infer_type=False,
default='s1_aft_dd_xyz_XENONnT_Kr83m_41500eV_31Oct2021.json'),
strax.Option('mean_pe_per_photon', help='Mean of full VUV single photon response',
default=1.2, infer_type=False,),
strax.Option('gain_model', infer_type=False,
help='PMT gain model. Specify as (model_type, model_config)'),
strax.Option('n_tpc_pmts', type=int,
help='Number of TPC PMTs'),
strax.Option('n_top_pmts', type=int,
help='Number of top TPC PMTs'),
strax.Option('s1_min_area_pattern_fit', infer_type=False,
help='Skip EventPatternFit reconstruction if S1 area (PE) is less than this',
default=2),
strax.Option('s2_min_area_pattern_fit', infer_type=False,
help='Skip EventPatternFit reconstruction if S2 area (PE) is less than this',
default=10),
strax.Option('store_per_channel', default=False, type=bool,
help='Store normalized LLH per channel for each peak'),
strax.Option('max_r_pattern_fit', default=straxen.tpc_r, type=float,
help='Maximal radius of the peaks where llh calculation will be performed'),
strax.Option(name='electron_drift_velocity', infer_type=False,
help='Vertical electron drift velocity in cm/ns (1e4 m/ms)',
default=("electron_drift_velocity", "ONLINE", True)),
strax.Option(name='electron_drift_time_gate', infer_type=False,
help='Electron drift time from the gate in ns',
default=("electron_drift_time_gate", "ONLINE", True)),
)
class EventPatternFit(strax.Plugin):
'''
    Plugin that provides pattern information for events
'''
depends_on = ('event_area_per_channel', 'event_basics', 'event_positions')
provides = 'event_pattern_fit'
__version__ = '0.1.1'
def infer_dtype(self):
dtype = [('s2_2llh', np.float32,
'Modified Poisson likelihood value for main S2 in the event'),
('s2_neural_2llh', np.float32,
'Data-driven based likelihood value for main S2 in the event'),
('alt_s2_2llh', np.float32,
'Modified Poisson likelihood value for alternative S2'),
('alt_s2_neural_2llh', np.float32,
'Data-driven based likelihood value for alternative S2 in the event'),
('s1_2llh', np.float32,
'Modified Poisson likelihood value for main S1'),
('s1_top_2llh', np.float32,
'Modified Poisson likelihood value for main S1, calculated from top array'),
('s1_bottom_2llh', np.float32,
'Modified Poisson likelihood value for main S1, calculated from bottom array'),
('s1_area_fraction_top_continuous_probability', np.float32,
'Continuous binomial test for S1 area fraction top'),
('s1_area_fraction_top_discrete_probability', np.float32,
'Discrete binomial test for S1 area fraction top'),
('s1_photon_fraction_top_continuous_probability', np.float32,
'Continuous binomial test for S1 photon fraction top'),
('s1_photon_fraction_top_discrete_probability', np.float32,
'Discrete binomial test for S1 photon fraction top'),
('alt_s1_area_fraction_top_continuous_probability', np.float32,
'Continuous binomial test for alternative S1 area fraction top'),
('alt_s1_area_fraction_top_discrete_probability', np.float32,
'Discrete binomial test for alternative S1 area fraction top'),
('alt_s1_photon_fraction_top_continuous_probability', np.float32,
'Continuous binomial test for alternative S1 photon fraction top'),
('alt_s1_photon_fraction_top_discrete_probability', np.float32,
'Discrete binomial test for alternative S1 photon fraction top')]
if self.config['store_per_channel']:
dtype += [
(('2LLH per channel for main S2', 's2_2llh_per_channel'),
np.float32, (self.config['n_top_pmts'], )),
(('2LLH per channel for alternative S2', 'alt_s2_2llh_per_channel'),
np.float32, (self.config['n_top_pmts'], )),
(('Pattern main S2', 's2_pattern'),
np.float32, (self.config['n_top_pmts'], )),
(('Pattern alt S2', 'alt_s2_pattern'),
np.float32, (self.config['n_top_pmts'], )),
(('Pattern for main S1', 's1_pattern'),
np.float32, (self.config['n_tpc_pmts'], )),
(('2LLH per channel for main S1', 's1_2llh_per_channel'),
np.float32, (self.config['n_tpc_pmts'], )),
]
dtype += strax.time_fields
return dtype
def setup(self):
self.electron_drift_velocity = get_correction_from_cmt(self.run_id, self.config['electron_drift_velocity'])
self.electron_drift_time_gate = get_correction_from_cmt(self.run_id, self.config['electron_drift_time_gate'])
self.mean_pe_photon = self.config['mean_pe_per_photon']
# Getting S1 AFT maps
self.s1_aft_map = straxen.InterpolatingMap(
straxen.get_resource(
self.config['s1_aft_map'],
fmt=self._infer_map_format(self.config['s1_aft_map'])))
# Getting optical maps
self.s1_pattern_map = straxen.InterpolatingMap(
straxen.get_resource(
self.config['s1_optical_map'],
fmt=self._infer_map_format(self.config['s1_optical_map'])))
self.s2_pattern_map = straxen.InterpolatingMap(
straxen.get_resource(
self.config['s2_optical_map'],
fmt=self._infer_map_format(self.config['s2_optical_map'])))
# Getting S2 data-driven tensorflow models
downloader = straxen.MongoDownloader()
self.model_file = downloader.download_single(self.config['s2_tf_model'])
with tempfile.TemporaryDirectory() as tmpdirname:
tar = tarfile.open(self.model_file, mode="r:gz")
tar.extractall(path=tmpdirname)
import tensorflow as tf
def _logl_loss(patterns_true, likelihood):
return likelihood / 10.
self.model = tf.keras.models.load_model(tmpdirname,
custom_objects={"_logl_loss": _logl_loss})
self.model_chi2 = tf.keras.Model(self.model.inputs,
self.model.get_layer('Likelihood').output)
# Getting gain model to get dead PMTs
self.to_pe = straxen.get_correction_from_cmt(self.run_id, self.config['gain_model'])
self.dead_PMTs = np.where(self.to_pe == 0)[0]
self.pmtbool = ~np.in1d(np.arange(0, self.config['n_tpc_pmts']), self.dead_PMTs)
self.pmtbool_top = self.pmtbool[:self.config['n_top_pmts']]
self.pmtbool_bottom = self.pmtbool[self.config['n_top_pmts']:self.config['n_tpc_pmts']]
def compute(self, events):
result = np.zeros(len(events), dtype=self.dtype)
result['time'] = events['time']
result['endtime'] = strax.endtime(events)
# Computing LLH values for S1s
self.compute_s1_llhvalue(events, result)
# Computing LLH values for S2s
self.compute_s2_llhvalue(events, result)
# Computing chi2 values for S2s
self.compute_s2_neural_llhvalue(events, result)
# Computing binomial test for s1 area fraction top
positions = np.vstack([events['x'], events['y'], events['z']]).T
aft_prob = self.s1_aft_map(positions)
alt_s1_interaction_drift_time = events['s2_center_time']-events['alt_s1_center_time']
alt_s1_interaction_z = -self.electron_drift_velocity*(alt_s1_interaction_drift_time-self.electron_drift_time_gate)
alt_positions = np.vstack([events['x'], events['y'], alt_s1_interaction_z]).T
alt_aft_prob = self.s1_aft_map(alt_positions)
# main s1 events
mask_s1 = ~np.isnan(aft_prob)
mask_s1 &= ~np.isnan(events['s1_area'])
mask_s1 &= ~np.isnan(events['s1_area_fraction_top'])
        # default value is nan; it will be overwritten if the event satisfies the requirements
result['s1_area_fraction_top_continuous_probability'][:] = np.nan
result['s1_area_fraction_top_discrete_probability'][:] = np.nan
result['s1_photon_fraction_top_continuous_probability'][:] = np.nan
result['s1_photon_fraction_top_discrete_probability'][:] = np.nan
# compute binomial test only if we have events that have valid aft prob, s1 area and s1 aft
if np.sum(mask_s1):
arg = aft_prob[mask_s1], events['s1_area'][mask_s1], events['s1_area_fraction_top'][mask_s1]
result['s1_area_fraction_top_continuous_probability'][mask_s1] = s1_area_fraction_top_probability(*arg)
result['s1_area_fraction_top_discrete_probability'][mask_s1] = s1_area_fraction_top_probability(*arg, 'discrete')
arg = aft_prob[mask_s1], events['s1_area'][mask_s1]/self.config['mean_pe_per_photon'], events['s1_area_fraction_top'][mask_s1]
result['s1_photon_fraction_top_continuous_probability'][mask_s1] = s1_area_fraction_top_probability(*arg)
result['s1_photon_fraction_top_discrete_probability'][mask_s1] = s1_area_fraction_top_probability(*arg, 'discrete')
# alternative s1 events
mask_alt_s1 = ~np.isnan(alt_aft_prob)
mask_alt_s1 &= ~np.isnan(events['alt_s1_area'])
mask_alt_s1 &= ~np.isnan(events['alt_s1_area_fraction_top'])
        # default value is nan; it will be overwritten if the event satisfies the requirements
result['alt_s1_area_fraction_top_continuous_probability'][:] = np.nan
result['alt_s1_area_fraction_top_discrete_probability'][:] = np.nan
result['alt_s1_photon_fraction_top_continuous_probability'][:] = np.nan
result['alt_s1_photon_fraction_top_discrete_probability'][:] = np.nan
# compute binomial test only if we have events that have valid aft prob, alt s1 area and alt s1 aft
if np.sum(mask_alt_s1):
arg = aft_prob[mask_alt_s1], events['alt_s1_area'][mask_alt_s1], events['alt_s1_area_fraction_top'][mask_alt_s1]
result['alt_s1_area_fraction_top_continuous_probability'][mask_alt_s1] = s1_area_fraction_top_probability(*arg)
result['alt_s1_area_fraction_top_discrete_probability'][mask_alt_s1] = s1_area_fraction_top_probability(*arg, 'discrete')
arg = aft_prob[mask_alt_s1], events['alt_s1_area'][mask_alt_s1]/self.config['mean_pe_per_photon'], events['alt_s1_area_fraction_top'][mask_alt_s1]
result['alt_s1_photon_fraction_top_continuous_probability'][mask_alt_s1] = s1_area_fraction_top_probability(*arg)
result['alt_s1_photon_fraction_top_discrete_probability'][mask_alt_s1] = s1_area_fraction_top_probability(*arg, 'discrete')
return result
def compute_s1_llhvalue(self, events, result):
# Selecting S1s for pattern fit calculation
# - must exist (index != -1)
        # - must have total area larger than the minimal one
# - must have positive AFT
x, y, z = events['x'], events['y'], events['z']
cur_s1_bool = events['s1_area']>self.config['s1_min_area_pattern_fit']
cur_s1_bool &= events['s1_index']!=-1
cur_s1_bool &= events['s1_area_fraction_top']>=0
cur_s1_bool &= np.isfinite(x)
cur_s1_bool &= np.isfinite(y)
cur_s1_bool &= np.isfinite(z)
cur_s1_bool &= (x**2 + y**2) < self.config['max_r_pattern_fit']**2
        # default value is nan; it will be overwritten if the event satisfies the requirements
result['s1_2llh'][:] = np.nan
result['s1_top_2llh'][:] = np.nan
result['s1_bottom_2llh'][:] = np.nan
# Making expectation patterns [ in PE ]
if np.sum(cur_s1_bool):
s1_map_effs = self.s1_pattern_map(np.array([x, y, z]).T)[cur_s1_bool, :]
s1_area = events['s1_area'][cur_s1_bool]
s1_pattern = s1_area[:, None]*(s1_map_effs[:, self.pmtbool])/np.sum(s1_map_effs[:, self.pmtbool], axis=1)[:, None]
s1_pattern_top = (events['s1_area_fraction_top'][cur_s1_bool]*s1_area)
s1_pattern_top = s1_pattern_top[:, None]*((s1_map_effs[:, :self.config['n_top_pmts']])[:, self.pmtbool_top])
s1_pattern_top /= np.sum((s1_map_effs[:, :self.config['n_top_pmts']])[:, self.pmtbool_top], axis=1)[:, None]
s1_pattern_bottom = ((1-events['s1_area_fraction_top'][cur_s1_bool])*s1_area)
s1_pattern_bottom = s1_pattern_bottom[:, None]*((s1_map_effs[:, self.config['n_top_pmts']:])[:, self.pmtbool_bottom])
s1_pattern_bottom /= np.sum((s1_map_effs[:, self.config['n_top_pmts']:])[:, self.pmtbool_bottom], axis=1)[:, None]
# Getting pattern from data
s1_area_per_channel_ = events['s1_area_per_channel'][cur_s1_bool,:]
s1_area_per_channel = s1_area_per_channel_[:, self.pmtbool]
s1_area_per_channel_top = (s1_area_per_channel_[:, :self.config['n_top_pmts']])[:, self.pmtbool_top]
s1_area_per_channel_bottom = (s1_area_per_channel_[:, self.config['n_top_pmts']:])[:, self.pmtbool_bottom]
# Top and bottom
arg1 = s1_pattern/self.mean_pe_photon, s1_area_per_channel, self.mean_pe_photon
arg2 = s1_area_per_channel/self.mean_pe_photon, s1_area_per_channel, self.mean_pe_photon
norm_llh_val = (neg2llh_modpoisson(*arg1) - neg2llh_modpoisson(*arg2))
result['s1_2llh'][cur_s1_bool] = np.sum(norm_llh_val, axis=1)
            # If storing is requested - store only the top and bottom arrays, not the combined one
            if self.config['store_per_channel']:
                # Storing pattern information
store_patterns = np.zeros((s1_pattern.shape[0], self.config['n_tpc_pmts']) )
store_patterns[:, self.pmtbool] = s1_pattern
result['s1_pattern'][cur_s1_bool] = store_patterns
# Storing actual LLH values
store_2LLH_ch = np.zeros((norm_llh_val.shape[0], self.config['n_tpc_pmts']) )
store_2LLH_ch[:, self.pmtbool] = norm_llh_val
result['s1_2llh_per_channel'][cur_s1_bool] = store_2LLH_ch
# Top
arg1 = s1_pattern_top/self.mean_pe_photon, s1_area_per_channel_top, self.mean_pe_photon
arg2 = s1_area_per_channel_top/self.mean_pe_photon, s1_area_per_channel_top, self.mean_pe_photon
norm_llh_val = (neg2llh_modpoisson(*arg1) - neg2llh_modpoisson(*arg2))
result['s1_top_2llh'][cur_s1_bool] = np.sum(norm_llh_val, axis=1)
# Bottom
arg1 = s1_pattern_bottom/self.mean_pe_photon, s1_area_per_channel_bottom, self.mean_pe_photon
arg2 = s1_area_per_channel_bottom/self.mean_pe_photon, s1_area_per_channel_bottom, self.mean_pe_photon
norm_llh_val = (neg2llh_modpoisson(*arg1) - neg2llh_modpoisson(*arg2))
result['s1_bottom_2llh'][cur_s1_bool] = np.sum(norm_llh_val, axis=1)
def compute_s2_llhvalue(self, events, result):
for t_ in ['s2', 'alt_s2']:
# Selecting S2s for pattern fit calculation
# - must exist (index != -1)
            # - must have total area larger than the minimal one
# - must have positive AFT
x, y = events[t_+'_x'], events[t_+'_y']
s2_mask = (events[t_+'_area']>self.config['s2_min_area_pattern_fit'])
s2_mask &= (events[t_+'_area_fraction_top']>0)
s2_mask &= (x**2 + y**2) < self.config['max_r_pattern_fit']**2
            # default value is nan; it will be overwritten if the event satisfies the requirements
result[t_+'_2llh'][:] = np.nan
# Making expectation patterns [ in PE ]
if np.sum(s2_mask):
s2_map_effs = self.s2_pattern_map(np.array([x, y]).T)[s2_mask, 0:self.config['n_top_pmts']]
s2_map_effs = s2_map_effs[:, self.pmtbool_top]
s2_top_area = (events[t_+'_area_fraction_top']*events[t_+'_area'])[s2_mask]
s2_pattern = s2_top_area[:, None]*s2_map_effs/np.sum(s2_map_effs, axis=1)[:,None]
# Getting pattern from data
s2_top_area_per_channel = events[t_+'_area_per_channel'][s2_mask, 0:self.config['n_top_pmts']]
s2_top_area_per_channel = s2_top_area_per_channel[:, self.pmtbool_top]
# Calculating LLH, this is shifted Poisson
# we get area expectation and we need to scale them to get
# photon expectation
norm_llh_val = (neg2llh_modpoisson(
mu = s2_pattern/self.mean_pe_photon,
areas = s2_top_area_per_channel,
mean_pe_photon=self.mean_pe_photon)
-
neg2llh_modpoisson(
mu = s2_top_area_per_channel/self.mean_pe_photon,
areas = s2_top_area_per_channel,
mean_pe_photon=self.mean_pe_photon)
)
result[t_+'_2llh'][s2_mask] = np.sum(norm_llh_val, axis=1)
if self.config['store_per_channel']:
store_patterns = np.zeros((s2_pattern.shape[0], self.config['n_top_pmts']) )
store_patterns[:, self.pmtbool_top] = s2_pattern
result[t_+'_pattern'][s2_mask] = store_patterns#:s2_pattern[s2_mask]
store_2LLH_ch = np.zeros((norm_llh_val.shape[0], self.config['n_top_pmts']) )
store_2LLH_ch[:, self.pmtbool_top] = norm_llh_val
result[t_+'_2llh_per_channel'][s2_mask] = store_2LLH_ch
def compute_s2_neural_llhvalue(self, events, result):
for t_ in ['s2', 'alt_s2']:
x, y = events[t_ + '_x'], events[t_ + '_y']
s2_mask = (events[t_ + '_area'] > self.config['s2_min_area_pattern_fit'])
s2_mask &= (events[t_ + '_area_fraction_top'] > 0)
            # default value is nan; it will be overwritten if the event satisfies the requirements
result[t_ + '_neural_2llh'][:] = np.nan
# Produce position and top pattern to feed tensorflow model, return chi2/N
if np.sum(s2_mask):
s2_pos = np.stack((x, y)).T[s2_mask]
s2_pat = events[t_ + '_area_per_channel'][s2_mask, 0:self.config['n_top_pmts']]
# Output[0]: loss function, -2*log-likelihood, Output[1]: chi2
result[t_ + '_neural_2llh'][s2_mask] = self.model_chi2.predict({'xx': s2_pos, 'yy': s2_pat})[1]
@staticmethod
def _infer_map_format(map_name, known_formats=('pkl', 'json', 'json.gz')):
for fmt in known_formats:
if map_name.endswith(fmt):
return fmt
raise ValueError(f'Extension of {map_name} not in {known_formats}')
def neg2llh_modpoisson(mu=None, areas=None, mean_pe_photon=1.0):
"""
Modified poisson distribution with proper normalization for shifted poisson.
mu - expected number of photons per channel
areas - observed areas per channel
    mean_pe_photon - mean of the area response for one photon
"""
with np.errstate(divide='ignore', invalid='ignore'):
fraction = areas/mean_pe_photon
res = 2.*(mu -
(fraction)*np.log(mu) +
loggamma((fraction)+1) +
np.log(mean_pe_photon)
)
is_zero = areas <= 0 # If area equals or smaller than 0 - assume 0
res[is_zero] = 2.*mu[is_zero]
# if zero channel has negative expectation, assume LLH to be 0 there
# this happens in the normalization factor calculation when mu is received from area
neg_mu = mu < 0.0
res[is_zero | neg_mu] = 0.0
return res
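# Descriptive note (no change in behaviour): the plugin above always evaluates
# this function twice - once with the expected pattern as ``mu`` and once with
# the observed areas themselves as ``mu`` - and subtracts the two results, so a
# channel whose observed area exactly matches the expectation contributes 0 to
# the reported 2LLH, and larger values indicate a worse pattern match.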
# continuous and discrete binomial test
@numba.njit
def lbinom_pmf(k, n, p):
"""Log of binomial probability mass function approximated with gamma function"""
scale_log = numba_gammaln(n + 1) - numba_gammaln(n - k + 1) - numba_gammaln(k + 1)
ret_log = scale_log + k * np.log(p) + (n - k) * np.log(1 - p)
return ret_log
@numba.njit
def binom_pmf(k, n, p):
"""Binomial probability mass function approximated with gamma function"""
return np.exp(lbinom_pmf(k, n, p))
@numba.njit
def binom_cdf(k, n, p):
if k >= n:
return 1.0
return numba_betainc(n - k, k + 1, 1.0 - p)
@numba.njit
def binom_sf(k, n, p):
return 1 - binom_cdf(k, n, p)
@numba.njit
def lbinom_pmf_diriv(k, n, p, dk=1e-7):
"""Numerical dirivitive of Binomial pmf approximated with gamma function"""
if k + dk < n:
return (lbinom_pmf(k + dk, n, p) - lbinom_pmf(k, n, p)) / dk
else:
return (lbinom_pmf(k - dk, n, p) - lbinom_pmf(k, n, p)) / - dk
@numba.njit(cache=True)
def _numeric_derivative(y0, y1, err, target, x_min, x_max, x0, x1):
"""Get close to <target> by doing a numeric derivative"""
if abs(y1 - y0) < err:
# break by passing dx == 0
return 0., x1, x1
x = (target - y0) / (y1 - y0) * (x1 - x0) + x0
x = min(x, x_max)
x = max(x, x_min)
dx = abs(x - x1)
x0 = x1
x1 = x
return dx, x0, x1
@numba.njit
def lbinom_pmf_mode(x_min, x_max, target, args, err=1e-7, max_iter=50):
"""Find the root of the derivative of log Binomial pmf with secant method"""
x0 = x_min
x1 = x_max
dx = abs(x1 - x0)
while (dx > err) and (max_iter > 0):
y0 = lbinom_pmf_diriv(x0, *args)
y1 = lbinom_pmf_diriv(x1, *args)
dx, x0, x1 = _numeric_derivative(y0, y1, err, target, x_min, x_max, x0, x1)
max_iter -= 1
return x1
@numba.njit
def lbinom_pmf_inverse(x_min, x_max, target, args, err=1e-7, max_iter=50):
"""Find the where the log Binomial pmf cross target with secant method"""
x0 = x_min
x1 = x_max
dx = abs(x1 - x0)
while (dx > err) and (max_iter > 0):
y0 = lbinom_pmf(x0, *args)
y1 = lbinom_pmf(x1, *args)
dx, x0, x1 = _numeric_derivative(y0, y1, err, target, x_min, x_max, x0, x1)
max_iter -= 1
return x1
@numba.njit
def binom_test(k, n, p):
"""
The main purpose of this algorithm is to find the value j on the
other side of the mode that has the same probability as k, and
integrate the tails outward from k and j. In the case where either
k or j are zero, only the non-zero tail is integrated.
"""
mode = lbinom_pmf_mode(0, n, 0, (n, p))
if k <= mode:
j_min, j_max = mode, n
else:
j_min, j_max = 0, mode
target = lbinom_pmf(k, n, p)
j = lbinom_pmf_inverse(j_min, j_max, target, (n, p))
pval = 0
if min(k, j) > 0:
pval += binom_cdf(min(k, j), n, p)
if max(k, j) > 0:
pval += binom_sf(max(k, j), n, p)
pval = min(1.0, pval)
return pval
@np.vectorize
@numba.njit
def s1_area_fraction_top_probability(aft_prob, area_tot, area_fraction_top, mode='continuous'):
"""Function to compute the S1 AFT probability"""
area_top = area_tot * area_fraction_top
    # If any of the following three conditions is met the inputs are not
    # physical, so skip the test and return nan for the binomial test
# k: size_top, n: size_tot, p: aft_prob
do_test = True
if area_tot < area_top:
# warnings.warn(f'n {area_tot} must be >= k {area_top}')
binomial_test = np.nan
do_test = False
if (aft_prob > 1.0) or (aft_prob < 0.0):
# warnings.warn(f'p {aft_prob} must be in range [0, 1]')
binomial_test = np.nan
do_test = False
if area_top < 0:
# warnings.warn(f'k {area_top} must be >= 0')
binomial_test = np.nan
do_test = False
if do_test:
if mode == 'discrete':
binomial_test = binom_pmf(area_top, area_tot, aft_prob)
else:
binomial_test = binom_test(area_top, area_tot, aft_prob)
return binomial_test
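# Minimal sanity check (a sketch, not part of the plugin; assumes straxen and
# numba are importable so the njit helpers above compile): the continuous
# binomial test should be close to 1 when the observed S1 area fraction top
# matches the optical expectation and very small when it deviates strongly.
if __name__ == '__main__':
    print(s1_area_fraction_top_probability(0.3, 100.0, 0.30))  # close to 1
    print(s1_area_fraction_top_probability(0.3, 100.0, 0.90))  # close to 0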
|
import numpy as np
import matplotlib.pyplot as plt
from awave.experimental.filters import gabor_filter, edge_filter, curve_filter
from awave.experimental.filters_agg import *
import awave.experimental.viz as viz
from tqdm import tqdm
from pytorch_lightning.core.lightning import LightningModule
import torchmetrics
import logging
import cifar10
from torch import nn
import torch
from torch.nn import functional as F
import torch.optim as optim
import util
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import CSVLogger
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
class LinearClassifier(LightningModule):
def __init__(self, input_size=10368, output_size=10):
super().__init__()
self.fc1 = nn.Linear(input_size, output_size)
def forward(self, X):
X = self.fc1(X)
return X
def training_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.cross_entropy(logits, y)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
val_loss = F.cross_entropy(logits, y)
self.log("val_loss", val_loss.item())
preds = logits.softmax(dim=-1)
acc = torchmetrics.functional.accuracy(preds, y)
self.log("val_acc", acc)
return val_loss
def configure_optimizers(self):
return optim.Adam(self.parameters(), lr=1e-3)
if __name__ == '__main__':
# specify the features
W_conv2d0 = make_weights(7,
[("color", i) for i in range(3)],
[("gabor", orientation, offset)
for orientation in range(0, 180, 5)
for offset in [0, 7./8., 7./4, 7.*3/8.]]
)
conv2d0 = nn.Conv2d(in_channels=3, out_channels=W_conv2d0.shape[-1], kernel_size=W_conv2d0.shape[0])
    # copy the handcrafted color/Gabor filters into the conv layer; assigning
    # to ``.data`` actually overwrites the parameters (``.value`` has no effect)
    conv2d0.weight.data = torch.Tensor(W_conv2d0.transpose())
    conv2d0.bias.data.zero_()
pool2d0 = nn.MaxPool2d(kernel_size=5, stride=4, padding=0)
feat_extractor = nn.Sequential(conv2d0, pool2d0)
# load data
# this is too big for gpu
print('loading data...')
    X, Y = cifar10.get_batch(batch_size=50000, train=True)  # X is (50000, 3, 32, 32)
    X_test, Y_test = cifar10.get_batch(batch_size=10000, train=False)  # X_test is (10000, 3, 32, 32)
# extract feats
print('extracting feats...')
with torch.no_grad():
feats = feat_extractor(X).detach()
feats = feats.reshape(feats.shape[0], -1)
feats_test = feat_extractor(X_test).detach()
feats_test = feats_test.reshape(feats_test.shape[0], -1)
print('\tfeat shape', feats.shape)
# set up dataloaders
train_feats_loader = cifar10.create_dataloader(feats, Y, batch_size=1000)
test_feats_loader = cifar10.create_dataloader(feats_test, Y_test, batch_size=1000)
# train
print('training...')
device = 'cuda'
logger = CSVLogger("logs", name="my_exp_name")
model = LinearClassifier(input_size=feats.shape[1]).to(device)
trainer = Trainer(gpus=1, logger=logger, callbacks=[EarlyStopping(monitor="val_loss")])
trainer.fit(model, train_feats_loader, test_feats_loader)
|
import os
import re
import sys
from io import StringIO
from distutils import log
from distutils.cmd import Command
from subprocess import check_output
from distutils.errors import DistutilsError
def simple_call(cmd):
return check_output(cmd.split(" "))
class SimpleCommand(Command):
"""Default behavior for simple commands
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def install_requires(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(
self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(
self.distribution.tests_require)
def run(self):
self.install_requires()
self._run()
class IncrementSemanticVersion(SimpleCommand):
"""Increment Semantic Version and Commmit to Git
Version incrementing uses semantic versioning. This command accepts -M or
--major, -m or --minor to increment a major or minor release. If no flags
are passed then a patch release is created.
"""
user_options = [
("major", "M", "increment version for major release"),
("minor", "m", "increment version for minor release"),
]
boolean_options = ("major", "minor")
def initialize_options(self):
self.major = False
self.minor = False
def _new_version(self, version):
major, minor, patch = [int(i) for i in version.split(".")]
if self.major:
return "{}.0.0".format(major + 1)
elif self.minor:
return "{}.{}.0".format(major, minor + 1)
else:
return "{}.{}.{}".format(major, minor, patch + 1)
def _update_version(self):
        pattern = re.compile(r'^(\s+)version="([0-9\.]+)"')
        output = StringIO()
with open("setup.py", "r") as fp:
for line in fp:
result = pattern.match(line)
if not result:
output.write(line)
else:
spaces, version = result.groups()
new_version = self._new_version(version)
output.write(
'{}version="{}",\n'.format(spaces, new_version))
with open("setup.py", "w") as fp:
fp.write(output.getvalue())
return new_version
def _run(self):
if simple_call("git status --porcelain").strip():
raise DistutilsError("Uncommited changes, "
"commit all changes before release")
new_version = self._update_version()
self.distribution.metadata.version = new_version
check_output([
"git", "commit", "-a", "-m", "Release {}".format(new_version)])
simple_call("git tag release-{}".format(new_version))
class GitPush(SimpleCommand):
"""Push changes and tags to git origin
"""
description = "push changes to git origin"
def _run(self):
simple_call("git push origin master")
simple_call("git push --tags")
class TestsWithCoverage(SimpleCommand):
"""Run Unit Tests with Coverage
"""
description = "run unit tests with coverage"
def _run(self):
from coverage import coverage
cov = coverage(data_file=".coverage", branch=True,
source=self.distribution.packages)
cov.start()
# Unittest calls exit. How naughty.
try:
self.run_command("test")
except SystemExit:
pass
cov.stop()
cov.xml_report(outfile="coverage.xml")
cov.html_report()
class PEP8CheckStyle(SimpleCommand):
"""Run PEP8 Code Style Valiation
"""
description = "run PEP8 style validations"
def _run(self):
from pep8 import StyleGuide
self.run_command("egg_info")
files = self.get_finalized_command("egg_info")
report = StyleGuide().check_files([
p for p in files.filelist.files if p.endswith(".py")])
if report.total_errors:
raise DistutilsError(
"Found {} PEP8 violations".format(report.total_errors))
else:
log.info("No PEP8 violations found")
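# Example wiring (a sketch, not from the original source): a project's setup.py
# could expose these commands through ``cmdclass``; the mapping below groups
# the classes defined above under illustrative command names.
DEFAULT_CMDCLASS = {
    "release": IncrementSemanticVersion,
    "push": GitPush,
    "coverage": TestsWithCoverage,
    "pep8": PEP8CheckStyle,
}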
|
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position
"""IPX Socket Types"""
###############################################################################
# NOTE: fix duplicated name of ``socket``
import sys
path = sys.path.pop(0)
###############################################################################
import collections
import re
from typing import TYPE_CHECKING
import bs4
from pcapkit.vendor.default import Vendor
if TYPE_CHECKING:
from collections import Counter
from bs4 import BeautifulSoup
###############################################################################
sys.path.insert(0, path)
###############################################################################
__all__ = ['Socket']
class Socket(Vendor):
"""Socket Types"""
#: Value limit checker.
FLAG = 'isinstance(value, int) and 0x0000 <= value <= 0xFFFF'
#: Link to registry.
LINK = 'https://en.wikipedia.org/wiki/Internetwork_Packet_Exchange#Socket_number'
def count(self, data: 'BeautifulSoup') -> 'Counter[str]':
"""Count field records.
Args:
data: Registry data.
Returns:
Field recordings.
"""
return collections.Counter()
def request(self, text: 'str') -> 'BeautifulSoup': # type: ignore[override] # pylint: disable=signature-differs
"""Fetch HTML source.
Args:
text: Context from :attr:`~Vendor.LINK`.
Returns:
Parsed HTML source.
"""
return bs4.BeautifulSoup(text, 'html5lib')
def process(self, soup: 'BeautifulSoup') -> 'tuple[list[str], list[str]]': # pylint: disable=arguments-differ
"""Process HTML source.
Args:
            soup: Parsed HTML source.
Returns:
Enumeration fields and missing fields.
"""
table = soup.find_all('table', class_='wikitable')[3]
content = filter(lambda item: isinstance(item, bs4.element.Tag), table.tbody) # pylint: disable=filter-builtin-not-iterating
next(content) # header
enum = [] # type: list[str]
miss = [] # type: list[str]
for item in content:
line = item.find_all('td')
pval = ' '.join(line[0].stripped_strings)
dscp = ' '.join(line[1].stripped_strings)
data = list(filter(None, map(lambda s: s.strip(), re.split(r'\W*,|\(|\)\W*', dscp))))
if len(data) == 2:
name, desc = data
else:
name, desc = dscp, ''
renm = self.safe_name(name)
tmp1 = f', {desc}' if desc else ''
desc = self.wrap_comment(f'{name}{tmp1}')
try:
code, _ = pval, int(pval, base=16)
pres = f"{renm} = {code}"
sufs = f'#: {desc}'
# if len(pres) > 74:
# sufs = f"\n{' '*80}{sufs}"
# enum.append(f'{pres.ljust(76)}{sufs}')
enum.append(f'{sufs}\n {pres}')
except ValueError:
start, stop = pval.split('–')
miss.append(f'if {start} <= value <= {stop}:')
miss.append(f' #: {desc}')
miss.append(f" extend_enum(cls, '{name}_0x%s' % hex(value)[2:].upper().zfill(4), value)")
miss.append(' return cls(value)')
return enum, miss
if __name__ == "__main__":
Socket()
|
from pylab import *
import os
from os.path import join
import pickle
dirname = "dqn_atari"
data = pickle.load(open(os.path.join(dirname, 'Q1_huber_loss_delta=2.pkl'),'rb'))
fig, ax = plt.subplots(figsize=(25,20))
ax.set_title("Graph representing Q-Learning results on Pong for Huber_Loss delta = 2")
ax.set_xlabel("Iteration")
ax.set_ylabel("Return")
plot(data['time_step_log'], data['best_mean_episode_reward_log'], label="best_mean_episode_reward")
plot(data['time_step_log'], data['mean_episode_reward_log'], label="mean_episode_reward")
ax.legend(prop={'size': 10}).draggable()
plt.savefig("figs/Q1_huber_loss_delta=2.png")
#plt.show()
# Compare dqn and vanilla
data_vanilla = pickle.load(open('dqn_atari/Q1_huber_loss_delta=2.pkl', 'rb'))
data_dqn = pickle.load(open('dqn_atari/personal_log_atari_double_q_learning_huber_delta=2.pkl', 'rb'))
fig, ax = plt.subplots(figsize=(25,20))
ax.set_title("Graph comparing Vanilla and Double Q-Learning results on Pong for Huber_Loss delta = 2")
ax.set_xlabel("Iteration")
ax.set_ylabel("Return")
plot(data_vanilla['time_step_log'], data_vanilla['best_mean_episode_reward_log'], label="vanilla_best_mean_episode_reward")
plot(data_vanilla['time_step_log'], data_vanilla['mean_episode_reward_log'], label="vanilla_mean_episode_reward")
plot(data_dqn['time_step_log'], data_dqn['best_mean_episode_reward_log'], label="dqn_best_mean_episode_reward")
plot(data_dqn['time_step_log'], data_dqn['mean_episode_reward_log'], label="dqn_mean_episode_reward")
ax.legend(prop={'size': 10}).draggable()
plt.savefig("figs/Q2_DQN_Vanillahuber_loss_delta=2.png")
dirname = "data_aws"
filenames = os.listdir(dirname)
fig, ax = plt.subplots(figsize=(25,20))
ax.set_title("Graph representing Doube Q-Learning results on Pong for different value of Huber_Loss delta ")
ax.set_xlabel("Iteration")
ax.set_ylabel("Return")
for filename in filenames:
if not ("personal_log" in filename):
continue
print(filename)
data = pickle.load(open(os.path.join(dirname, filename), 'rb'))
plot(data['time_step_log'], data['best_mean_episode_reward_log'], label="best_mean_episode_reward" + filename[37:-4])
plot(data['time_step_log'], data['mean_episode_reward_log'], label="mean_episode_reward" + filename[37:-4])
ax.legend(prop={'size': 10}).draggable()
plt.savefig("figs/dqn_huber_loss_parameter_comparison.png")
|
"""Utilities for translating ORF detection
"""
import warnings
from collections import Counter
from collections import defaultdict
import pysam
from tqdm import *
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import pandas as pd
from .fasta import FastaReader
from .gtf import GTFReader
from .interval import Interval
from .common import is_read_uniq_mapping
from .common import merge_intervals
from .common import cal_periodicity
from .infer_protocol import infer_protocol
class PutativeORF:
"""Class for putative ORF."""
def __init__(
self,
category,
transcript_id,
transcript_type,
gene_id,
gene_name,
gene_type,
chrom,
strand,
intervals,
seq="",
leader="",
trailer="",
):
self.category = category
self.tid = transcript_id
self.ttype = transcript_type
self.gid = gene_id
self.gname = gene_name
self.gtype = gene_type
self.chrom = chrom
self.strand = strand
self.intervals = sorted(intervals, key=lambda x: x.start)
start = self.intervals[0].start
end = self.intervals[-1].end
self.seq = seq
self.oid = "{}_{}_{}_{}".format(transcript_id, start, end, len(seq))
self.leader = leader
self.trailer = trailer
@property
def start_codon(self):
if len(self.seq) < 3:
return None
return self.seq[:3]
@classmethod
def from_string(cls, line):
"""
Parameters
----------
line: string
line for annotation file generated by prepare_orfs
"""
if not line:
print("annotation line cannot be empty")
return None
fields = line.split("\t")
if len(fields) != 13:
print("unexpected number of columns found for annotation file")
return None
oid = fields[0]
category = fields[1]
tid = fields[2]
ttype = fields[3]
gid = fields[4]
gname = fields[5]
gtype = fields[6]
chrom = fields[7]
strand = fields[8]
coordinate = fields[9]
intervals = []
for group in coordinate.split(","):
start, end = group.split("-")
start = int(start)
end = int(end)
intervals.append(Interval(chrom, start, end, strand))
seq = fields[10]
leader = fields[11]
trailer = fields[12]
        # pass the parsed seq/leader/trailer through so they are not silently dropped
        return cls(
            category, tid, ttype, gid, gname, gtype, chrom, strand, intervals,
            seq, leader, trailer,
        )
@classmethod
def from_tracks(cls, tracks, category, seq="", leader="", trailer=""):
"""
Parameters
----------
tracks: list of GTFTrack
"""
if not tracks:
return None
intervals = []
tid = set()
ttype = set()
gid = set()
gname = set()
gtype = set()
chrom = set()
strand = set()
for track in tracks:
try:
tid.add(track.transcript_id)
ttype.add(track.transcript_type)
gid.add(track.gene_id)
gname.add(track.gene_name)
gtype.add(track.gene_type)
chrom.add(track.chrom)
strand.add(track.strand)
intervals.append(
Interval(track.chrom, track.start, track.end, track.strand)
)
except AttributeError:
print(
"missing attribute {}:{}-{}".format(
track.chrom, track.start, track.end
)
)
return None
if (
len(tid) != 1
or len(ttype) != 1
or len(gid) != 1
or len(gname) != 1
or len(gtype) != 1
or len(chrom) != 1
or len(strand) != 1
):
print("inconsistent tracks for one ORF")
return None
tid = list(tid)[0]
ttype = list(ttype)[0]
gid = list(gid)[0]
gname = list(gname)[0]
gtype = list(gtype)[0]
chrom = list(chrom)[0]
strand = list(strand)[0]
return cls(
category,
tid,
ttype,
gid,
gname,
gtype,
chrom,
strand,
intervals,
seq,
leader,
trailer,
)
def tracks_to_ivs(tracks):
"""
Parameters
----------
tracks: List[GTFTrack]
list of gtf tracks
Returns
-------
intervals: List[Interval]
list of Interval
"""
chrom = {track.chrom for track in tracks}
strand = {track.strand for track in tracks}
if len(chrom) != 1 or len(strand) != 1:
print("fail to fetch seq: inconsistent chrom or strand")
return None
chrom = list(chrom)[0]
strand = list(strand)[0]
intervals = [Interval(chrom, track.start, track.end, strand) for track in tracks]
intervals = merge_intervals(intervals)
return intervals
def transcript_to_genome_iv(start, end, intervals, reverse=False):
"""
Parameters
----------
start: int
start position in transcript
end: int
end position in transcript
intervals: List[Interval]
coordinate in genome
reverse: bool
whether if it is on the reverse strand
Returns
-------
ivs: List[Interval]
the coordinate for start, end in genome
"""
total_len = sum(i.end - i.start + 1 for i in intervals)
if reverse:
start, end = total_len - end - 1, total_len - start - 1
ivs = []
start_genome = None
end_genome = None
### find start in genome
cur = 0
for i in intervals:
i_len = i.end - i.start + 1
if cur + i_len > start:
start_genome = i.start + start - cur
break
cur += i_len
### find end in genome
cur = 0
for i in intervals:
i_len = i.end - i.start + 1
if cur + i_len > end:
end_genome = i.start + end - cur
break
cur += i_len
### find overlap with (start_genome, end_genome)
for i in intervals:
s = max(i.start, start_genome)
e = min(i.end, end_genome)
if s <= e:
ivs.append(Interval(i.chrom, s, e, i.strand))
return ivs
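# Worked example for transcript_to_genome_iv (comment only, hypothetical
# coordinates): with intervals [(10, 19), (30, 39)] on the '+' strand and
# transcript positions start=5, end=12, the total length is 20, start maps to
# genome position 15, end maps to 32, and the returned intervals are
# [(15, 19), (30, 32)].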
def fetch_seq(fasta, tracks):
"""
Parameters
----------
fasta: FastaReader
instance of FastaReader
tracks: List[GTFTrack]
list of gtf track
Returns
-------
merged_seq: str
        combined sequence for the region
"""
intervals = tracks_to_ivs(tracks)
if not isinstance(fasta, FastaReader):
fasta = FastaReader(fasta)
sequences = fasta.query(intervals)
merged_seq = "".join(sequences)
strand = tracks[0].strand
if strand == "-":
return fasta.reverse_complement(merged_seq)
return merged_seq
def search_orfs(fasta, intervals):
"""
Parameters
----------
fasta: FastaReader
instance of FastaReader
intervals: List[Interval]
list of intervals
Returns
-------
orfs: list
list of (List[Interval], seq, leader, trailer)
list of intervals for putative ORF
seq: sequence for the putative ORF
leader: sequence upstream of the ORF
trailer: sequence downstream of the ORF
"""
if not intervals:
return []
orfs = []
if not isinstance(fasta, FastaReader):
fasta = FastaReader(fasta)
intervals = merge_intervals(intervals)
sequences = fasta.query(intervals)
merged_seq = "".join(sequences)
reverse = False
strand = intervals[0].strand
if strand == "-":
merged_seq = fasta.reverse_complement(merged_seq)
reverse = True
    start_codons = set(
        ["ATG", "TTG", "CTG", "GTG", "AAG", "AGG", "ACG", "ATA", "ATT", "ATC"]
    )
stop_codons = set(["TAG", "TAA", "TGA"])
for sc in start_codons:
cur = 0
while cur < len(merged_seq):
start = merged_seq.find(sc, cur)
if start == -1:
break
cur = start + 1
for i in range(start, len(merged_seq), 3):
if merged_seq[i : i + 3] in stop_codons:
### found orf
ivs = transcript_to_genome_iv(start, i + 2, intervals, reverse)
seq = merged_seq[start:i]
leader = merged_seq[:start]
trailer = merged_seq[i:]
if ivs:
orfs.append((ivs, seq, leader, trailer))
break
return orfs
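# Worked example for search_orfs (comment only, hypothetical sequence): if the
# merged '+' strand sequence is "CCATGAAATAGCC", the scan finds the start codon
# "ATG" at offset 2 and the in-frame stop "TAG" at offset 8, so the reported
# ORF has seq="ATGAAA" (stop codon excluded), leader="CC" and trailer="TAGCC",
# while the genomic intervals from transcript_to_genome_iv cover offsets 2-10,
# i.e. they include the stop codon.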
def prepare_orfs(gtf, fasta, prefix):
"""
Parameters
----------
gtf: GTFReader
instance of GTFReader
fasta: FastaReader
instance of FastaReader
prefix: str
prefix for output file
Returns
-------
cds: List[PutativeORF]
list of CDS
uorfs: List[PutativeORF]
list of upstream ORFs
dorfs: List[PutativeORF]
list of downstream ORFs
"""
if not isinstance(gtf, GTFReader):
gtf = GTFReader(gtf)
if not isinstance(fasta, FastaReader):
fasta = FastaReader(fasta)
print("preparing putative ORFs...")
### process CDS gtf
print("searching cds...")
cds_orfs = []
for gid in tqdm(gtf.cds):
for tid in gtf.cds[gid]:
tracks = gtf.cds[gid][tid]
# seq = fetch_seq(fasta, tracks)
orf = PutativeORF.from_tracks(tracks, "CDS")
if orf:
cds_orfs.append(orf)
### process UTR gtf
utr5 = defaultdict(list)
utr3 = defaultdict(list)
for gid in gtf.utr:
### find first cds and last cds for gene
gene_cds = []
for tid in gtf.cds[gid]:
gene_cds += gtf.cds[gid][tid]
if not gene_cds:
print("fail to find CDS for UTR")
continue
first_cds = gene_cds[0]
for gc in gene_cds:
if gc.start < first_cds.start:
first_cds = gc
last_cds = gene_cds[-1]
for gc in gene_cds:
if gc.end > last_cds.end:
last_cds = gc
for tid in gtf.utr[gid]:
for track in gtf.utr[gid][tid]:
if track.start < first_cds.start:
if track.end >= first_cds.start:
track.end = first_cds.start - 1
if track.strand == "+":
utr5[tid].append(track)
else:
utr3[tid].append(track)
elif track.end > last_cds.end:
if track.start <= last_cds.end:
track.start = last_cds.end + 1
if track.strand == "+":
utr3[tid].append(track)
else:
utr5[tid].append(track)
uorfs = []
print("searching uORFs...")
for tid in tqdm(utr5):
tracks = utr5[tid]
ttype = tracks[0].transcript_type
gid = tracks[0].gene_id
gname = tracks[0].gene_name
gtype = tracks[0].gene_type
chrom = tracks[0].chrom
strand = tracks[0].strand
ivs = tracks_to_ivs(tracks)
orfs = search_orfs(fasta, ivs)
for ivs, seq, leader, trailer in orfs:
orf = PutativeORF(
"uORF",
tid,
ttype,
gid,
gname,
gtype,
chrom,
strand,
ivs,
seq,
leader,
trailer,
)
uorfs.append(orf)
dorfs = []
print("searching dORFs...")
for tid in tqdm(utr3):
tracks = utr3[tid]
ttype = tracks[0].transcript_type
gid = tracks[0].gene_id
gname = tracks[0].gene_name
gtype = tracks[0].gene_type
chrom = tracks[0].chrom
strand = tracks[0].strand
ivs = tracks_to_ivs(tracks)
orfs = search_orfs(fasta, ivs)
for ivs, seq, leader, trailer in orfs:
orf = PutativeORF(
"dORF",
tid,
ttype,
gid,
gname,
gtype,
chrom,
strand,
ivs,
seq,
leader,
trailer,
)
dorfs.append(orf)
### save to file
print("saving putative ORFs file...")
to_write = (
"ORF_ID\tORF_type\ttranscript_id\ttranscript_type"
"\tgene_id\tgene_name\tgene_type\tchrom"
"\tstrand\tcoordinate\tseq\tleader\ttrailer\n"
)
formatter = "{}\t" * 12 + "{}\n"
for orf in tqdm(cds_orfs + uorfs + dorfs):
coordinate = ",".join(
["{}-{}".format(iv.start, iv.end) for iv in orf.intervals]
)
to_write += formatter.format(
orf.oid,
orf.category,
orf.tid,
orf.ttype,
orf.gid,
orf.gname,
orf.gtype,
orf.chrom,
orf.strand,
coordinate,
orf.seq,
orf.leader,
orf.trailer,
)
with open("{}_putative_orfs.tsv".format(prefix), "w") as output:
output.write(to_write)
return (cds_orfs, uorfs, dorfs)
def split_bam(bam, protocol, prefix):
"""Split bam by read length and strand
Parameters
----------
bam : str
Path to bam file
protocol: str
Experiment protocol [forward, reverse]
prefix: str
prefix for output files
Returns
-------
alignments: dict(dict(Counter))
bam split by length, strand, (chrom, pos)
read_lengths: dict
key is the length, value is the number of reads
"""
alignments = defaultdict(lambda: defaultdict(Counter))
read_lengths = defaultdict(int)
total_count = qcfail = duplicate = secondary = unmapped = multi = valid = 0
print("reading bam file...")
bam = pysam.AlignmentFile(bam, "rb")
for r in tqdm(bam.fetch(until_eof=True)):
total_count += 1
if r.is_qcfail:
qcfail += 1
continue
if r.is_duplicate:
duplicate += 1
continue
if r.is_secondary:
secondary += 1
continue
if r.is_unmapped:
unmapped += 1
continue
if not is_read_uniq_mapping(r):
multi += 1
continue
map_strand = "-" if r.is_reverse else "+"
ref_positions = r.get_reference_positions()
strand = None
pos = None
chrom = r.reference_name
# length = r.query_length
length = len(ref_positions)
if protocol == "forward":
if map_strand == "+":
strand = "+"
pos = ref_positions[0]
else:
strand = "-"
pos = ref_positions[-1]
elif protocol == "reverse":
if map_strand == "+":
strand = "-"
pos = ref_positions[-1]
else:
strand = "+"
pos = ref_positions[0]
# convert bam coordinate to one-based
alignments[length][strand][(chrom, pos + 1)] += 1
read_lengths[length] += 1
valid += 1
summary = (
"summary:\n\ttotal_reads: {}\n\tunique_mapped: {}\n"
"\tqcfail: {}\n\tduplicate: {}\n\tsecondary: {}\n"
"\tunmapped:{}\n\tmulti:{}\n\nlength dist:\n"
).format(total_count, valid, qcfail, duplicate, secondary, unmapped, multi)
for length in read_lengths:
summary += "\t{}: {}\n".format(length, read_lengths[length])
with open("{}_bam_summary.txt".format(prefix), "w") as output:
output.write(summary)
return (alignments, read_lengths)
def align_metagenes(metagenes, read_lengths, prefix):
"""align metagene coverages to determine the lag of the psites
Parameters
----------
metagenes: dict
key is the length, value is the metagene coverage
read_lengths: dict
key is the length, value is the number of reads
prefix: str
prefix for output files
Returns
-------
psite_offsets: dict
key is the length, value is the offset
"""
print("aligning metagene profiles from different lengths...")
psite_offsets = {}
base = n_reads = 0
for length, reads in list(read_lengths.items()):
if reads > n_reads:
base = length
n_reads = reads
reference = metagenes[base].values
to_write = "relative lag to base: {}\n".format(base)
for length, meta in list(metagenes.items()):
cov = meta.values
xcorr = np.correlate(reference, cov, "full")
origin = len(xcorr) // 2
bound = min(base, length)
xcorr = xcorr[origin - bound : origin + bound]
lag = np.argmax(xcorr) - len(xcorr) // 2
psite_offsets[length] = lag
to_write += "\tlag of {}: {}\n".format(length, lag)
with open("{}_psite_offsets.txt".format(prefix), "w") as output:
output.write(to_write)
return psite_offsets
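# Descriptive note (no added functionality): the read length with the most
# reads serves as the base profile, and every other length's metagene is
# cross-correlated against it; a profile identical to the base peaks at the
# centre of the trimmed window and gets a lag of 0, while a shifted profile
# peaks off-centre and yields the corresponding non-zero p-site offset.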
def merge_lengths(alignments, psite_offsets):
"""
Parameters
----------
alignments: dict(dict(Counter))
bam split by length, strand
psite_offsets: dict
key is the length, value is the offset
Returns
-------
merged_alignments: dict(dict)
alignments by merging all lengths
"""
print("merging different lengths...")
merged_alignments = defaultdict(Counter)
for length, offset in list(psite_offsets.items()):
for strand in alignments[length]:
for chrom, pos in alignments[length][strand]:
count = alignments[length][strand][(chrom, pos)]
if strand == "+":
pos_shifted = pos + offset
else:
pos_shifted = pos - offset
merged_alignments[strand][(chrom, pos_shifted)] += count
return merged_alignments
def parse_annotation(annotation):
"""
Parameters
----------
annotation: string
path of annotation file generated by prepare_orfs
Returns
-------
cds: List[PutativeORF]
list of cds
uorfs: List[PutativeORF]
list of putative ORFs from 5'UTR
dorfs: List[PutativeORF]
list of putative ORFs from 3'UTR
"""
cds = []
uorfs = []
dorfs = []
print("parsing putative ORFs...")
with open(annotation, "r") as anno:
        total_lines = sum(1 for _ in anno)
with open(annotation, "r") as anno:
with tqdm(total=total_lines) as pbar:
header = True
for line in anno:
pbar.update()
if header:
header = False
continue
orf = PutativeORF.from_string(line)
if orf is None:
continue
if orf.category == "CDS":
cds.append(orf)
elif orf.category == "uORF":
uorfs.append(orf)
elif orf.category == "dORF":
dorfs.append(orf)
return (cds, uorfs, dorfs)
def orf_coverage(orf, alignments, offset_5p=20, offset_3p=0):
"""
Parameters
----------
orf: PutativeORF
instance of PutativeORF
alignments: dict(Counter)
alignments summarized from bam by merging lengths
offset_5p: int
the number of nts to include from 5'prime
offset_3p: int
the number of nts to include from 3'prime
Returns
-------
coverage: Series
coverage for ORF for specific length
"""
coverage = []
chrom = orf.chrom
strand = orf.strand
if strand == "-":
offset_5p, offset_3p = offset_3p, offset_5p
first, last = orf.intervals[0], orf.intervals[-1]
for pos in range(first.start - offset_5p, first.start):
try:
coverage.append(alignments[strand][(chrom, pos)])
except KeyError:
coverage.append(0)
for iv in orf.intervals:
for pos in range(iv.start, iv.end + 1):
try:
coverage.append(alignments[strand][(chrom, pos)])
except KeyError:
coverage.append(0)
for pos in range(last.end + 1, last.end + offset_3p + 1):
try:
coverage.append(alignments[strand][(chrom, pos)])
except KeyError:
coverage.append(0)
if strand == "-":
coverage.reverse()
return pd.Series(
np.array(coverage), index=np.arange(-offset_3p, len(coverage) - offset_3p)
)
else:
return pd.Series(
np.array(coverage), index=np.arange(-offset_5p, len(coverage) - offset_5p)
)
def orf_coverage_length(orf, alignments, length, offset_5p=20, offset_3p=0):
"""
Parameters
----------
orf: PutativeORF
instance of PutativeORF
alignments: dict(dict(Counter))
alignments summarized from bam
length: int
the target length
offset_5p: int
the number of nts to include from 5'prime
offset_3p: int
the number of nts to include from 3'prime
Returns
-------
coverage: Series
coverage for ORF for specific length
"""
coverage = []
chrom = orf.chrom
strand = orf.strand
if strand == "-":
offset_5p, offset_3p = offset_3p, offset_5p
first, last = orf.intervals[0], orf.intervals[-1]
for pos in range(first.start - offset_5p, first.start):
try:
coverage.append(alignments[length][strand][(chrom, pos)])
except KeyError:
coverage.append(0)
for iv in orf.intervals:
for pos in range(iv.start, iv.end + 1):
try:
coverage.append(alignments[length][strand][(chrom, pos)])
except KeyError:
coverage.append(0)
for pos in range(last.end + 1, last.end + offset_3p + 1):
try:
coverage.append(alignments[length][strand][(chrom, pos)])
except KeyError:
coverage.append(0)
if strand == "-":
coverage.reverse()
return pd.Series(
np.array(coverage), index=np.arange(-offset_3p, len(coverage) - offset_3p)
)
else:
return pd.Series(
np.array(coverage), index=np.arange(-offset_5p, len(coverage) - offset_5p)
)
def metagene_coverage(
cds,
alignments,
read_lengths,
prefix,
max_positions=500,
offset_5p=20,
offset_3p=0,
meta_min_reads=100000,
):
"""
Parameters
----------
cds: List[PutativeORF]
list of cds
alignments: dict(dict(Counter))
alignments summarized from bam
read_lengths: dict
        key is the length, value is the number of reads
prefix: str
prefix for the output file
max_positions: int
the number of nts to include
offset_5p: int
the number of nts to include from the 5'prime
offset_3p: int
        the number of nts to include from the 3'prime
    meta_min_reads: int
        minimum number of reads for a read length to be included in the metagene profiles
Returns
-------
metagenes: dict
key is the length, value is the metagene coverage
"""
print("calculating metagene profiles...")
metagenes = {}
lengths = [x for x in read_lengths if read_lengths[x] >= meta_min_reads]
for length in tqdm(lengths):
metagene_coverage = pd.Series()
for orf in tqdm(cds):
coverage = orf_coverage_length(
orf, alignments, length, offset_5p, offset_3p
)
if len(coverage.index) > 0:
min_index = min(coverage.index.tolist())
max_index = max(coverage.index.tolist())
coverage = coverage[np.arange(min_index, min(max_index, max_positions))]
if coverage.mean() > 0:
metagene_coverage = metagene_coverage.add(coverage, fill_value=0)
metagenes[length] = metagene_coverage
return metagenes
def plot_read_lengths(read_lengths, prefix):
"""
Parameters
----------
read_lengths: dict
key is the length, value is the number of reads
prefix: str
prefix for the output file
"""
print("plotting read length distribution...")
fig, ax = plt.subplots()
x = sorted(read_lengths.keys())
y = [read_lengths[i] for i in x]
ax.bar(x, y)
ax.set_xlabel("Read length")
ax.set_ylabel("Number of reads")
ax.set_title("Read length distribution")
fig.tight_layout()
fig.savefig("{}_read_length_dist.pdf".format(prefix))
plt.close()
def plot_metagene(metagenes, read_lengths, prefix, offset=60):
"""
Parameters
----------
metagenes: dict
key is the length, value is the metagene coverage
read_lengths: dict
key is the length, value is the number of reads
prefix: str
prefix for the output file
"""
print("plotting metagene profiles...")
total_reads = sum(read_lengths.values())
with PdfPages("{}_metagene_plots.pdf".format(prefix)) as pdf:
for length in sorted(metagenes):
metagene_cov = metagenes[length]
if len(metagene_cov) == 0:
continue
corr, pval, nonzero = cal_periodicity(metagene_cov.values)
min_index = min(metagene_cov.index.tolist())
max_index = max(metagene_cov.index.tolist())
offset = min(offset, max_index)
metagene_cov = metagene_cov[np.arange(min_index, offset)]
x = np.arange(min_index, offset)
colors = np.tile(["r", "g", "b"], len(x) // 3 + 1)
xticks = np.arange(min_index, offset, 20)
ratio = read_lengths[length] / total_reads
fig, (ax, ax2) = plt.subplots(nrows=2, ncols=1)
ax.vlines(x, ymin=np.zeros(len(x)), ymax=metagene_cov, colors=colors)
ax.tick_params(axis="x", which="both", top="off", direction="out")
ax.set_xticks(xticks)
ax.set_xlim((min_index, offset))
ax.set_xlabel("Distance from start codon (nt)")
ax.set_ylabel("Number of reads")
ax.set_title(
(
"{} nt reads, proportion: {:.2%}\nPeriodicity: {:.2}, pval: {:.6}"
).format(length, ratio, corr, pval)
)
fig.tight_layout()
pdf.savefig(fig)
plt.close()
def export_orf_coverages(orfs, merged_alignments, prefix, min_count=0, min_corr=0.5):
"""
Parameters
----------
orfs: List[PutativeORF]
a list of putative orfs
merged_alignments: dict(dict)
alignments by merging all lengths
prefix: str
prefix for output file
"""
print("exporting coverages for all ORFs...")
to_write = "ORF_ID\tcoverage\tcount\tlength\tnonzero\tperiodicity\tpval\n"
for orf in tqdm(orfs):
oid = orf.oid
cov = orf_coverage(orf, merged_alignments)
cov = cov.astype(int)
cov = cov.tolist()
count = sum(cov)
length = len(cov)
if len(cov) < 60:
corr, pval, nonzero = (0, 1, 0)
else:
corr, pval, nonzero = cal_periodicity(cov)
to_write += "{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
oid, cov, count, length, nonzero, corr, pval
)
with open("{}_translating_ORFs.tsv".format(prefix), "w") as output:
output.write(to_write)
def export_wig(merged_alignments, prefix):
"""
Parameters
----------
merged_alignments: dict(dict)
alignments by merging all lengths
prefix: str
prefix of output wig files
"""
print("exporting merged alignments to wig file...")
for strand in merged_alignments:
to_write = ""
cur_chrom = ""
for chrom, pos in sorted(merged_alignments[strand]):
if chrom != cur_chrom:
cur_chrom = chrom
to_write += "variableStep chrom={}\n".format(chrom)
to_write += "{}\t{}\n".format(pos, merged_alignments[strand][(chrom, pos)])
if strand == "+":
fname = "{}_pos.wig".format(prefix)
else:
fname = "{}_neg.wig".format(prefix)
with open(fname, "w") as output:
output.write(to_write)
def detect_orfs(bam, prefix, gtf=None, fasta=None, annotation=None, protocol=None):
"""
Parameters
----------
gtf: str
Path to the GTF file
fasta: str
Path to the FASTA file
bam: str
Path to the bam file
prefix: str
prefix for all output files
annotation: str
        Path for annotation files of putative ORFs
It will be automatically generated if None
protocol: str
'forward' for stranded, 'reverse' for reverse stranded
It will be automatically inferred if None
"""
cds = uorfs = dorfs = None
if gtf and not isinstance(gtf, GTFReader):
gtf = GTFReader(gtf)
if fasta and not isinstance(fasta, FastaReader):
fasta = FastaReader(fasta)
if annotation is None:
cds, uorfs, dorfs = prepare_orfs(gtf, fasta, prefix)
else:
cds, uorfs, dorfs = parse_annotation(annotation)
if protocol is None:
protocol = infer_protocol(bam, gtf, prefix)
alignments, read_lengths = split_bam(bam, protocol, prefix)
plot_read_lengths(read_lengths, prefix)
metagenes = metagene_coverage(cds, alignments, read_lengths, prefix)
plot_metagene(metagenes, read_lengths, prefix)
psite_offsets = align_metagenes(metagenes, read_lengths, prefix)
merged_alignments = merge_lengths(alignments, psite_offsets)
export_wig(merged_alignments, prefix)
export_orf_coverages(cds + uorfs + dorfs, merged_alignments, prefix)
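# Example invocation (hypothetical file names):
#   detect_orfs("sample.bam", "sample_prefix", gtf="annotation.gtf", fasta="genome.fa")
# This writes the read-length distribution, metagene plots, P-site offsets, wig
# tracks and per-ORF coverages under the given prefix.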
|
import pytest
@pytest.mark.vcr
def test_vcrtest(dataproxy):
seq = dataproxy.get_sequence("NC_000013.11",50_000_000,50_000_050)
assert len(seq) == 50
assert seq == "TTAGGTGTTTAGATGATTTCTAAGATGCTTTTAAGCCCAGTATTTCTATT"
|
# coding=utf-8
from google.appengine.ext import ndb
class BookRecord(ndb.Model):
# item_ids are stored in key like so '1234|345345|345254352'
item_id_array = ndb.StringProperty(repeated=True)
author = ndb.StringProperty(indexed=False)
title = ndb.StringProperty(indexed=False)
year = ndb.IntegerProperty(indexed=False)
count = ndb.IntegerProperty(indexed=False)
class BookAnnotation(ndb.Model):
# item_ids are stored in key like so '1234|345345|345254352'
short = ndb.StringProperty(indexed=False)
long = ndb.TextProperty(indexed=False)
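# Example usage (hypothetical values; the key id is the joined item ids):
#   BookRecord(id='1234|345345', item_id_array=['1234', '345345'],
#              author='Some Author', title='Some Title', year=2001, count=2).put()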
|
import unittest
from languages.asp.asp_mapper import ASPMapper
from test.language.asp.cell import Cell
class ASPMapperTest(unittest.TestCase):
def runTest(self):
instance = ASPMapper.get_instance()
try:
instance.register_class(Cell)
obj = instance.get_object("cell(1,2,5)")
self.assertTrue(isinstance(obj, Cell))
self.assertEqual(1, obj.get_row())
self.assertEqual(2, obj.get_column())
self.assertEqual('5', obj.get_value().value)
print(instance.get_string(obj))
self.assertEqual("cell(1,2,5)", instance.get_string(obj))
instance.unregister_class(Cell)
noneObject = instance.get_object("cell(1,2,5)")
self.assertIsNone(noneObject)
except Exception as e:
self.fail(str(e))
if __name__ == '__main__':
unittest.main()
|
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
even_numbers = [n for n in numbers if n % 2 == 0]
print(even_numbers)
|
'''
Write a program that reads an integer
and prints whether it is even or odd.
'''
num = int(input('Enter an integer: '))
if num % 2 == 0:
    print('The number {} is EVEN!'.format(num))
else:
    print('The number {} is ODD!'.format(num))
|
nome = input('Hello, friend! What is your name? ')
print('What a nice name! Very pleased to meet you, {}! My name is Rex!'.format(nome))
idade = input('How old are you? ')
print('Wow, so much experience, right? Hahaha. I am 74 years old. ')
conta = input('I want to practice my math... Can you help me? ')
sub = 74 - int(idade)
input('I did the math: our age difference is {}. Correct? '.format(sub))
input('Hahahaha! Thanks, {}, it was a pleasure to meet you. See you later!'.format(nome))
|
import requests
from bs4 import BeautifulSoup
import smtplib
import time
url = "Url_to_the_product"
headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
#Scraping the data according the url provided
def check_price():
page = requests.get(url,headers=headers)
soup = BeautifulSoup(page.content,'html.parser')
title = soup.find(id="productTitle").get_text()
price = soup.find(id="priceblock_ourprice").get_text()
    # the scraped price string may include a currency symbol and thousands separators
    converted_price = float(price.replace("$", "").replace(",", "").strip())
    budget = 1000.0  # placeholder threshold; replace with your own budget
    if converted_price < budget:
send_mail()
print(converted_price)
print(title.strip())
#Sending Mail using Gmail
def send_mail():
server = smtplib.SMTP('smtp.gmail.com',587)
server.ehlo()
server.starttls()
server.ehlo()
server.login('Your_Gmail_id','Password')
subject = 'Price fell down!!!'
body = 'Check the amazon link to buy'+url
msg = f"Subject: {subject}\n\n{body}"
server.sendmail(
"sender_email",
"receiver_email",
msg
)
print("Mail has been sent")
server.quit()
#Function Call
check_price()
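# Optional (illustrative): poll the price periodically instead of checking once.
#   while True:
#       check_price()
#       time.sleep(60 * 60)  # re-check every hour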
|
# crop_map.py
def crop_map(image, xi, xf, yi, yf):
    """Crops the map out of the whole screenshot."""
    print(image)
    # slice both axes at once; chained slicing (image[xi:xf][yi:yf]) would cut the
    # first axis twice instead of cropping a rectangle
    map_img = image[xi:xf, yi:yf]
    return [map_img]
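# Example (hypothetical coordinates; assumes `screenshot` is a NumPy image array):
#   map_only = crop_map(screenshot, 100, 400, 50, 300)[0]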
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import collections
import unittest
from iptest import IronPythonTestCase, path_modifier, run_test
glb = 0
res = ''
global_class_member = "wrong value for the global_class_member"
def nop():
pass
class gc:
pass
try:
import System
from System import GC
gc.collect = GC.Collect
gc.WaitForPendingFinalizers = GC.WaitForPendingFinalizers
except ImportError:
import gc
gc.collect = gc.collect
gc.WaitForPendingFinalizers = nop
def FullCollect():
gc.collect()
gc.WaitForPendingFinalizers()
def Hello():
global res
res = 'Hello finalizer'
#########################################
# class implements finalizer
class Foo:
def __del__(self):
global res
res = 'Foo finalizer'
#########################################
# class doesn't implement finalizer
class Bar:
pass
######################
# Try to delete a builtin name. This should fail since "del" should not
# lookup the builtin namespace
def DoDelBuiltin():
global pow
del(pow)
def DoDelGlobal():
global glb
del glb
return True
######################
# Try to delete a name from an enclosing function. This should fail since "del" should not
# lookup the enclosing namespace
def EnclosingFunction():
val = 1
def DelEnclosingName():
del val
DelEnclosingName()
def get_true():
return True
def get_false():
return False
def get_n(n):
return n
def fooCheck():
exec("foo = 42")
return foo
selph = None
def execAdd():
exec('a=2')
selph.assertTrue(locals() == {'a': 2})
def execAddExisting():
b = 5
exec('a=2')
selph.assertTrue(locals() == {'a': 2, 'b':5})
def execAddExistingArgs(c):
b = 5
exec('a=2')
selph.assertTrue(locals() == {'a': 2, 'b': 5, 'c':7})
def execDel():
a = 5
exec('del(a)')
selph.assertEqual(locals(), {})
def nolocals():
selph.assertEqual(locals(), {})
def singleLocal():
a = True
selph.assertEqual(locals(), {'a' : True})
def nolocalsWithArg(a):
selph.assertEqual(locals(), {'a' : 5})
def singleLocalWithArg(b):
a = True
selph.assertEqual(locals(), {'a' : True, 'b' : 5})
def delSimple():
a = 5
selph.assertTrue(locals() == {'a' : 5})
del(a)
selph.assertTrue(locals() == {})
def iteratorFunc():
for i in range(1):
selph.assertTrue(locals() == {'i' : 0})
yield i
def iteratorFuncLocals():
a = 3
for i in range(1):
selph.assertTrue(locals() == {'a' : 3, 'i' : 0})
yield i
def iteratorFuncWithArg(b):
for i in range(1):
selph.assertEqual(locals(), {'i' : 0, 'b' : 5})
yield i
def iteratorFuncLocalsWithArg(b):
a = 3
for i in range(1):
selph.assertTrue(locals() == {'a' : 3, 'i' : 0, 'b' : 5})
yield i
def delIter():
a = 5
yield 2
selph.assertTrue(locals() == {'a' : 5})
del(a)
yield 3
selph.assertTrue(locals() == {})
def unassigned():
selph.assertTrue(locals() == {})
a = 5
selph.assertEqual(locals(), {'a' : 5})
def unassignedIter():
yield 1
selph.assertTrue(locals() == {})
yield 2
a = 5
yield 3
selph.assertTrue(locals() == {'a' : 5})
yield 4
def reassignLocalsIter():
yield 1
locals = 2
yield 2
selph.assertTrue(locals == 2)
def nested_locals():
# Test locals in nested functions
x = 5
def f():
a = 10
selph.assertEqual(locals(),{'a' : 10})
f()
def nested_locals2():
# Test locals in nested functions with locals also used in outer function
x = 5
def f():
a = 10
selph.assertEqual(locals(),{'a' : 10})
selph.assertEqual(sorted(locals().keys()), ['f', 'x'])
f()
def modifyingLocal(a):
selph.assertEqual(a, 10)
a = 8
selph.assertEqual(a, 8)
selph.assertEqual(locals(), {'a' : 8 })
def test_namebinding_locals_and_class_impl():
xyz = False
class C:
locals()["xyz"] = True
passed = xyz
selph.assertTrue(C.passed == True)
def localsAfterExpr():
exec("pass")
10
exec("pass")
def my_locals():
selph.fail("Calling wrong locals")
class NameBindingTest(IronPythonTestCase):
def setUp(self):
super(NameBindingTest, self).setUp()
# this is a hack to get scoping requirements for testing correct
# python treats scope differently if you are nested inside of a class
global selph
selph = self
def test_nolocals(self):
nolocals()
def test_singlelocal(self):
singleLocal()
def test_nolocalsWithArg(self):
nolocalsWithArg(5)
def test_singleLocalWithArg(self):
singleLocalWithArg(5)
def test_delSimple(self):
delSimple()
def test_iteratorFuncs(self):
for a in iteratorFunc(): pass
for a in iteratorFuncLocals(): pass
for a in iteratorFuncWithArg(5): pass
for a in iteratorFuncLocalsWithArg(5): pass
for a in delIter(): pass
def test_exec(self):
execAdd()
execAddExisting()
execAddExistingArgs(7)
execDel()
def test_reassignLocals(self):
def reassignLocals():
locals = 2
self.assertTrue(locals == 2)
reassignLocals()
def test_unassigned(self):
unassigned()
def test_iter(self):
for a in unassignedIter(): pass
for a in reassignLocalsIter(): pass
def test_nested_locals(self):
nested_locals()
nested_locals2()
@unittest.expectedFailure # this also fails on CPython, scoping rules
def test_namebinding_locals_and_class(self):
"""Test that namebinding uses name lookup when locals() is accessed inside a class"""
test_namebinding_locals_and_class_impl()
def test_modifying_local(self):
modifyingLocal(10)
def test_namebinding_in_functon(self):
# name binding in a function
def f():
a = 2
class c:
exec("a = 42")
abc = a
return c
self.assertEqual(f().abc, 2)
def test_DelBuiltin(self):
# Check that "pow" is defined
global pow
p = pow
self.assertRaises(NameError, DoDelBuiltin)
self.assertRaises(NameError, DoDelBuiltin)
def test_SimpleTest(self):
"""simple case"""
global res
res = ''
f = Foo()
del(f)
FullCollect()
self.assertTrue(res == 'Foo finalizer')
def test_00_DelUndefinedGlobal(self):
del globals()["glb"]
self.assertRaises(NameError, DoDelGlobal)
self.assertRaises(NameError, DoDelGlobal)
def test_01_DefineGlobal(self):
globals()["glb"] = 10
def test_02_DelDefinedGlobal(self):
# Check that "glb" is defined
global glb
l = glb
self.assertTrue(DoDelGlobal() == True)
self.assertRaises(NameError, DoDelGlobal)
self.assertRaises(NameError, DoDelGlobal)
def test_PerInstOverride(self):
"""per-instance override"""
global res
res = ''
def testIt():
f = Foo()
f.__del__ = Hello
self.assertTrue(hasattr(Foo, '__del__'))
self.assertTrue(hasattr(f, '__del__'))
del(f)
testIt()
FullCollect()
self.assertTrue(res == 'Hello finalizer')
def test_PerInstOverrideAndRemove(self):
"""per-instance override & remove"""
global res
def testit():
global res
global Hello
res = ''
f = Foo()
f.__del__ = Hello
self.assertTrue(hasattr(Foo, '__del__'))
self.assertTrue(hasattr(f, '__del__'))
del(f.__del__)
self.assertTrue(hasattr(Foo, '__del__'))
self.assertTrue(hasattr(f, '__del__'))
del(f)
testit()
FullCollect()
self.assertTrue(res == 'Foo finalizer')
def PerInstOverrideAndRemoveBoth(self):
"""per-instance override & remove both"""
global res
res = ''
f = Foo()
self.assertTrue(hasattr(Foo, '__del__'))
self.assertTrue(hasattr(f, '__del__'))
f.__del__ = Hello
self.assertTrue(hasattr(Foo, '__del__'))
self.assertTrue(hasattr(f, '__del__'))
FullCollect()
FullCollect()
del(Foo.__del__)
self.assertTrue(hasattr(Foo, '__del__') == False)
self.assertTrue(hasattr(f, '__del__'))
del(f.__del__)
dir(f)
self.assertTrue(hasattr(Foo, '__del__') == False)
self.assertTrue(hasattr(f, '__del__') == False)
FullCollect()
FullCollect()
del(f)
FullCollect()
self.assertTrue(res == '')
def test_NoFinAddToInstance(self):
"""define finalizer after instance creation"""
global res
res = ''
def inner():
b = Bar()
self.assertTrue(hasattr(Bar, '__del__') == False)
self.assertTrue(hasattr(b, '__del__') == False)
b.__del__ = Hello
self.assertTrue(hasattr(Bar, '__del__') == False)
self.assertTrue(hasattr(b, '__del__'))
del(b)
inner()
FullCollect()
self.assertTrue(res == 'Hello finalizer')
def test_NoFinAddToInstanceAndRemove(self):
"""define & remove finalizer after instance creation"""
global res
res = ''
b = Bar()
self.assertTrue(hasattr(Bar, '__del__') == False)
self.assertTrue(hasattr(b, '__del__') == False)
b.__del__ = Hello
self.assertTrue(hasattr(Bar, '__del__') == False)
self.assertTrue(hasattr(b, '__del__'))
del(b.__del__)
self.assertTrue(hasattr(Bar, '__del__') == False)
self.assertTrue(hasattr(b, '__del__') == False)
del(b)
FullCollect()
self.assertTrue(res == '')
def _test_undefined(self, function, *args):
try:
function(*args)
except NameError as n:
self.assertTrue("'undefined'" in str(n), "%s: expected undefined variable exception, but different NameError caught: %s" % (function.__name__, n))
else:
self.fail("%s: expected undefined variable exception, but no exception caught" % function.__name__)
def _test_unassigned(self, function, *args):
try:
function(*args)
except UnboundLocalError as n:
pass
else:
self.fail("%s: expected unassigned variable exception, but no exception caught" % function.__name__)
    def _test_attribute_error(self, function, *args):
try:
function(*args)
except AttributeError:
pass
else:
self.fail("%s: expected AttributeError, but no exception caught" % function.__name__)
def test_undefined_local(self):
"""straightforward undefined local"""
def test():
x = undefined
undefined = 1 # create local binding
self._test_undefined(test)
def test_undefined_multiple_assign(self):
"""longer assignment statement"""
def test():
x = y = z = undefined
undefined = 1 # create local binding
self._test_undefined(test)
def test_udefined_aug(self):
"""aug assignment"""
def test():
undefined += 1 # binds already
self._test_undefined(test)
def test_undefined_assigned_to_self(self):
"""assigned to self"""
def test():
undefined = undefined
self._test_undefined(test)
def test_explicit_deletion(self):
"""explicit deletion"""
def test():
del undefined
self._test_undefined(test)
def test_if_statement(self):
"""if statement"""
def test():
if get_false():
undefined = 1 # unreachable
x = undefined
self._test_undefined(test)
def test_if_statement_2(self):
"""if statement"""
def test():
if get_false():
undefined = 1
else:
x = 1
x = undefined
self._test_undefined(test)
def test_if_statement_3(self):
"""if statement"""
def test():
if get_true():
pass
else:
undefined = 1
x = undefined
self._test_undefined(test)
def test_nested_if_statement(self):
"""nested if statements"""
def test():
if get_false():
if get_true():
undefined = 1
else:
undefined = 1
x = undefined
self._test_undefined(test)
def test_if_elif_elif_elif(self):
"""if elif elif elif"""
def test():
n = get_n(10)
if n == 1:
undefined = n
elif n == 2:
undefined = n
elif n == 3:
undefined = n
elif n == 4:
undefined = n
elif n == 5:
undefined = n
else:
pass
n = undefined
self._test_undefined(test)
def test_for_loop(self):
"""for"""
def test():
for i in range(get_n(0)):
undefined = i
x = undefined
self._test_undefined(test)
def test_for_loop_2(self):
"""more for with else that doesn't always bind"""
def test():
for i in range(get_n(0)):
undefined = i
else:
if get_false():
undefined = 1
elif get_false():
undefined = 1
else:
pass
x = undefined
self._test_undefined(test)
def test_for_with_break(self):
"""for with break"""
def test():
for i in range(get_n(10)):
break
undefined = 10
x = undefined
self._test_undefined(test)
def test_for_with_break_and_else(self):
"""for with break and else"""
def test():
for i in range(get_n(10)):
break
undefined = 10
else:
undefined = 20
x = undefined
self._test_undefined(test)
def test_for_with_break_and_else_2(self):
"""for with break and else"""
def test():
for i in range(get_n(10)):
if get_true():
break
undefined = 10
else:
undefined = 20
x = undefined
self._test_undefined(test)
def test_for_with_break_and_else_conditional_init(self):
"""for with break and else and conditional initialization"""
def test():
for i in range(get_n(10)):
if get_false():
undefined = 10
if get_true():
break
undefined = 10
else:
undefined = 20
x = undefined
self._test_undefined(test)
def test_deep_delete(self):
"""delete somewhere deep"""
def test():
for i in range(get_n(10)):
undefined = 10
if get_false():
del undefined
if get_true():
del undefined
if get_true():
break
undefined = 10
else:
undefined = 20
x = undefined
self._test_undefined(test)
def test_bound_by_for(self):
# bound by for
def test():
for undefined in []:
pass
print(undefined)
self._test_undefined(test)
def test_more_binding_constructs(self):
"""more binding constructs"""
def test():
try:
1/0
undefined = 1
except:
pass
x = undefined
self._test_undefined(test)
def test():
try:
pass
except Error as undefined:
pass
x = undefined
self._test_undefined(test)
def test_bound_by_import(self):
"""bound by import statement"""
def test():
x = undefined
import undefined
self._test_undefined(test)
def test_bound_by_import_2(self):
"""same here"""
def test():
x = undefined
import defined as undefined
self._test_undefined(test)
def test_del_var_1(self):
"""del"""
def test():
undefined = 1
del undefined
x = undefined
self._test_undefined(test)
def test_conditional_del(self):
"""conditional del"""
def test():
undefined = 10
if get_true():
del undefined
else:
undefined = 1
x = undefined
self._test_undefined(test)
def test_del_in_loop(self):
"""del in the loop"""
def test():
undefined = 10
for i in [1]:
if i == get_n(1):
del undefined
x = undefined
self._test_undefined(test)
def test_del_in_loop_in_condition(self):
"""del in the loop in condition"""
def test():
undefined = 10
for i in [1,2,3]:
if i == get_n(1):
continue
elif i == get_n(2):
del undefined
else:
break
x = undefined
self._test_undefined(test)
def test_params_del(self):
def test(undefined):
self.assertEqual(undefined, 1)
del undefined
x = undefined
self._test_undefined(test, 1)
def test_params_del_2(self):
def test(undefined):
self.assertEqual(undefined, 1)
x = undefined
self.assertEqual(x, 1)
del undefined
y = x
self.assertEqual(y, 1)
x = undefined
self._test_undefined(test, 1)
def test_params_del_3(self):
def test(a):
if get_false(): return
x = a
undefined = a
del undefined
x = undefined
self._test_undefined(test, 1)
def test_default_params_unassigned(self):
def test():
if get_false():
x = 1
def f(a = x):
locals()
self._test_unassigned(test)
def test_class_base(self):
def test():
if get_false():
x = dict
class C(x):
locals()
self._test_unassigned(test)
def test_conditional_member_def(self):
def test():
class C(object):
if get_false():
x = 1
C().x
self._test_attribute_error(test)
def test_member_access_on_undefined(self):
def test():
undefined.member = 1
locals()
self._test_undefined(test)
def test_item_access_on_undefined(self):
def test():
undefined[1] = 1
locals()
self._test_undefined(test)
def test_item_access_on_undefined_in_tuple(self):
def test():
(undefined[1],x) = 1,2
locals()
self._test_undefined(test)
def test_item_access_on_undefined_in_list(self):
def test():
[undefined[1],x] = 1,2
locals()
self._test_undefined(test)
def test_item_index_undefined(self):
def test():
dict()[undefined] = 1
locals()
self._test_undefined(test)
def test_nested_scope_variable_access(self):
def test():
def g():
def f():
x = undefined
return f
g()()
undefined = 1
self._test_undefined(test)
def test_del_local_var(self):
result = "Failed"
a = 2
b = a
del a
try:
b = a
except NameError:
result = "Success"
self.assertTrue(result == "Success")
def test_del_class_attr(self):
class C:
pass
c = C()
c.a = 10
self.assertTrue("a" in dir(c))
del c.a
self.assertTrue(not "a" in dir(c))
C.a = 10
self.assertTrue("a" in dir(C))
del C.a
self.assertTrue(not "a" in dir(C))
def test_dir_list(self):
for m in dir([]):
c = callable(m)
a = getattr([], m)
def test_del_undefined(self):
success = False
try:
del this_name_is_undefined
except NameError:
success = True
self.assertTrue(success)
def test_delete_builting_func(self):
## delete builtin func
import builtins
try:
del pow
self.assertUnreachable("should have thrown")
except NameError: pass
def test_class_var(self):
class C(object):
name = None
def test(self):
print(name)
self.assertRaises(NameError, C().test)
def test_delete_from_builtins(self):
import builtins
from importlib import reload
try:
del builtins.pow
self.assertRaises(NameError, lambda: pow)
self.assertRaises(AttributeError, lambda: builtins.pow)
finally:
reload(builtins)
# make sure we still have access to builtins' after reloading
# self.assertEqual(pow(2,2), 4) # bug 359890
dir('abc')
def test_override_builtin_method(self):
"""Overriding builtins method inconsistent with -X:LightweightScopes flag"""
import builtins
builtins.help = 10
self.assertRaisesPartialMessage(TypeError, "is not callable", lambda: help(dir))
def test_runtime_name_lookup_class_scopes(self):
# Test that run time name lookup skips over class scopes
# (because class variables aren't implicitly accessible inside member functions)
a = 123
class C(object):
a = 456
def foo(self):
# this exec statement is here to cause the "a" in the next statement
# to have its name looked up at run time.
exec("dummy = 10", {}, locals())
return a # a should bind to the global a, not C.a
self.assertEqual(C().foo(), 123)
def test_cp20956(self):
class C:
codeplex_20956 = 3
def foo(self):
return codeplex_20956
self.assertRaisesMessage(NameError, "global name 'codeplex_20956' is not defined",
C().foo)
def test_import_as(self):
class ImportAsInClass:
import sys as sys_in_class
class ImportInClass:
import sys
self.assertTrue(ImportAsInClass.sys_in_class is ImportInClass.sys)
class FromImportAsInClass:
from sys import path as sys_path
class FromImportInClass:
from sys import path
self.assertTrue(FromImportAsInClass.sys_path is FromImportInClass.path)
def test_global_and_class_member(self):
class ClassWithGlobalMember:
global global_class_member
global_class_member = "global class member value"
self.assertTrue(not hasattr(ClassWithGlobalMember, "global_class_member"), "invalid binding of global in a class")
self.assertEqual(global_class_member, "global class member value")
def test_locals_copy(self):
def f():
abc = eval("locals()")
abc['foo'] = 42
print(foo)
self.assertRaises(NameError, f)
def f():
x = locals()
x['foo'] = 42
print(foo)
self.assertRaises(NameError, f)
def f():
x = vars()
x['foo'] = 42
print(foo)
self.assertRaises(NameError, f)
def f():
import os
fname = 'temptest_%d.py' % os.getpid()
with open(fname, 'w+') as f:
f.write('foo = 42')
try:
execfile(fname)
finally:
os.unlink(fname)
return foo
self.assertRaises(NameError, f)
self.assertEqual(fooCheck(), 42)
def f():
import os
module_name = 'temptest_%d' % os.getpid()
fname = module_name + '.py'
with open(fname, 'w+') as f:
f.write('foo = 42')
try:
with path_modifier('.'):
foo = __import__(module_name).foo
finally:
os.unlink(fname)
return foo
self.assertEqual(f(), 42)
def f():
exec("foo = 42", {})
return foo
self.assertRaises(NameError, f)
def F():
a = 4
class C:
field=7
def G(self):
b = a
b = 4
return locals()
c = C()
return c.G()
self.assertEqual(set(F()), set(['self', 'b', 'a']))
def f():
a = 10
def g1():
return a, locals()
def g2():
return locals()
res = [g1()]
res.append(g2())
return res
self.assertEqual(f(), [(10, {'a':10}), {}])
X = (42, 43)
class Y:
def outer_f(self):
def f(X):
(a, b) = X
return a, b
return f
a = Y()
self.assertEqual(a.outer_f()(), (42, 43))
run_test(__name__)
if __name__ == '__main__':
selph.assertTrue('builtins' in locals())
a = 5
selph.assertTrue('a' in locals())
exec('a = a+1')
selph.assertTrue(locals()['a'] == 6)
exec("pass")
locals = my_locals
exec("pass")
import builtins
save_locals = builtins.locals
try:
builtins.locals = my_locals
exec("pass")
finally:
builtins.locals = save_locals
localsAfterExpr()
del locals
# verify builtins is accessed if a global isn't defined
xyz = 'aaa'
selph.assertEqual(xyz, 'aaa')
import builtins
builtins.xyz = 'abc'
xyz = 'def'
selph.assertEqual(xyz, 'def')
del xyz
selph.assertEqual(xyz, 'abc')
del builtins.xyz
try:
a = xyz
except NameError: pass
|
import re
from exclude_modules_b import match
def a():
re.match()
def b():
match()
a()
b()
|
from pathlib import Path
import aiofiles
import asyncio
from inflection import underscore
import aiodocker
import os
import re
from prodict import Prodict
import ujson
import subprocess
from pprint import pprint
from .. import logger
"""
links:
http://aiodocker.readthedocs.io/en/latest/
https://docs.docker.com/engine/api/v1.37/#operation/ContainerList
https://docs.docker.com/engine/api/v1.24/#31-containers
"""
def tar_image_cmd(path):
return ['tar', '-C', path, '-c', '-X', '.dockerignore', '.']
def underdict(obj):
if isinstance(obj, dict):
new_dict = {}
for key, value in obj.items():
key = underscore(key)
new_dict[key] = underdict(value)
return new_dict
# if hasattr(obj, '__iter__'):
# return [underdict(value) for value in obj]
else:
return obj
def inject_attrs(cont):
attrs = underdict(cont._container)
attrs['name'] = (
attrs['name'] if 'name' in attrs else attrs['names'][0]).strip('/')
attrs['short_id'] = attrs['id'][:12]
if 'state' in attrs and 'status' in attrs['state']:
attrs['status'] = attrs['state']['status']
if 'config' in attrs and 'labels' in attrs['config']:
attrs['labels'] = attrs['config']['labels']
cont.attrs = Prodict.from_dict(attrs)
return cont
def pack_ports(plist=[]):
return ':'.join([str(p) for p in plist])
def unpack_ports(pstr):
return pstr and [int(p) for p in pstr.split(':')] or []
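# e.g. pack_ports([8080, 8443]) -> '8080:8443' and unpack_ports('8080:8443') -> [8080, 8443];
# unpack_ports('') -> [], matching labels created with no allocated ports.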
def def_labels(a_ports=[]):
return Prodict(inband='inband', ports=pack_ports(a_ports))
def short_info(container):
    if not hasattr(container, 'attrs'):
        inject_attrs(container)
ca = container.attrs
dic = Prodict.from_dict({key: getattr(container.attrs, key)
for key in ['short_id', 'name', 'status']})
dic.ports = []
if 'labels' in ca:
if 'ports' in ca.labels:
dic.ports = unpack_ports(ca.labels.ports)
return dic
def image_name(name):
return f'rst/service-{name}'
class Dock():
    """Builds service images and manages their containers through aiodocker,
    tracking the host ports allocated for them."""
def __init__(self, images_path, images, container_params, container_env, **kwargs):
self.dc = aiodocker.Docker()
self.initial_ports = list(range(8900, 8999))
self.available_ports = list(self.initial_ports)
self.images_path = images_path
self.images = Prodict.from_dict(images)
self.container_env = Prodict.from_dict(container_env)
self.container_params = Prodict.from_dict(container_params)
async def inspect_containers(self):
conts = await self.containers()
for cont in conts.values():
await self.inspect_container(cont)
return [short_info(cont) for cont in conts.values()]
async def inspect_container(self, cont):
logger.info(f"inspecting container {cont.attrs.name}")
lbs = cont.attrs.labels
for port in lbs.ports and unpack_ports(lbs.ports) or []:
logger.info(f' -> {lbs.inband} port:{port}')
self.allocate_port(port)
async def conts_list(self):
conts = await self.containers()
return [short_info(cont) for cont in conts.values()]
async def get(self, name):
conts = await self.containers()
return conts.get(name, None)
async def containers(self):
filters = ujson.dumps({
'label': ['inband=inband']
})
conts = await self.dc.containers.list(all=True, filters=filters)
for cont in conts:
await cont.show()
return {(cont.attrs.name): inject_attrs(cont) for cont in conts}
def allocate_port(self, port=None):
if port and port in self.available_ports:
self.available_ports.remove(port)
return port
return self.available_ports.pop()
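    # e.g. allocate_port() pops the next free port from the pool (starting at the top,
    # 8998), while allocate_port(8905) reserves 8905 if it is still available.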
async def remove_container(self, name):
await self.stop_container(name)
conts = await self.containers()
if name in list(conts.keys()):
logger.info(f"removing container {name}")
await conts[name].delete()
return True
async def stop_container(self, name):
conts = await self.containers()
if name in list(conts.keys()):
logger.info(f"stopping container {name}")
await conts[name].stop()
return True
async def restart_container(self, name):
conts = await self.containers()
if name in list(conts.keys()):
logger.info(f"restarting container {name}")
await conts[name].restart()
return True
async def create_image(self, name, path):
img_id = None
path = Path(path).resolve()
with subprocess.Popen(tar_image_cmd(path), stdout=subprocess.PIPE) as proc:
img_params = Prodict.from_dict({
'fileobj': proc.stdout,
'encoding': 'identity',
'tag': name,
'labels': def_labels(),
'stream': True
})
logger.info(f"building image {img_params} from {path}")
async for chunk in await self.dc.images.build(**img_params):
if isinstance(chunk, dict):
logger.debug(chunk)
if 'aux' in chunk:
img_id = underdict(chunk['aux'])
else:
logger.debug('chunk: %s %s', type(chunk), chunk)
logger.info('image created %s', img_id)
img = await self.dc.images.get(name)
return Prodict.from_dict(underdict(img))
async def run_container(self, name, params):
# build custom images
if False:
img_path = ''
else:
# rebuild base image
await self.create_image(self.images.base.name, self.images.base.path)
# params for service image
img_path = self.images.collection.path
# service image
service_img_name = f'rst/service-{name}'
img = await self.create_image(service_img_name, img_path)
def take_port(): return {
'HostIp': self.container_params.bind_ip,
'HostPort': str(self.allocate_port())}
ports = {port: [take_port()]
for port in img.container_config.exposed_ports.keys() or {}}
a_ports = [port[0]['HostPort'] for port in ports.values()]
env = {'NAME': name}
env.update(self.container_env)
config = Prodict.from_dict({
'Image': img.id,
'Hostname': name,
'Cmd': name,
'Ports': ports,
'Labels': def_labels(a_ports=a_ports),
'Env': [f"{k}={v}" for k, v in env.items()],
'StopSignal': 'SIGTERM',
'HostConfig': {
'RestartPolicy': {'Name': 'unless-stopped'},
'PortBindings': ports,
'NetworkMode': self.container_params.network,
'Memory': self.container_params.memory
}
})
print(config)
logger.info(f"starting container {name}. ports: {config.Ports}")
c = await self.dc.containers.create_or_replace(name, config)
await c.start()
await c.show()
c = inject_attrs(c)
logger.info(f'started container {c.attrs.name} [{c.attrs.id}]')
return short_info(c)
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Link
def has_usable_password(context):
    return context['request'].user.has_usable_password()
link_logout = Link(
icon='fa fa-sign-out', text=_('Logout'), view='authentication:logout_view'
)
link_password_change = Link(
condition=has_usable_password, icon='fa fa-key', text=_('Change password'),
view='authentication:password_change_view'
)
|
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from itertools import product
import numpy as np
import pandas as pd
from numpy.random import randn
from pandas.core.groupby import DataFrameGroupBy as GroupBy
from legate import pandas as lp
from legate.pandas.frontend.groupby import GroupBy as LegateGroupBy
aggs = ["sum", "min", "max"]
n = 32
def to_pandas(ldf):
columns = {}
for column in ldf.columns:
columns[column] = ldf[column].to_numpy()
return pd.DataFrame(columns, index=ldf.index)
def sort_and_compare(df1, df2):
df1 = df1.sort_index()
df2 = df2.sort_index()
return df1.equals(df2)
for pair in product(aggs, aggs[1:] + aggs[:1]):
agg1, agg2 = pair
if agg1 == agg2:
f = getattr(GroupBy, agg1)
mf = getattr(LegateGroupBy, agg1)
else:
def f(g):
return getattr(GroupBy, "agg")(g, {"c1": agg1, "c2": agg2})
def mf(g):
return getattr(LegateGroupBy, "agg")(g, {"c1": agg1, "c2": agg2})
keys1 = [1, 4, 2, 3, 1, 3, 1]
keys2 = [1.0, 4.0, np.nan, 3, np.nan, 2, 1]
key_dtype1 = np.int64
key_dtype2 = np.float64
print(
"Agg for c1: %s, Agg for c2: %s, Key type1: %s, Key type2: %s"
% (agg1, agg2, str(key_dtype1), str(key_dtype2))
)
df = pd.DataFrame(
{
"c1": np.array(randn(n) * 100.0, dtype=np.int64),
"c2": np.array([np.nan] * n, dtype=np.float64),
"c3": np.array(
(keys1 * ((n + len(keys1) - 1) // len(keys1)))[:n],
dtype=np.dtype(key_dtype1),
),
"c4": np.array(
(keys2 * ((n + len(keys2) - 1) // len(keys2)))[:n],
dtype=np.dtype(key_dtype2),
),
}
)
ldf = lp.DataFrame(df)
out_df = f(df.groupby(["c3", "c4"], sort=False))
out_df.index.names = ["c5", "c6"]
out_ldf = mf(ldf.groupby(["c3", "c4"], sort=False, method="hash"))
# XXX: Note that sum reductions in Pandas yield 0.0 when all values are
# NaNs, whereas both Legate and cuDF produce nulls in that case.
# Here we replace NaNs/nulls with 0.0 to ignore this semantics
# difference.
out_df = out_df.fillna(0.0)
out_ldf = out_ldf.fillna(0.0)
assert sort_and_compare(out_df, to_pandas(out_ldf))
|
import numpy as np
import scipy as sc
import os
import time
from sklearn.datasets import make_circles
# CREATE DATASET
n = 10 # ROWS (samples) OF THE DATASET
p = 2 # COLUMNS (features) OF THE DATASET
xi = [[-1, 0],[1, 0], [-0.5, 0], [0.5, 0]]
yi = [0,0,1,1]
x, y = make_circles(n_samples=n, factor=0.5, noise=0.05)
y = y[:, np.newaxis]
#x = np.array(xi)
#y = np.array(yi)
# # NETWORK LAYER CLASS
class neural_layer():
def __init__(self, w, b, act_f):
self.act_f = act_f
        # bias vector; it can be multiplied because its type is numpy.ndarray
        # multiply by 2 and subtract 1 so the rand values range over -1 < x < 1; the shapes are [1, n_neur] and [n_conn, n_neur]
self.b = b
self.w = w
# # ACTIVATION FUNCTION
sigm = (lambda x: 1 / (1 + np.e ** ( - x )),
lambda x: x * (1 - x))
relu = lambda x: np.maximum(0, x)
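# e.g. sigm[0](0.0) == 0.5 and sigm[1](0.5) == 0.25; the derivative is expressed in
# terms of the activation value, which is why it is applied to `a` during backprop.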
def _create_nn(topology, act_f):
nn = []
for l, value in enumerate(topology[:-1]):
nn.append(neural_layer(np.random.randn(value,topology[l+1]), np.random.randn(1, topology[l+1]), act_f))
return nn
def create_nn(act_f):
nn = []
#layer 1
nn.append(neural_layer(w=np.array([[-0.13, -0.52], [-0.35, -0.18]]), b=np.array([-0.7, 0.3]),act_f=act_f))
#layer 2
nn.append(neural_layer(w=np.array([[-0.5, 0.2], [0.07, 0.55]]), b=np.array([0.55, -0.9]),act_f=act_f))
#layer 3
nn.append(neural_layer(w=np.array([-0.2, -0.6]), b=np.array([-0.4]),act_f=act_f))
return nn
topology=[p, 4, 8, 4, 1]
neural_net = _create_nn(topology,sigm)
#print(red)
# MEAN SQUARED ERROR
l2_cost = (lambda Yp, Yr: np.mean((Yp-Yr) ** 2),
lambda Yp, Yr: (Yp-Yr))
def train(neural_net, x, y, l2_cost, rate = 0.5):
out = [(None,x) ]
zetas = [x]
alphas = [None]
#FORWARD PASS
for l, layer in enumerate(neural_net):
z = np.dot(out[-1][1], neural_net[l].w) + neural_net[l].b
a = neural_net[l].act_f[0](z)
zetas.append(z)
alphas.append(a)
out.append((z,a))
deltas = []
# neural_net[0-6]
for l in reversed(range(0, len(neural_net))):
        # the indices are out[l+1] because out[0] stores the data coming from the dataset
z = out[l+1][0]
a = out[l+1][1]
# BACKWARD PASS
if l == len(neural_net)-1:
            # compute delta at the last layer (cost derivative * activation derivative)
deltas.insert(0, l2_cost[1](a, y) * neural_net[l].act_f[1](a))
else:
deltas.insert(0, np.dot(deltas[0],_W.T) * neural_net[l].act_f[1](a))
_W = neural_net[l].w
# GRADIENT DESCENT
neural_net[l].b = neural_net[l].b - np.mean(deltas[0], axis = 0, keepdims=True) * rate
neural_net[l].w = neural_net[l].w - np.dot(out[l][1].T,deltas[0]) * rate
time.sleep(0.1)
os.system('clear')
print("\n Salida de la Red Neuronal")
print(out[-1][1])
print("\n Salida Esperada")
print(y)
for g in range(9000):
train(neural_net, x, y, l2_cost, 0.9)
|
"""
Evaluate the model using Eigen split of KITTI dataset
- prepare gt depth running the script https://github.com/nianticlabs/monodepth2/blob/master/export_gt_depth.py
"""
import argparse
import os
import cv2
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from eval_utils import compute_errors, compute_scale_and_shift
from network import Pydnet
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class KITTILoader(object):
def __init__(self, params):
self.params = params
self.height = params["height"]
self.width = params["width"]
self.data_list_file = params["data_list_file"]
self.data_path = params["data_path"]
self.num_workers = 4
        self.data_list = np.loadtxt(self.data_list_file, dtype=bytes).astype(str)
self.default_img_shape = None
def read_and_decode(self, filename_queue):
"""Read jpeg file from file system"""
img0_name = tf.strings.join([self.data_path, "/", filename_queue, ".jpg"])
img0 = tf.image.decode_jpeg(tf.io.read_file(img0_name), channels=3)
img0 = tf.cast(img0, tf.float32)
return img0
def preprocess(self, filename_queue):
"""Prepare single image at testing time"""
img0 = self.read_and_decode(filename_queue)
img0 = tf.image.resize_images(img0, [self.height, self.width], tf.image.ResizeMethod.AREA)
img0.set_shape([self.height, self.width, 3])
img0 = img0 / 255.0
return img0
def create_iterator(self, num_parallel_calls=4):
"""Create iterator"""
data_list = tf.convert_to_tensor(self.data_list, dtype=tf.string)
dataset = tf.data.Dataset.from_tensor_slices(data_list)
dataset = dataset.map(self.preprocess, num_parallel_calls=num_parallel_calls)
dataset = dataset.batch(1)
dataset = dataset.repeat()
iterator = dataset.make_initializable_iterator()
return iterator
def read_test_files(test_file) -> list:
"""Read test files from txt file"""
assert os.path.exists(test_file)
with open(test_file, "r") as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
return lines
def run_inference(opts):
"""Run the model on KITTI"""
network_params = {"height": 320, "width": 640, "is_training": False}
dataset_params = {
"height": 320,
"width": 640,
"data_path": opts.data_path,
"data_list_file": opts.data_list_file,
}
dataset = KITTILoader(dataset_params)
iterator = dataset.create_iterator()
batch_img = iterator.get_next()
network = Pydnet(network_params)
predicted_idepth = network.forward(batch_img)
predicted_idepth = tf.nn.relu(predicted_idepth)
# restore graph
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(iterator.initializer)
saver.restore(sess, opts.ckpt)
os.makedirs(opts.dest, exist_ok=True)
test_images = read_test_files(opts.data_list_file)
num_images = len(test_images)
with tqdm(total=num_images) as pbar:
for i in range(num_images):
idepth = sess.run(predicted_idepth)
idepth = np.squeeze(idepth)
min_idepth = idepth.min()
max_idepth = idepth.max()
norm_idepth = (idepth - min_idepth) / (max_idepth - min_idepth)
norm_idepth *= 255.0
target_path = os.path.join(opts.data_path, f"{test_images[i]}.jpg")
target = cv2.imread(target_path)
h, w = target.shape[:2]
norm_idepth = cv2.resize(norm_idepth, (w, h))
img_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
cv2.imwrite(img_path, (norm_idepth * 256.0).astype(np.uint16))
pbar.update(1)
print("Inference done!")
def eval(opts):
"""Compute error metrics."""
errors = []
test_images = read_test_files(opts.data_list_file)
print("=> loading gt data")
gt_depths = np.load(opts.gt_path, fix_imports=True, encoding="latin1", allow_pickle=True)[
"data"
]
print("=> starting evaluation")
with tqdm(total=len(test_images)) as pbar:
for i in range(len(test_images)):
target = gt_depths[i]
pred_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
prediction_idepth = cv2.imread(pred_path, -1) / 256.0
mask = (target > 1e-3) & (target < opts.max_depth)
target_idepth = np.zeros_like(target)
target_idepth[mask == 1] = 1.0 / target[mask == 1]
scale, shift = compute_scale_and_shift(prediction_idepth, target_idepth, mask)
prediction_idepth_aligned = scale * prediction_idepth + shift
disparity_cap = 1.0 / opts.max_depth
prediction_idepth_aligned[prediction_idepth_aligned < disparity_cap] = disparity_cap
            prediction_depth_aligned = 1.0 / prediction_idepth_aligned
            prediction_depth_aligned = prediction_depth_aligned[mask == 1]
            target = target[mask == 1]
            errors.append(compute_errors(target, prediction_depth_aligned))
pbar.update(1)
mean_errors = np.array(errors).mean(0)
labels = ["abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"]
for i in range(len(labels)):
print(f"{labels[i]}:{mean_errors[i]}")
print("Evaluation done!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Evaluate depth network on KITTI")
parser.add_argument("--ckpt", type=str, help="path to checkpoint", required=True)
parser.add_argument("--data_path", type=str, help="path to kitti", required=True)
parser.add_argument("--gt_path", type=str, help="path to gt_depths.npz", required=True)
parser.add_argument(
"--data_list_file", type=str, help="path to data list", default="test_kitti.txt"
)
parser.add_argument("--dest", type=str, help="prediction folder", default="kitti")
parser.add_argument("--max_depth", type=float, help="maximum depth value", default=80.0)
opts = parser.parse_args()
run_inference(opts)
eval(opts)
|
from django.contrib import admin
from django import forms
from django.db import models
from django.conf import settings
from academic.content.models import *
admin.site.register(Download)
|
import csv
result = list(csv.reader(open("predictions_test.csv", "r", encoding="utf-8")))
result.remove(result[0])
real_result = [i[0] for i in result]
origin_answer = list(csv.reader(
    open("test_stances_unlabeled.csv", "r", encoding="utf-8")))
origin_answer.remove(origin_answer[0])
real_answer = []
for item in origin_answer:
if item:
real_answer.append(item[2])
unrelated_samples = 0
for item in real_answer:
if item == 'unrelated':
unrelated_samples += 1
related_samples = len(real_answer) - unrelated_samples
unrelated_result = 0
related_result = 0
samples_num = len(real_result)
for i in range(samples_num):
if real_result[i] == real_answer[i]:
if real_result[i] == 'unrelated':
unrelated_result += 1
else:
related_result += 1
score1 = unrelated_result/unrelated_samples
score2 = related_result/related_samples
relative_score = score1*0.25+score2*0.75
print(relative_score)
|
# relative import hack
# https://stackoverflow.com/questions/16981921/relative-imports-in-python-3
import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="river-engine",
version="0.0.1a",
author="cysnake4713",
author_email="cysnake4713@gmail.com",
description="A framework to synchronize between different data source",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/cysnake4713/river-engine",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
from __future__ import print_function
import maya.cmds as cmds
def cancelCallback(windowID, *pArgs ):
if cmds.window( windowID, exists=True ):
cmds.deleteUI( windowID )
def text_val(field):
return cmds.textField( field, query=True, text=True )
|
import sys
import os
from time import time
from glob import glob
from scipy import io
import multiprocessing as mp
sys.path.append('../pc2convex')
sys.path.append('../npy2mat')
from npy2mat import mat2npy
from my_utils import *
from frame import frame_gen
from segmentation import label_pts, segmentation
import config
if_vis = True
# on server
vis_dir = '/home/seg/data/seg_vis/'
data_path = '/home/seg/data/a3d2mat/*'
# on laptop
# vis_dir = '../output/seg_vis/'
# data_path = '../data/*'
def label_mat(fpath, vis_dir=None):
tic = time()
fname = get_name(fpath)
dset = io.loadmat(fpath)
data = mat2npy(dset)
bita = compress_data(data>0, config.ratio)
n_layers = 40
layers = np.linspace(0, bita.shape[2] - 1, n_layers)
frames, flags = frame_gen(bita, layers, if_plot=False)
label_fcn = segmentation(frames, flags, bita)
labels = label_pts(dset['pts'], label_fcn=label_fcn, scaled=False)
dset['labels'] = labels.ravel()
dset['frames'] = frames
dset['flags'] = flags
io.savemat(fpath, dset)
toc = time()
print(fname, ' is labeled in {}s'.format(toc - tic))
if if_vis:
print('visualization is enabled')
pts_ = get_points(bita, thresh=0)
labels_ = label_pts(pts_, label_fcn, scaled=True).ravel()
seg_vis2d(pts_, labels_, fname, output_dir=vis_dir, savefig=True)
print(fname, 'is visualized in {}s'.format(time() - toc))
if __name__ == '__main__':
data_dir = glob(data_path)
print('{} .mat files in total'.format(len(data_dir)))
if not os.path.exists(vis_dir):
os.mkdir(vis_dir)
pool = mp.Pool()
    print('{} cores are available.'.format(mp.cpu_count()))
for f in data_dir:
# label_mat(f, vis_dir)
pool.apply_async(label_mat, args=(f, vis_dir))
pool.close()
pool.join()
print('task complete')
|
from flask_app import application
if __name__ == '__main__':
application.run(debug=True, host='0.0.0.0', port='5000')
|
from typing import Any, Dict, Optional
from fennel.client.actions import result
from fennel.client.state import get_status
from fennel.exceptions import ResultsDisabled, TaskFailed
from fennel.job import Job
from fennel.utils import EMPTY
class AsyncResult:
def __init__(self, job: Job, app):
"""
A handle for a task that is being processed by workers via the task queue.
        Conceptually similar to the `AsyncResult` from the multiprocessing library.
"""
self.job = job
self.app = app
        self._result: Optional[Dict] = None
def status(self):
"""
Return the status of the task execution.
Examples
--------
>>> @app.task
        >>> def bar(x):
... time.sleep(x)
... return x
...
>>> x = bar.delay(5)
>>> x.status()
SENT
>>> x.status() # After roughly 5 seconds...
SUCCESS
"""
return get_status(self.app, self.job.uuid)
def get(self, timeout: int = EMPTY) -> Any:
"""
Wait for the result to become available and return it.
Raises
------
:exc:`fennel.exceptions.TaskFailed`
If the original function raised an exception.
:exc:`fennel.exceptions.Timeout`
If > `timeout` seconds elapse before a result is available.
Examples
--------
>>> @app.task(retries=0)
>>> def foo(x):
... return x
...
>>> x = foo.delay(7)
>>> x.get() # Wait for the result.
7
Warning
-------
You must have results storage enabled
(:attr:`fennel.settings.Settings.results_enabled`)
If you have retries enabled, they may be rescheduled many times, so you may
prefer to use retries=0 for tasks whose result you intend to wait for.
"""
if not self.app.settings.results_enabled:
raise ResultsDisabled
if timeout is EMPTY:
timeout = self.app.settings.task_timeout
if self._result is None:
self._result = result(self.app, self.job, timeout) # Raises Timeout
exc, val = self._result["exception"], self._result["return_value"]
if exc:
raise TaskFailed(**exc)
else:
return val
def __repr__(self) -> str:
return f"AsyncResult(uuid={self.job.uuid})"
|
import requests
import json
url = "http://127.0.0.1:5000/tweets/"
json_input={"message":"oxygen AND agra AND verified"}
j_data=json.dumps(json_input)
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
r = requests.post(url, data=j_data, headers=headers)
print(r, r.text)
|
#!/usr/bin/python2.7
# Copyright 2016 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scenario_one import scenario_one
from scheduler import scheduler
from simulation import run_scenario
# Scenario 3: Same as scenario 2, but now we let the leases
# expire before the server comes back.
def scenario_three(reporter):
job = scenario_one(reporter)
scheduler.add_relative(120, lambda: job.lose_master())
scheduler.add_relative(190, lambda: job.trigger_master_election())
reporter.set_filename('scenario_three')
if __name__ == '__main__':
run_scenario(lambda reporter: scenario_three(reporter))
|
from tamproxy import Sketch, SyncedSketch, Timer
from tamproxy.devices import Motor
# Cycles a motor back and forth between -255 and 255 PWM every ~5 seconds
class MotorWrite(Sketch):
def setup(self):
self.motor = Motor(self.tamp, 2, 3)
self.motor.write(1,0)
self.delta = 1
self.motorval = 0
self.timer = Timer()
def loop(self):
if (self.timer.millis() > 10):
self.timer.reset()
if abs(self.motorval) == 255: self.delta = -self.delta
self.motorval += self.delta
self.motor.write(self.motorval>0, abs(self.motorval))
if __name__ == "__main__":
sketch = MotorWrite()
sketch.run()
|
# Use GPS locations and elapsed time to get maximum speeds.
from datetime import datetime
from osgeo import ogr
import ch7funcs
date_format = '%Y-%m-%d %H:%M:%S.%f'
ds = ogr.Open(r'D:\osgeopy-data\Galapagos')
lyr = ds.GetLayerByName('albatross_lambert')
# Loop through each tag, initialize max_speed to 0, and limit the GPS
# locations to that tag.
for tag_id in ch7funcs.get_unique(ds, 'albatross_lambert', 'tag_id'):
max_speed = 0
lyr.SetAttributeFilter("tag_id ='{}'".format(tag_id))
# Get the timestamp for the first point and convert it to a datetime.
row = next(lyr)
ts = row.GetField('timestamp')
previous_time = datetime.strptime(ts, date_format)
# Loop through the rest of the locations for the current tag.
for row in lyr:
# Get the current timestamp, convert to a datetime, and calculate
# the amount of time since the previous location.
ts = row.GetField('timestamp')
current_time = datetime.strptime(ts, date_format)
elapsed_time = current_time - previous_time
hours = elapsed_time.total_seconds() / 3600
# Use the distance you calculated in listing 7.6 to calculate speed.
distance = row.GetField('distance')
speed = distance / hours
# Keep this speed if it's the largest seen so far.
max_speed = max(max_speed, speed)
# When done looping through the locations for this tag, print out the
# max speed.
    print('Max speed for {0}: {1}'.format(tag_id, max_speed))
|
"""Schema module for GraphQL."""
from collections import OrderedDict
import logging
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
import graphene
from graphene.types import generic
from nautobot.circuits.graphql.types import CircuitTerminationType
from nautobot.core.graphql.utils import str_to_var_name
from nautobot.core.graphql.generators import (
generate_attrs_for_schema_type,
generate_computed_field_resolver,
generate_custom_field_resolver,
generate_relationship_resolver,
generate_restricted_queryset,
generate_schema_type,
generate_null_choices_resolver,
)
from nautobot.core.graphql.types import ContentTypeType
from nautobot.dcim.graphql.types import (
CableType,
CablePathType,
ConsoleServerPortType,
DeviceType,
InterfaceType,
RackType,
SiteType,
)
from nautobot.extras.registry import registry
from nautobot.extras.models import ComputedField, CustomField, Relationship
from nautobot.extras.choices import CustomFieldTypeChoices, RelationshipSideChoices
from nautobot.extras.graphql.types import TagType
from nautobot.ipam.graphql.types import AggregateType, IPAddressType, PrefixType
from nautobot.virtualization.graphql.types import VirtualMachineType, VMInterfaceType
logger = logging.getLogger("nautobot.graphql.schema")
registry["graphql_types"] = OrderedDict()
registry["graphql_types"]["circuits.circuittermination"] = CircuitTerminationType
registry["graphql_types"]["contenttypes.contenttype"] = ContentTypeType
registry["graphql_types"]["dcim.cable"] = CableType
registry["graphql_types"]["dcim.cablepath"] = CablePathType
registry["graphql_types"]["dcim.consoleserverport"] = ConsoleServerPortType
registry["graphql_types"]["dcim.device"] = DeviceType
registry["graphql_types"]["dcim.interface"] = InterfaceType
registry["graphql_types"]["dcim.rack"] = RackType
registry["graphql_types"]["dcim.site"] = SiteType
registry["graphql_types"]["extras.tag"] = TagType
registry["graphql_types"]["ipam.aggregate"] = AggregateType
registry["graphql_types"]["ipam.ipaddress"] = IPAddressType
registry["graphql_types"]["ipam.prefix"] = PrefixType
registry["graphql_types"]["virtualization.virtualmachine"] = VirtualMachineType
registry["graphql_types"]["virtualization.vminterface"] = VMInterfaceType
STATIC_TYPES = registry["graphql_types"].keys()
CUSTOM_FIELD_MAPPING = {
CustomFieldTypeChoices.TYPE_INTEGER: graphene.Int(),
CustomFieldTypeChoices.TYPE_TEXT: graphene.String(),
CustomFieldTypeChoices.TYPE_BOOLEAN: graphene.Boolean(),
CustomFieldTypeChoices.TYPE_DATE: graphene.Date(),
CustomFieldTypeChoices.TYPE_URL: graphene.String(),
CustomFieldTypeChoices.TYPE_SELECT: graphene.String(),
}
def extend_schema_type(schema_type):
"""Extend an existing schema type to add fields dynamically.
    The following types of dynamic fields/functions are currently supported:
    - Queryset: ensure a restricted queryset is always returned.
    - Custom Fields: all custom fields will be defined as first-level attributes.
    - Tags: tags will automatically be resolvable.
    - Config Context: add a config_context attribute and resolver.
    - Relationships: all relationships will be defined as first-level attributes.
    To insert a new field dynamically:
    - The field must be declared in schema_type._meta.fields as a graphene.Field.mounted
    - A callable attribute named "resolve_<field_name>" must be defined at the schema_type level
    """
model = schema_type._meta.model
#
# Queryset
#
setattr(schema_type, "get_queryset", generate_restricted_queryset())
#
# Custom Fields
#
schema_type = extend_schema_type_custom_field(schema_type, model)
#
# Tags
#
schema_type = extend_schema_type_tags(schema_type, model)
#
# Config Context
#
schema_type = extend_schema_type_config_context(schema_type, model)
#
# Relationships
#
schema_type = extend_schema_type_relationships(schema_type, model)
#
# Computed Fields
#
schema_type = extend_schema_type_computed_field(schema_type, model)
#
    # Add resolve_{field.name} for fields that have null=False, blank=True, and choices defined, so blank values resolve to null
#
schema_type = extend_schema_type_null_field_choice(schema_type, model)
return schema_type
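# Illustrative sketch (added; not part of the original module), using a
# hypothetical field name "example_field" -- the dynamic field/resolver pattern
# applied by extend_schema_type and the helpers below boils down to:
#   schema_type._meta.fields["example_field"] = graphene.Field.mounted(graphene.String())
#   setattr(schema_type, "resolve_example_field", lambda self, info: "some value")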
def extend_schema_type_null_field_choice(schema_type, model):
    """Extend the schema to add resolvers for fields with null=False, blank=True, and choices defined, so blank values resolve to null.
Args:
schema_type (DjangoObjectType): GraphQL Object type for a given model
model (Model): Django model
Returns:
schema_type (DjangoObjectType)
"""
# This is a workaround implemented for https://github.com/nautobot/nautobot/issues/466#issuecomment-877991184
# We want to iterate over fields and see if they meet the criteria: null=False, blank=True, and choices defined
for field in model._meta.fields:
# Continue onto the next field if it doesn't match the criteria
if not all((not field.null, field.blank, field.choices)):
continue
field_name = f"{str_to_var_name(field.name)}"
resolver_name = f"resolve_{field_name}"
if hasattr(schema_type, field_name):
logger.warning(
f"Unable to add {field.name} to {schema_type._meta.name} "
f"because there is already an attribute with the same name ({field_name})"
)
continue
setattr(
schema_type,
resolver_name,
generate_null_choices_resolver(field.name, resolver_name),
)
return schema_type
def extend_schema_type_custom_field(schema_type, model):
    """Extend schema_type object to add attributes and resolvers for custom fields.
    Each custom field will be defined as a first-level attribute.
Args:
schema_type (DjangoObjectType): GraphQL Object type for a given model
model (Model): Django model
Returns:
schema_type (DjangoObjectType)
"""
cfs = CustomField.objects.get_for_model(model)
prefix = ""
if settings.GRAPHQL_CUSTOM_FIELD_PREFIX and isinstance(settings.GRAPHQL_CUSTOM_FIELD_PREFIX, str):
prefix = f"{settings.GRAPHQL_CUSTOM_FIELD_PREFIX}_"
for field in cfs:
field_name = f"{prefix}{str_to_var_name(field.name)}"
resolver_name = f"resolve_{field_name}"
if hasattr(schema_type, field_name):
logger.warning(
f"Unable to add the custom field {field.name} to {schema_type._meta.name} "
f"because there is already an attribute with the same name ({field_name})"
)
continue
setattr(
schema_type,
resolver_name,
generate_custom_field_resolver(field.name, resolver_name),
)
if field.type in CUSTOM_FIELD_MAPPING:
schema_type._meta.fields[field_name] = graphene.Field.mounted(CUSTOM_FIELD_MAPPING[field.type])
else:
schema_type._meta.fields[field_name] = graphene.Field.mounted(graphene.String())
return schema_type
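# Illustrative note (added; assumes str_to_var_name leaves the name unchanged):
# with settings.GRAPHQL_CUSTOM_FIELD_PREFIX set to "cf", a custom field named
# "site_type" would be exposed as the attribute "cf_site_type", resolved by the
# generated "resolve_cf_site_type" callable and typed via CUSTOM_FIELD_MAPPING.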
def extend_schema_type_computed_field(schema_type, model):
    """Extend schema_type object to add attributes and resolvers for computed fields.
    Each computed field will be defined as a first-level attribute.
Args:
schema_type (DjangoObjectType): GraphQL Object type for a given model
model (Model): Django model
Returns:
schema_type (DjangoObjectType)
"""
cfs = ComputedField.objects.get_for_model(model)
prefix = ""
if settings.GRAPHQL_COMPUTED_FIELD_PREFIX and isinstance(settings.GRAPHQL_COMPUTED_FIELD_PREFIX, str):
prefix = f"{settings.GRAPHQL_COMPUTED_FIELD_PREFIX}_"
for field in cfs:
field_name = f"{prefix}{str_to_var_name(field.slug)}"
resolver_name = f"resolve_{field_name}"
if hasattr(schema_type, field_name):
logger.warning(
"Unable to add the computed field %s to %s because there is already an attribute with the same name (%s)",
field.slug,
                schema_type._meta.name,
field_name,
)
continue
setattr(
schema_type,
resolver_name,
generate_computed_field_resolver(field.slug, resolver_name),
)
schema_type._meta.fields[field_name] = graphene.Field.mounted(graphene.String())
return schema_type
def extend_schema_type_tags(schema_type, model):
    """Extend schema_type object to add a resolver for tags.
Args:
schema_type (DjangoObjectType): GraphQL Object type for a given model
model (Model): Django model
Returns:
schema_type (DjangoObjectType)
"""
fields_name = [field.name for field in model._meta.get_fields()]
if "tags" not in fields_name:
return schema_type
def resolve_tags(self, args):
return self.tags.all()
setattr(schema_type, "resolve_tags", resolve_tags)
return schema_type
def extend_schema_type_config_context(schema_type, model):
    """Extend schema_type object to add an attribute and resolver for config_context.
Args:
schema_type (DjangoObjectType): GraphQL Object type for a given model
model (Model): Django model
Returns:
schema_type (DjangoObjectType)
"""
fields_name = [field.name for field in model._meta.get_fields()]
if "local_context_data" not in fields_name:
return schema_type
def resolve_config_context(self, args):
return self.get_config_context()
schema_type._meta.fields["config_context"] = graphene.Field.mounted(generic.GenericScalar())
setattr(schema_type, "resolve_config_context", resolve_config_context)
return schema_type
def extend_schema_type_relationships(schema_type, model):
"""Extend the schema type with attributes and resolvers corresponding
to the relationships associated with this model."""
ct = ContentType.objects.get_for_model(model)
relationships_by_side = {
"source": Relationship.objects.filter(source_type=ct),
"destination": Relationship.objects.filter(destination_type=ct),
}
prefix = ""
if settings.GRAPHQL_RELATIONSHIP_PREFIX and isinstance(settings.GRAPHQL_RELATIONSHIP_PREFIX, str):
prefix = f"{settings.GRAPHQL_RELATIONSHIP_PREFIX}_"
for side, relationships in relationships_by_side.items():
for relationship in relationships:
peer_side = RelationshipSideChoices.OPPOSITE[side]
# Generate the name of the attribute and the name of the resolver based on the slug of the relationship
# and based on the prefix
rel_name = f"{prefix}{str_to_var_name(relationship.slug)}"
resolver_name = f"resolve_{rel_name}"
if hasattr(schema_type, rel_name):
logger.warning(
f"Unable to add the custom relationship {relationship.slug} to {schema_type._meta.name} "
f"because there is already an attribute with the same name ({rel_name})"
)
continue
# Identify which object needs to be on the other side of this relationship
# and check the registry to see if it is available,
            # the schema_type objects are organized in the registry by identifier, e.g. `dcim.device`
peer_type = getattr(relationship, f"{peer_side}_type")
peer_model = peer_type.model_class()
type_identifier = f"{peer_model._meta.app_label}.{peer_model._meta.model_name}"
rel_schema_type = registry["graphql_types"].get(type_identifier)
if not rel_schema_type:
logger.warning(f"Unable to identify the GraphQL Object Type for {type_identifier} in the registry.")
continue
if relationship.has_many(peer_side):
schema_type._meta.fields[rel_name] = graphene.Field.mounted(graphene.List(rel_schema_type))
else:
schema_type._meta.fields[rel_name] = graphene.Field(rel_schema_type)
# Generate and assign the resolver
setattr(
schema_type,
resolver_name,
generate_relationship_resolver(rel_name, resolver_name, relationship, side, peer_model),
)
return schema_type
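# Illustrative note (added; hypothetical example assuming str_to_var_name maps
# "site-to-vlan" to "site_to_vlan"): with settings.GRAPHQL_RELATIONSHIP_PREFIX set
# to "rel", such a relationship would appear as the attribute "rel_site_to_vlan"
# with a "resolve_rel_site_to_vlan" resolver; the field is a graphene.List of the
# peer type when the peer side can have many objects, otherwise a plain graphene.Field.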
def generate_query_mixin():
"""Generates and returns a class definition representing a GraphQL schema."""
class_attrs = {}
    def already_present(model):
        """Check if a model and its resolvers are staged to be added to the Mixin."""
single_item_name = str_to_var_name(model._meta.verbose_name)
list_name = str_to_var_name(model._meta.verbose_name_plural)
if single_item_name in class_attrs:
logger.warning(
f"Unable to register the schema single type '{single_item_name}' in GraphQL, "
f"there is already another type {class_attrs[single_item_name]._type} registered under this name"
)
return True
if list_name in class_attrs:
logger.warning(
f"Unable to register the schema list type '{list_name}' in GraphQL, "
f"there is already another type {class_attrs[list_name]._type} registered under this name"
)
return True
# Generate SchemaType Dynamically for all Models registered in the model_features registry
# - Ensure an attribute/schematype with the same name doesn't already exist
registered_models = registry.get("model_features", {}).get("graphql", {})
for app_name, models in registered_models.items():
for model_name in models:
try:
# Find the model class based on the content type
ct = ContentType.objects.get(app_label=app_name, model=model_name)
model = ct.model_class()
except ContentType.DoesNotExist:
logger.warning(
f"Unable to generate a schema type for the model '{app_name}.{model_name}' in GraphQL,"
"this model doesn't have an associated ContentType, please create the Object manually."
)
continue
type_identifier = f"{app_name}.{model_name}"
if type_identifier in registry["graphql_types"].keys():
# Skip models that have been added statically
continue
schema_type = generate_schema_type(app_name=app_name, model=model)
registry["graphql_types"][type_identifier] = schema_type
# Add all objects in the plugin registry to the main registry
# After checking for conflict
for schema_type in registry["plugin_graphql_types"]:
model = schema_type._meta.model
type_identifier = f"{model._meta.app_label}.{model._meta.model_name}"
if type_identifier in registry["graphql_types"]:
logger.warning(
f'Unable to load schema type for the model "{type_identifier}" as there is already another type '
"registered under this name. If you are seeing this message during plugin development, check to "
"make sure that you aren't using @extras_features(\"graphql\") on the same model you're also "
"defining a custom GraphQL type for."
)
else:
registry["graphql_types"][type_identifier] = schema_type
# Extend schema_type with dynamic attributes for all object defined in the registry
for schema_type in registry["graphql_types"].values():
if already_present(schema_type._meta.model):
continue
schema_type = extend_schema_type(schema_type)
class_attrs.update(generate_attrs_for_schema_type(schema_type))
QueryMixin = type("QueryMixin", (object,), class_attrs)
return QueryMixin
|
import copy
import time
import warnings
import numpy as np
from .bnet import partition
from .mcmc_moves import DAG_edgerev, R_basic_move, R_swap_any
class PartitionMCMC:
"""Partition-MCMC sampler :footcite:`kuipers:2017` with efficient
scoring."""
def __init__(
self,
C,
score,
d,
inv_temp=1.0,
move_weights=[1, 1, 2],
stats=None,
R=None,
):
self.n = len(C)
self.C = C
self.inv_temp = inv_temp
self.score = score
self.d = d
self.stay_prob = 0.01
self._all_moves = [
self.R_basic_move,
self.R_swap_any,
self.DAG_edgerev,
]
self._move_weights = list(move_weights)
self.stats = stats
if self.stats is not None:
for move in self._all_moves:
self.stats["mcmc"][self.inv_temp][move.__name__][
"proposed"
] = 0
self.stats["mcmc"][self.inv_temp][move.__name__][
"accepted"
] = 0
self.stats["mcmc"][self.inv_temp][move.__name__][
"accept_ratio"
] = 0
self._init_moves()
self.R = R
if self.R is None:
self.R = self._random_partition()
self.R_node_scores = self._pi(self.R)
self.R_score = self.inv_temp * sum(self.R_node_scores)
def _init_moves(self):
# This needs to be called if self.inv_temp changes from/to 1.0
move_weights = self._move_weights
if self.inv_temp != 1:
move_weights = self._move_weights[:-1]
# Each move is repeated weights[move] times to allow uniform sampling
# from the list (np.random.choice can be very slow).
self._moves = [
m for m, w in zip(self._all_moves, move_weights) for _ in range(w)
]
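        # Illustrative note (added): with the default move_weights [1, 1, 2] and
        # inv_temp == 1.0, self._moves becomes
        # [R_basic_move, R_swap_any, DAG_edgerev, DAG_edgerev], so a uniform
        # random index selects DAG_edgerev twice as often as the other moves;
        # for heated chains (inv_temp != 1) DAG_edgerev is dropped entirely.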
def __copy__(self):
return PartitionMCMC(
self.C,
self.score,
self.d,
inv_temp=self.inv_temp,
move_weights=self._move_weights,
stats=self.stats,
R=self.R,
)
def R_basic_move(self, **kwargs):
# NOTE: Is there value in having these as methods?
return R_basic_move(**kwargs)
def R_swap_any(self, **kwargs):
return R_swap_any(**kwargs)
def DAG_edgerev(self, **kwargs):
return DAG_edgerev(**kwargs)
def _valid(self, R):
if sum(len(R[i]) for i in range(len(R))) != self.n:
return False
if len(R) == 1:
return True
for i in range(1, len(R)):
for v in R[i]:
if len(R[i - 1].intersection(self.C[v])) == 0:
return False
return True
def _random_partition(self):
def rp_d_gt0(n):
R = list()
U = list(range(n))
while sum(R) < n:
n_nodes = 1
while (
np.random.random() < (n / 2 - 1) / (n - 1)
and sum(R) + n_nodes < n
):
n_nodes += 1
R.append(n_nodes)
for i in range(len(R)):
# node labels need to be kept as Python ints
# for all the bitmap operations to work as expected
R_i = set(
int(v) for v in np.random.choice(U, R[i], replace=False)
)
R[i] = R_i
U = [u for u in U if u not in R_i]
return tuple(R)
if self.d > 0:
return rp_d_gt0(self.n)
def n(R):
n_nodes = 1
while (
np.random.random() < (self.n / 2 - 1) / (self.n - 1)
and sum(len(R[i]) for i in range(len(R))) + n_nodes < self.n
):
n_nodes += 1
return n_nodes
while True:
U = set(range(self.n))
R = [set(np.random.choice(list(U), n([]), replace=False))]
U = U.difference(R[0])
while len(U) > 0:
pool = list(
U.intersection(
set().union(
{
v
for v in self.C
if set(self.C[v]).intersection(R[-1])
}
)
)
)
if len(pool) == 0:
break
R_i = np.random.choice(
pool, min(n(R), len(pool)), replace=False
)
R.append(set(R_i))
U = U.difference(R[-1])
if self.d > 0:
return tuple(R)
if self._valid(R):
return tuple(R)
def _pi(self, R, R_node_scores=None, rescore=None):
inpart = [0] * sum(len(R[i]) for i in range(len(R)))
for i in range(len(R)):
for v in R[i]:
inpart[v] = i
if R_node_scores is None:
R_node_scores = [0] * len(inpart)
else:
# Don't copy whole list, just the nodes to rescore?
R_node_scores = list(R_node_scores)
if rescore is None:
rescore = set().union(*R)
for v in rescore:
if inpart[v] == 0:
R_node_scores[v] = self.score.sum(v, set(), set())
else:
R_node_scores[v] = self.score.sum(
v, set().union(*R[: inpart[v]]), R[inpart[v] - 1]
)
return R_node_scores
def _rescore(self, R, R_prime):
rescore = list()
UT = dict()
U = set()
T = set()
for i in range(len(R)):
for u in R[i]:
UT[u] = (U, T)
U = U.union(R[i])
T = R[i]
U = set()
T = set()
for i in range(len(R_prime)):
for u in R_prime[i]:
if UT[u] != (U, T):
rescore.append(u)
U = U.union(R_prime[i])
T = R_prime[i]
return rescore
def sample(self):
# NOTE: Multiple points of return, consider refactoring.
def update_stats(accepted):
self.stats["mcmc"][self.inv_temp][move.__name__]["proposed"] += 1
if accepted:
self.stats["mcmc"][self.inv_temp][move.__name__][
"accepted"
] += 1
a = self.stats["mcmc"][self.inv_temp][move.__name__]["accepted"]
if type(a) != int:
a = 0
p = self.stats["mcmc"][self.inv_temp][move.__name__]["proposed"]
try:
ap = a / p
except ZeroDivisionError:
ap = 0.0
self.stats["mcmc"][self.inv_temp][move.__name__][
"accept_ratio"
] = ap
if np.random.rand() > self.stay_prob:
move = self._moves[np.random.randint(len(self._moves))]
if move.__name__ == "DAG_edgerev":
DAG, _ = self.score.sample_DAG(self.R)
# NOTE: DAG equals DAG_prime after this, since no copy
# is made. If necessary, make one.
return_value = move(
DAG=DAG, score=self.score, R=self.R, C=self.C, d=self.d
)
if return_value is False:
return [self.R], np.array([self.R_score])
DAG_prime, ap, edge = return_value
R_prime = partition(DAG_prime)
R_prime_node_scores = self._pi(
R_prime,
R_node_scores=self.R_node_scores,
rescore=self._rescore(self.R, R_prime),
)
elif move.__name__[0] == "R":
return_value = move(R=self.R)
if return_value is False:
return [self.R], np.array([self.R_score])
R_prime, q, q_rev, rescore = return_value
R_prime_node_scores = self._pi(
R_prime, R_node_scores=self.R_node_scores, rescore=rescore
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
ap = (
np.exp(
self.inv_temp * sum(R_prime_node_scores)
- self.R_score
)
* q_rev
/ q
)
R_prime_valid = self._valid(R_prime)
if self.d == 0 and not R_prime_valid:
return [self.R], np.array([self.R_score])
# make this happen in log space?
# if -np.random.exponential() < self.inv_temp * sum(
# R_prime_node_scores
# ) - self.R_score + np.log(q_rev) - np.log(q):
# pass
accepted = False
if np.random.rand() < ap:
accepted = True
self.R = R_prime
self.R_node_scores = R_prime_node_scores
self.R_score = self.inv_temp * sum(self.R_node_scores)
if self.stats is not None:
update_stats(accepted)
return [self.R], np.array([self.R_score])
class MC3:
def __init__(self, chains, stats=None):
self.stats = stats
if self.stats is not None:
self.stats["mc3"]["proposed"] = np.zeros(len(chains) - 1)
self.stats["mc3"]["accepted"] = np.zeros(len(chains) - 1)
self.stats["mc3"]["accept_ratio"] = np.array(
[np.nan] * (len(chains) - 1)
)
self.chains = chains
@staticmethod
def get_inv_temperatures(scheme, M):
linear = [i / (M - 1) for i in range(M)]
quadratic = [1 - ((M - 1 - i) / (M - 1)) ** 2 for i in range(M)]
sigmoid = [
1 / (1 + np.exp((M - 1) * (0.5 - (i / (M - 1))))) for i in range(M)
]
sigmoid[0] = 0.0
sigmoid[-1] = 1.0
return locals()[scheme]
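        # Illustrative note (added): for M = 3 the "linear" scheme above gives
        # inverse temperatures [0.0, 0.5, 1.0] and "quadratic" gives
        # [0.0, 0.75, 1.0]; locals()[scheme] simply returns whichever of the
        # three lists matches the requested scheme name.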
@staticmethod
def get_swap_acceptance_prob(chains, i, j):
"""Compute the swap acceptance probability between chains in indices i
and j."""
ap = sum(chains[i].R_node_scores) * chains[j].inv_temp
ap += sum(chains[j].R_node_scores) * chains[i].inv_temp
ap -= sum(chains[j].R_node_scores) * chains[j].inv_temp
ap -= sum(chains[i].R_node_scores) * chains[i].inv_temp
return ap
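        # Note (added for clarity): the expression above reduces to
        # (inv_temp_i - inv_temp_j) * (S_j - S_i) with S_k = sum(chains[k].R_node_scores),
        # i.e. the usual log swap-acceptance ratio for tempered chains. Callers
        # compare it against -np.random.exponential(), which accepts the swap
        # with probability min(1, exp(ap)).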
@staticmethod
def make_swap(chains, i, j):
R_tmp = chains[i].R
R_node_scores_tmp = chains[i].R_node_scores
chains[i].R = chains[j].R
chains[i].R_node_scores = chains[j].R_node_scores
chains[i].R_score = chains[i].inv_temp * sum(chains[i].R_node_scores)
chains[j].R = R_tmp
chains[j].R_node_scores = R_node_scores_tmp
chains[j].R_score = chains[j].inv_temp * sum(chains[j].R_node_scores)
@classmethod
def adaptive(
cls,
mcmc,
t_budget=None,
stats=None,
target=0.25,
tolerance=0.05,
n_proposals=1000,
max_search_steps=20,
sample_all=False,
strict_binary=False,
log=None,
):
if t_budget is not None:
t0 = time.time()
mcmc0 = copy.copy(mcmc)
mcmc0.inv_temp = 0.0
mcmc0._init_moves()
chains = [mcmc0, mcmc]
if stats is not None:
stats["iters"]["adaptive tempering"] = 0
msg_tmpl = "{:<8}" + "{:<9}" * 2
if log is not None:
log(msg_tmpl.format("chain", "temp^-1", "swap_prob"))
log(msg_tmpl.format("1", "0.0", "-"))
def acceptance_prob(i_target, inv_temp):
chains[i_target].inv_temp = inv_temp
chains[i_target].R = chains[i_target - 1].R
chains[i_target].R_node_scores = chains[i_target - 1].R_node_scores
chains[i_target]._init_moves()
proposed = 0
accepted = 0
while proposed < n_proposals:
if t_budget is not None:
if time.time() - t0 > t_budget:
log.br()
log("Time budget exceeded, terminating.")
exit(1)
if sample_all:
start, end = 0, len(chains)
else:
# Only the target chain and one hotter than it are sampled
start, end = i_target - 1, i_target + 1
for c in chains[start:end]:
if stats is not None:
stats["iters"]["adaptive tempering"] += 1
c.sample()
if sample_all:
j = np.random.randint(len(chains) - 1)
else:
j = i_target - 1
if j == i_target - 1:
proposed += 1
ap = MC3.get_swap_acceptance_prob(chains, j, j + 1)
if -np.random.exponential() < ap:
if j == i_target - 1:
accepted += 1
MC3.make_swap(chains, j, j + 1)
return accepted / proposed
# Commented out option to rerun the temperature estimations
# until the number of chains equals the previous run +/- 1.
# all_done = False
# while not all_done:
for l in range(1):
# start_len = len(chains)
done = False
i = 0
while not done:
i += 1
ub = 1.0
lb = chains[i - 1].inv_temp
chains[i].inv_temp = max(lb, chains[i].inv_temp)
acc_prob = acceptance_prob(i, chains[i].inv_temp)
if log is not None:
log(
msg_tmpl.format(
i + 1,
round(chains[i].inv_temp, 3),
round(acc_prob, 3),
)
)
heat = acc_prob < target
search_steps = 0
while abs(target - acc_prob) > tolerance:
search_steps += 1
if search_steps > max_search_steps:
break
# If strict_binary == False, the ub/lb is set half way
# between the previous ub/lb and current temperature, to
                    # avoid getting trapped in the wrong region.
if heat:
ub = chains[i].inv_temp
if strict_binary is False:
ub += 0.5 * (ub - chains[i].inv_temp)
chains[i].inv_temp = (
chains[i].inv_temp - (chains[i].inv_temp - lb) / 2
)
else:
if abs(chains[i].inv_temp - 1.0) < 1e-4:
break
lb = chains[i].inv_temp
if strict_binary is False:
lb -= 0.5 * (chains[i].inv_temp - lb)
chains[i].inv_temp = (
chains[i].inv_temp + (ub - chains[i].inv_temp) / 2
)
acc_prob = acceptance_prob(i, chains[i].inv_temp)
if log is not None:
log(
msg_tmpl.format(
"",
round(chains[i].inv_temp, 3),
round(acc_prob, 3),
)
)
heat = acc_prob < target
if abs(chains[i].inv_temp - 1.0) < 1e-4:
chains = chains[: i + 1]
done = True
else:
chain = copy.copy(chains[i])
chain.inv_temp = 1.0
chain._init_moves()
chains.append(chain)
chains[-1].inv_temp = 1.0
chains[-1] = copy.copy(chains[-1])
# all_done = len(chains) in range(start_len - 1, start_len + 2)
for c in chains:
c.stats = stats
chains = [copy.copy(c) for c in chains]
return cls(chains, stats=stats)
def sample(self):
for c in self.chains:
c.sample()
i = np.random.randint(len(self.chains) - 1)
if self.stats is not None:
self.stats["mc3"]["proposed"][i] += 1
ap = MC3.get_swap_acceptance_prob(self.chains, i, i + 1)
if -np.random.exponential() < ap:
if self.stats is not None:
self.stats["mc3"]["accepted"][i] += 1
MC3.make_swap(self.chains, i, i + 1)
if self.stats is not None:
self.stats["mc3"]["accept_ratio"][i] = (
self.stats["mc3"]["accepted"][i]
/ self.stats["mc3"]["proposed"][i]
)
return [c.R for c in self.chains[::-1]], np.array(
[sum(c.R_node_scores) for c in self.chains[::-1]]
)
|
#######################################################################
# Copyright (C) #
# 2017-2018 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from math import floor
from tqdm import tqdm
#######################################################################
# Following are some utilities for tile coding from Rich.
# To make each file self-contained, I copied them from
# http://incompleteideas.net/tiles/tiles3.py-remove
# with some naming convention changes
#
# Tile coding starts
class IHT:
"Structure to handle collisions"
def __init__(self, size_val):
self.size = size_val
self.overfull_count = 0
self.dictionary = {}
def count(self):
return len(self.dictionary)
def full(self):
return len(self.dictionary) >= self.size
def get_index(self, obj, read_only=False):
d = self.dictionary
if obj in d:
return d[obj]
elif read_only:
return None
size = self.size
count = self.count()
if count >= size:
if self.overfull_count == 0: print('IHT full, starting to allow collisions')
self.overfull_count += 1
return hash(obj) % self.size
else:
d[obj] = count
return count
def hash_coords(coordinates, m, read_only=False):
if isinstance(m, IHT): return m.get_index(tuple(coordinates), read_only)
if isinstance(m, int): return hash(tuple(coordinates)) % m
if m is None: return coordinates
def tiles(iht_or_size, num_tilings, floats, ints=None, read_only=False):
"""returns num-tilings tile indices corresponding to the floats and ints"""
if ints is None:
ints = []
qfloats = [floor(f * num_tilings) for f in floats]
tiles = []
for tiling in range(num_tilings):
tilingX2 = tiling * 2
coords = [tiling]
b = tiling
for q in qfloats:
coords.append((q + b) // num_tilings)
b += tilingX2
coords.extend(ints)
tiles.append(hash_coords(coords, iht_or_size, read_only))
return tiles
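# Illustrative usage (added; not part of the copied utilities): with
# iht = IHT(2048) and 8 tilings, a call such as
#   tiles(iht, 8, [8 * x / (x_max - x_min), 8 * v / (v_max - v_min)], [action])
# returns 8 integer indices, one active tile per tiling, which is how the
# Sarsa class below builds its sparse feature representation.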
# Tile coding ends
#######################################################################
# all possible actions
ACTION_REVERSE = -1
ACTION_ZERO = 0
ACTION_FORWARD = 1
# order is important
ACTIONS = [ACTION_REVERSE, ACTION_ZERO, ACTION_FORWARD]
# bound for position and velocity
POSITION_MIN = -1.2
POSITION_MAX = 0.5
VELOCITY_MIN = -0.07
VELOCITY_MAX = 0.07
# discount is always 1.0 in these experiments
DISCOUNT = 1.0
# use optimistic initial value, so it's ok to set epsilon to 0
EPSILON = 0
# maximum steps per episode
STEP_LIMIT = 5000
# take an @action at @position and @velocity
# @return: new position, new velocity, reward (always -1)
def step(position, velocity, action):
new_velocity = velocity + 0.001 * action - 0.0025 * np.cos(3 * position)
new_velocity = min(max(VELOCITY_MIN, new_velocity), VELOCITY_MAX)
new_position = position + new_velocity
new_position = min(max(POSITION_MIN, new_position), POSITION_MAX)
reward = -1.0
if new_position == POSITION_MIN:
new_velocity = 0.0
return new_position, new_velocity, reward
# accumulating trace update rule
# @trace: old trace (will be modified)
# @activeTiles: current active tile indices
# @lam: lambda
# @return: new trace for convenience
def accumulating_trace(trace, active_tiles, lam):
trace *= lam * DISCOUNT
trace[active_tiles] += 1
return trace
# replacing trace update rule
# @trace: old trace (will be modified)
# @activeTiles: current active tile indices
# @lam: lambda
# @return: new trace for convenience
def replacing_trace(trace, activeTiles, lam):
active = np.in1d(np.arange(len(trace)), activeTiles)
trace[active] = 1
trace[~active] *= lam * DISCOUNT
return trace
# replacing trace update rule, 'clearing' means set all tiles corresponding to non-selected actions to 0
# @trace: old trace (will be modified)
# @activeTiles: current active tile indices
# @lam: lambda
# @clearingTiles: tiles to be cleared
# @return: new trace for convenience
def replacing_trace_with_clearing(trace, active_tiles, lam, clearing_tiles):
active = np.in1d(np.arange(len(trace)), active_tiles)
trace[~active] *= lam * DISCOUNT
trace[clearing_tiles] = 0
trace[active] = 1
return trace
# dutch trace update rule
# @trace: old trace (will be modified)
# @activeTiles: current active tile indices
# @lam: lambda
# @alpha: step size for all tiles
# @return: new trace for convenience
def dutch_trace(trace, active_tiles, lam, alpha):
coef = 1 - alpha * DISCOUNT * lam * np.sum(trace[active_tiles])
trace *= DISCOUNT * lam
trace[active_tiles] += coef
return trace
# wrapper class for Sarsa(lambda)
class Sarsa:
# In this example I use the tiling software instead of implementing standard tiling by myself
# One important thing is that tiling is only a map from (state, action) to a series of indices
    # It doesn't matter whether the indices have meaning, only that this map satisfies certain properties
# View the following webpage for more information
# http://incompleteideas.net/sutton/tiles/tiles3.html
# @maxSize: the maximum # of indices
def __init__(self, step_size, lam, trace_update=accumulating_trace, num_of_tilings=8, max_size=2048):
self.max_size = max_size
self.num_of_tilings = num_of_tilings
self.trace_update = trace_update
self.lam = lam
# divide step size equally to each tiling
self.step_size = step_size / num_of_tilings
self.hash_table = IHT(max_size)
# weight for each tile
self.weights = np.zeros(max_size)
# trace for each tile
self.trace = np.zeros(max_size)
        # position and velocity need scaling to satisfy the tile software
self.position_scale = self.num_of_tilings / (POSITION_MAX - POSITION_MIN)
self.velocity_scale = self.num_of_tilings / (VELOCITY_MAX - VELOCITY_MIN)
# get indices of active tiles for given state and action
def get_active_tiles(self, position, velocity, action):
# I think positionScale * (position - position_min) would be a good normalization.
# However positionScale * position_min is a constant, so it's ok to ignore it.
active_tiles = tiles(self.hash_table, self.num_of_tilings,
[self.position_scale * position, self.velocity_scale * velocity],
[action])
return active_tiles
# estimate the value of given state and action
def value(self, position, velocity, action):
if position == POSITION_MAX:
return 0.0
active_tiles = self.get_active_tiles(position, velocity, action)
return np.sum(self.weights[active_tiles])
# learn with given state, action and target
def learn(self, position, velocity, action, target):
active_tiles = self.get_active_tiles(position, velocity, action)
estimation = np.sum(self.weights[active_tiles])
delta = target - estimation
if self.trace_update == accumulating_trace or self.trace_update == replacing_trace:
self.trace_update(self.trace, active_tiles, self.lam)
elif self.trace_update == dutch_trace:
self.trace_update(self.trace, active_tiles, self.lam, self.step_size)
elif self.trace_update == replacing_trace_with_clearing:
clearing_tiles = []
for act in ACTIONS:
if act != action:
clearing_tiles.extend(self.get_active_tiles(position, velocity, act))
self.trace_update(self.trace, active_tiles, self.lam, clearing_tiles)
else:
raise Exception('Unexpected Trace Type')
self.weights += self.step_size * delta * self.trace
# get # of steps to reach the goal under current state value function
def cost_to_go(self, position, velocity):
costs = []
for action in ACTIONS:
costs.append(self.value(position, velocity, action))
return -np.max(costs)
# get action at @position and @velocity based on epsilon greedy policy and @valueFunction
def get_action(position, velocity, valueFunction):
if np.random.binomial(1, EPSILON) == 1:
return np.random.choice(ACTIONS)
values = []
for action in ACTIONS:
values.append(valueFunction.value(position, velocity, action))
return np.argmax(values) - 1
# play Mountain Car for one episode based on given method @evaluator
# @return: total steps in this episode
def play(evaluator):
position = np.random.uniform(-0.6, -0.4)
velocity = 0.0
action = get_action(position, velocity, evaluator)
steps = 0
while True:
next_position, next_velocity, reward = step(position, velocity, action)
next_action = get_action(next_position, next_velocity, evaluator)
steps += 1
target = reward + DISCOUNT * evaluator.value(next_position, next_velocity, next_action)
evaluator.learn(position, velocity, action, target)
position = next_position
velocity = next_velocity
action = next_action
if next_position == POSITION_MAX:
break
if steps >= STEP_LIMIT:
print('Step Limit Exceeded!')
break
return steps
# figure 12.10, effect of the lambda and alpha on early performance of Sarsa(lambda)
def figure_12_10():
runs = 30
episodes = 50
alphas = np.arange(1, 8) / 4.0
lams = [0.99, 0.95, 0.5, 0]
steps = np.zeros((len(lams), len(alphas), runs, episodes))
for lamInd, lam in enumerate(lams):
for alphaInd, alpha in enumerate(alphas):
for run in tqdm(range(runs)):
evaluator = Sarsa(alpha, lam, replacing_trace)
for ep in range(episodes):
step = play(evaluator)
steps[lamInd, alphaInd, run, ep] = step
# average over episodes
steps = np.mean(steps, axis=3)
# average over runs
steps = np.mean(steps, axis=2)
for lamInd, lam in enumerate(lams):
plt.plot(alphas, steps[lamInd, :], label='lambda = %s' % (str(lam)))
plt.xlabel('alpha * # of tilings (8)')
plt.ylabel('averaged steps per episode')
plt.ylim([180, 300])
plt.legend()
plt.savefig('../images/figure_12_10.png')
plt.close()
# figure 12.11, summary comparison of Sarsa(lambda) algorithms
# I use 8 tilings rather than 10 tilings
def figure_12_11():
traceTypes = [dutch_trace, replacing_trace, replacing_trace_with_clearing, accumulating_trace]
alphas = np.arange(0.2, 2.2, 0.2)
episodes = 20
runs = 30
lam = 0.9
rewards = np.zeros((len(traceTypes), len(alphas), runs, episodes))
for traceInd, trace in enumerate(traceTypes):
for alphaInd, alpha in enumerate(alphas):
for run in tqdm(range(runs)):
evaluator = Sarsa(alpha, lam, trace)
for ep in range(episodes):
if trace == accumulating_trace and alpha > 0.6:
steps = STEP_LIMIT
else:
steps = play(evaluator)
rewards[traceInd, alphaInd, run, ep] = -steps
# average over episodes
rewards = np.mean(rewards, axis=3)
# average over runs
rewards = np.mean(rewards, axis=2)
for traceInd, trace in enumerate(traceTypes):
plt.plot(alphas, rewards[traceInd, :], label=trace.__name__)
plt.xlabel('alpha * # of tilings (8)')
    plt.ylabel('averaged rewards per episode')
plt.ylim([-550, -150])
plt.legend()
plt.savefig('../images/figure_12_11.png')
plt.close()
if __name__ == '__main__':
figure_12_10()
figure_12_11()
|
#!/bin/python3
def rotLeft(a, d):
b = a[:]
for i in range (0,len(a)):
newLocation = (i + (len(a) - d)) % len(a)
a[newLocation] = b[i]
return a
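# Worked example (added for illustration): rotLeft([1, 2, 3, 4, 5], 2) returns
# [3, 4, 5, 1, 2] -- each element b[i] is written to index (i + n - d) % n.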
if __name__ == '__main__':
nd = input().split()
n = int(nd[0])
d = int(nd[1])
a = list(map(int, input().rstrip().split()))
result = rotLeft(a, d)
print(result)
|
import os, logging, re
from datetime import datetime
import hashlib
from pprint import pprint
from collections import Counter
import html
import geopandas as gpd
from . import unicodetokeniser
from . import geo
##########
# util
##########
def hash(prefix, str):
return prefix + '_' + hashlib.sha256(str.lower().encode('utf-8')).hexdigest()
def escape_filename(name):
return re.sub(r'\W', '-', name)
def counter_to_list(counter):
list = []
for k, v in counter.most_common():
list.append(k)
return list
def counter_to_object_list(counter, key = 'key', val = 'val'):
list = []
for k, v in counter.most_common():
list.append({
key: k,
val: v
})
return list
##########
# text
##########
def init_tokeniser():
unicodetokeniser.load()
URL_REGEX = re.compile(r'\S+://\S+')
EMAIL_REGEX = re.compile(r'\S+@\S+\.\S+')
ATTAG_REGEX = re.compile(r'@\S+')
STOPWORD_FILE = "stopwords.txt"
def anonymize_text(text):
text = URL_REGEX.sub('', text)
text = EMAIL_REGEX.sub('', text)
text = ATTAG_REGEX.sub('', text)
return text
def clean_text(text):
text = html.unescape(text)
text = unicodetokeniser.remove_control(text)
text = unicodetokeniser.normalise(text)
text = text.lower()
text = unicodetokeniser.translit_punct(text)
text = unicodetokeniser.reduce_whitespace(text)
return text
def tokenise_text(text):
text = clean_text(text)
tokens = unicodetokeniser.tokenise(text)
tokens = unicodetokeniser.split_apostrophes(tokens)
tokens = unicodetokeniser.combine_hyphenated(tokens)
tokens = unicodetokeniser.words_only(tokens)
return tokens
##########
# links
##########
TWEET_URL_REGEX = re.compile(r'twitter\.com/.*/status/(\d+)')
TWITTER_USER_REGEX = re.compile(r'twitter\.com/[^/]+($|\?)')
def include_link(link, tweet_id):
if not link:
return False
m = TWEET_URL_REGEX.search(link) # link to current tweet
if m:
if m.group(1) == tweet_id:
return False
m = TWITTER_USER_REGEX.search(link) # username
if m:
return False
return True
TWITTER_REGEX = re.compile(r'^\w+://(?:www\.)?twitter\.com')
TWEET_URL_SUB_REGEX = re.compile(r'twitter\.com/.*/status/')
OTHER_TWITTER_URL_SUB_REGEX = re.compile(r'twitter\.com/.*/(lists|events|broadcasts|moments|timelines)/')
QUERY_SUB_REGEX = re.compile(r'\?.*$')
def anon_twitter_link(link):
new_link = link
m = TWITTER_REGEX.search(new_link)
if m:
new_link = TWEET_URL_SUB_REGEX.sub('twitter.com/i/web/status/', new_link)
new_link = OTHER_TWITTER_URL_SUB_REGEX.sub(r'twitter.com/i/\1/', new_link)
new_link = QUERY_SUB_REGEX.sub('', new_link)
return new_link
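# Illustrative example (added; not part of the original module): a link such as
# "https://twitter.com/someuser/status/123456?s=20" is rewritten to
# "https://twitter.com/i/web/status/123456" -- the username segment is replaced
# and the query string stripped -- while non-Twitter links are returned unchanged.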
WEBSITE_REGEX = re.compile(r'://(?:www\.)?([^/]+)')
TWITTER_WEBSITE_REGEX = re.compile(r'^\w+://(?:www\.)?(twitter\.com/i/(?:web/status|status|lists|events|broadcasts|moments|timelines))')
def extract_website(link):
if not link:
return None
m = TWITTER_WEBSITE_REGEX.search(link)
if m:
return m.group(1)
m = WEBSITE_REGEX.search(link)
if m:
return m.group(1)
return None
##########
# logging
##########
logging_path = "."
def init_logging(log_path=".", log_name="tracdash", console=True, file=True, level=logging.INFO):
global logging_path
logging_path = log_path
logFormatter = logging.Formatter("%(asctime)s\t[%(threadName)-12.12s]\t[%(levelname)-5.5s]\t%(message)s")
rootLogger = logging.getLogger()
if file:
fileHandler = logging.FileHandler("{0}/{1}-{2}.log".format(log_path, log_name, datetime.now().strftime("%Y_%m_%d-%H_%M_%S")))
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
if console:
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(level)
rootLogger.info("logging initialised")
def dump_es_error(res, file, n):
with open( os.path.join(logging_path, "err_" + escape_filename(file) + "_" + str(n) + ".txt") , "w") as f:
print('{}\t{}\t{}\terror\t{}'.format( datetime.now(), file, n, len(res['items']) ), file=f)
pprint(res, f)
##########
# geo
##########
NUTS_1_SHP_FILE = "NUTS_Level_1__January_2018__Boundaries.shp"
NUTS_2_SHP_FILE = "NUTS_Level_2__January_2018__Boundaries.shp"
NUTS_3_SHP_FILE = "NUTS_Level_3__January_2018__Boundaries.shp"
def init_geo(geo_search_level, crs="EPSG:4326", shp_path=None,
level1=NUTS_1_SHP_FILE,
level2=NUTS_2_SHP_FILE,
level3=NUTS_3_SHP_FILE):
gdf_nuts_1 = None
gdf_nuts_2 = None
gdf_nuts_3 = None
if not shp_path:
shp_path = os.path.join(os.path.dirname(__file__), "NUTS")
if geo_search_level >= 1:
try:
gdf_nuts_1 = gpd.read_file( os.path.join(shp_path, level1) )
gdf_nuts_1.to_crs(crs=crs, inplace=True)
gdf_nuts_1.rename({'nuts118cd': 'CODE', 'nuts118nm': 'NAME'}, axis=1, inplace=True)
logging.info("Loaded shape file for NUTS Level 1")
except:
logging.exception("Failed to load shape file for NUTS Level 1")
if geo_search_level >= 2:
try:
gdf_nuts_2 = gpd.read_file( os.path.join(shp_path, level2) )
gdf_nuts_2.to_crs(crs=crs, inplace=True)
gdf_nuts_2.rename({'nuts218cd': 'CODE', 'nuts218nm': 'NAME'}, axis=1, inplace=True)
logging.info("Loaded shape file for NUTS Level 2")
except:
logging.exception("Failed to load shape file for NUTS Level 2")
if geo_search_level >= 3:
try:
gdf_nuts_3 = gpd.read_file( os.path.join(shp_path, level3) )
gdf_nuts_3.to_crs(crs=crs, inplace=True)
gdf_nuts_3.rename({'nuts318cd': 'CODE', 'nuts318nm': 'NAME'}, axis=1, inplace=True)
logging.info("Loaded shape file for NUTS Level 3")
except:
logging.exception("Failed to load shape file for NUTS Level 3")
geohelper = geo.GeoHelper(crs, gdf_nuts_1, gdf_nuts_2, gdf_nuts_3)
return geohelper
|
# -*- coding: utf-8 -*-
import pytest
from io import BytesIO
from lxml import etree
from pyramid.path import DottedNameResolver
from shapely.geometry import MultiPolygon, Polygon
from pyramid_oereb.core.records.disclaimer import DisclaimerRecord
from pyramid_oereb.core.records.extract import ExtractRecord
from pyramid_oereb.core.records.glossary import GlossaryRecord
from pyramid_oereb.core.records.office import OfficeRecord
from pyramid_oereb.core.records.real_estate import RealEstateRecord
from pyramid_oereb.core.records.view_service import ViewServiceRecord
from pyramid_oereb.core.renderer.extract.xml_ import Renderer
from pyramid_oereb.core.renderer.versions.xml_ import Renderer as VersionsRenderer
from pyramid_oereb.core.views.webservice import Parameter
from tests.mockrequest import MockRequest
def test_version_against_schema(logo_test_data, schema_xml_versions, DummyRenderInfo):
versions = {
u'GetVersionsResponse': {
u'supportedVersion': [
{
u'version': u'1.0',
u'serviceEndpointBase': u'https://example.com'
}
]
}
}
renderer = VersionsRenderer(DummyRenderInfo())
rendered = renderer._render(versions)
xmlschema_doc = etree.parse(schema_xml_versions)
xmlschema = etree.XMLSchema(xmlschema_doc)
buffer = BytesIO(rendered)
doc = etree.parse(buffer)
assert xmlschema.validate(doc)
def _get_test_extract(config, glossary):
view_service = ViewServiceRecord(
{'de': u'http://geowms.bl.ch'},
1, 1.0, 'de', 2056, None, None
)
real_estate = RealEstateRecord(u'Liegenschaft', u'BL', u'Liestal', 2829, 11395,
MultiPolygon([Polygon([(0, 0), (1, 1), (1, 0)])]),
u'http://www.geocat.ch', u'1000', u'BL0200002829', u'CH775979211712')
real_estate.set_view_service(view_service)
real_estate.set_main_page_view_service(view_service)
office_record = OfficeRecord({'de': u'AGI'}, office_at_web={
'de': 'https://www.bav.admin.ch/bav/de/home.html'
})
resolver = DottedNameResolver()
date_method_string = (config
.get('extract')
.get('base_data')
.get('methods')
.get('date'))
date_method = resolver.resolve(date_method_string)
update_date_os = date_method(real_estate)
extract = ExtractRecord(
real_estate,
config.get_oereb_logo(),
config.get_conferderation_logo(),
config.get_canton_logo(),
config.get_municipality_logo(1234),
office_record,
update_date_os,
disclaimers=[
DisclaimerRecord({'de': u'Haftungsausschluss'}, {'de': u'Test'})
],
glossaries=glossary,
general_information=config.get_general_information()
)
# extract.qr_code = 'VGhpcyBpcyBub3QgYSBRUiBjb2Rl'.encode('utf-8') TODO:
# qr_code Must be an image ('base64Binary'), but even with images xml validation
# fails on it.
# extract.electronic_signature = 'Signature' # TODO: fix signature rendering first
return extract
@pytest.mark.parametrize('parameter, test_extract, buf_len', [
(
Parameter('reduced', 'xml', False, False, 'BL0200002829', '1000', 'CH775979211712', 'de'),
[GlossaryRecord({'de': u'Glossar'}, {'de': u'Test'})], # 'get_default_extract'
4775
),
(
Parameter('reduced', 'xml', False, False, 'BL0200002829', '1000', 'CH775979211712', 'de'),
[], # 'get_empty_glossary_extract'
4407
),
(
Parameter('reduced', 'xml', False, False, 'BL0200002829', '1000', 'CH775979211712', 'de'),
None, # 'get_none_glossary_extract'
4407
)
])
def test_extract_against_schema(real_estate_test_data, logo_test_data, schema_xml_extract,
DummyRenderInfo, parameter, test_extract, buf_len):
from pyramid_oereb.core.config import Config
extract = _get_test_extract(Config, test_extract)
renderer = Renderer(DummyRenderInfo())
renderer._language = u'de'
renderer._request = MockRequest()
renderer._request.route_url = lambda url, **kwargs: "http://example.com/current/view"
rendered = renderer._render(extract, parameter)
    # TODO: fix schema validation -- slow and cannot resolve online resources
# xmlschema_doc = etree.parse(schema_xml_extract)
# xmlschema = etree.XMLSchema(xmlschema_doc) # schema parsing very slow and fails
buffer = BytesIO(rendered)
    assert buffer.seek(0, 2) == buf_len  # temporary check: buffer length should match the expected buf_len
# doc = etree.parse(buffer)
# xmlschema.assertValid(doc)
|
# Generated by Django 3.2.3 on 2021-06-07 11:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0004_auto_20210607_1216'),
]
operations = [
migrations.AddField(
model_name='neighborhood',
name='description',
field=models.TextField(default=''),
),
]
|
import argparse
# ------------------------------------------------------------------------------
def collision_checker(obstacle_map: list, dx: int, dy: int, verbose: bool) -> int:
    """ Look for tree collisions along the specified path, return the number of collisions
Inputs:
obstacle_map: list - The grid map of obstacles:
'.' represents a clear space
'#' represents a tree
dx: int - Amount of right run on the path
dy: int - Amount of down run on the path
verbose: bool - Print the collision map for debugging purposes
Outputs:
        int - Total number of tree collisions detected along the toboggan path
"""
collisions = 0
width = len(obstacle_map[0])
x = 0
for line_num, line in enumerate(obstacle_map):
if line_num % dy:
continue
# ------------------------------------------------------------------------
if '#' == line[x]:
collisions += 1
# ------------------------------------------------------------------------
if verbose:
collision_map = []
for i, char in enumerate(line):
if i == x:
collision_map.append('X' if '#' == line[x] else 'O')
else:
collision_map.append(char)
print('{:03d},{:03d}: {}'.format(line_num+1, x, ''.join(collision_map)))
# ------------------------------------------------------------------------
x = (x + dx) % width
return collisions
# ------------------------------------------------------------------------------
def traverse_route(filename: str, part_two: bool, verbose: bool) -> None:
""" Load the input file and traverse the route(s) looking for tree collisions.
Inputs:
        filename: str - Name of the input map file to be processed
part_two: bool - Calculate the solution for part 2 of the puzzle if true
verbose: bool - Print the collision map for debugging purposes
Outputs:
None
"""
with open(filename, 'r') as f:
data = f.read().splitlines()
# For some reason we are multiplying instead of summing the collisions
# for part 2 of the puzzle, so we will default this to 1.
collisions = 1
if not part_two:
collisions = collision_checker(data, 3, 1, verbose)
else:
routes = ((1, 1), (3, 1), (5, 1), (7, 1), (1, 2))
for route in routes:
collisions *= collision_checker(data, route[0], route[1], verbose)
print('Tree collisions:', collisions)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Advent Of Code Challenge 2020 - Day 3, Calculate number of tree collisions for Toboggan run.')
parser.add_argument('file_in', type=str, metavar='input_filename', help='Name of input file.')
parser.add_argument('--part_two', '-p', action='store_true', help='Solve for part 2 of the puzzle.')
parser.add_argument('--verbose', '-v', action='store_true', help='Display the collision map.')
args = parser.parse_args()
traverse_route(args.file_in, args.part_two, args.verbose)
|
#encoding:utf-8
# Create your views here.
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from Application import util
from forms import CuadroForm, DescripcionForm
from Application.forms import AutorForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def home(request):
return render_to_response('index.html',RequestContext(request,{"agenda":util.getAgendaCultural()}))
def buscaCuadroPorTitulo(request):
if request.method=='POST' or request.GET.get('busqueda') is not None:
formulario=CuadroForm(request.POST)
if formulario.is_valid():
titulo=formulario.cleaned_data['titulo']
elif request.GET.get('busqueda') is not None:
print "meh"
titulo = request.GET.get('busqueda')
else:
return render_to_response('cuadro_form.html',RequestContext(request,{'formulario':formulario}))
print titulo
cuadros = util.buscaCuadroPorTitulo(titulo)
page=request.GET.get('page', 1)
paginator=Paginator(cuadros, 6)
try:
cuadrosPaginados=paginator.page(page)
except PageNotAnInteger:
cuadrosPaginados=paginator.page(1)
except EmptyPage:
cuadrosPaginados=paginator.page(paginator.num_pages)
print cuadros
return render_to_response('cuadros.html',{'lista':cuadrosPaginados,'busqueda':titulo,'por':'título'})
else:
formulario=CuadroForm()
return render_to_response('cuadro_form.html',RequestContext(request,{'formulario':formulario}))
def buscaCuadroPorAutor(request):
if request.method=='POST' or request.GET.get('busqueda') is not None:
formulario=AutorForm(request.POST)
if formulario.is_valid():
autor=formulario.cleaned_data['autor']
elif request.GET.get('busqueda') is not None:
print "meh"
autor = request.GET.get('busqueda')
else:
return render_to_response('cuadro_form.html',RequestContext(request,{'formulario':formulario}))
print autor
cuadros = util.buscaCuadroPorAutor(autor)
page=request.GET.get('page', 1)
paginator=Paginator(cuadros, 6)
try:
cuadrosPaginados=paginator.page(page)
except PageNotAnInteger:
cuadrosPaginados=paginator.page(1)
except EmptyPage:
cuadrosPaginados=paginator.page(paginator.num_pages)
print cuadros
return render_to_response('cuadros.html',{'lista':cuadrosPaginados,'busqueda':autor,'por':'autor'})
else:
formulario=AutorForm()
return render_to_response('cuadro_form.html',RequestContext(request,{'formulario':formulario}))
def getCuadro(request,autor,titulo):
cuadro = util.getCuadroPorTituloYAutor(titulo,autor)
more=util.buscarMasParecidos(titulo,autor)
return render_to_response('cuadro.html', {'dict': cuadro,'more':more})
def getAutor(request,autor):
cuadros = util.buscaCuadroPorAutor(autor,numResultados=3)
wiki=util.getWikiAutor(autor)
keywords=util.palabrasClave(autor)
return render_to_response('autor.html', {'lista': cuadros,'wiki':wiki,'autor':autor,'keywords':keywords})
def buscaCuadroPorDescripcion(request):
if request.method=='POST' or request.GET.get('busqueda') is not None:
formulario=DescripcionForm(request.POST)
if formulario.is_valid():
descripcion=formulario.cleaned_data['descripcion']
elif request.GET.get('busqueda') is not None:
print "meh"
descripcion = request.GET.get('busqueda')
else:
return render_to_response('cuadro_form.html',RequestContext(request,{'formulario':formulario}))
print descripcion
cuadros = util.highlights(descripcion)
page=request.GET.get('page', 1)
paginator=Paginator(cuadros, 6)
try:
cuadrosPaginados=paginator.page(page)
except PageNotAnInteger:
cuadrosPaginados=paginator.page(1)
except EmptyPage:
cuadrosPaginados=paginator.page(paginator.num_pages)
print cuadros
return render_to_response('cuadro_descripcion.html',{'lista':cuadrosPaginados,'busqueda':descripcion,'por':'descripcion'})
else:
formulario=DescripcionForm()
return render_to_response('cuadro_form.html',RequestContext(request,{'formulario':formulario}))
def buscaCuadroPorPalabraClave(request,descripcion):
cuadros = util.highlights(descripcion)
return render_to_response('cuadro_descripcion.html',{'lista':cuadros,'busqueda':descripcion,'por':'descripción'})
|
"""
This script solves the famous "fold piece of paper to the moon" problem
Problem description: how many times would you have to fold a piece of paper onto itself to reach the Moon?
"""
import numpy as np
# paper width in meters: 0.1 mm
paperWidth = 0.1*10**-3
# distance earth moon: 384,400 km
distance = 384400*10**3
# equation can be formulated as : 2^n w = d
n = np.log(distance/paperWidth)/np.log(2)
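# For the values above, distance / paperWidth = 3.844e12, so n = log2(3.844e12)
# is roughly 41.8 -- about 42 folds would, in principle, be enough.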
# display nicely
np.set_printoptions(precision=2)
print(np.array([n]))
|
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command generator for running a script against a Azure SQL cluster.
Contains the method to compile the Azure SQL data warehouse specific script
execution command based on generic arguments (sql script, output destination)
and Azure SQL specific arguments (flag values).
"""
__author__ = 'p3rf@google.com'
from absl import flags
flags.DEFINE_string('server', None, 'SQL server.')
flags.DEFINE_string('database', None, 'SQL Database.')
flags.DEFINE_string('user', None, 'SQL User.')
flags.DEFINE_string('password', None, 'SQL Password.')
flags.mark_flags_as_required(['server', 'database', 'user', 'password'])
FLAGS = flags.FLAGS
def generate_provider_specific_cmd_list(script, driver, output, error):
"""Method to compile the Azure SQL specific script execution command.
Arguments:
script: SQL script which contains the query.
driver: Driver that contains the Azure SQL data warehouse specific script
executor.
output: Output log file.
error: Error log file.
Returns:
Command list to execute the supplied script.
"""
return [driver, FLAGS.server, FLAGS.database, FLAGS.user, FLAGS.password,
script, output, error]
|
#! python
import numpy as np
import cv2
import datetime
import PyCapture2 as fc2
import sys
import time
import os
import re
import skimage.io as io
import csv
from itertools import islice
import beeDataAcq.cameraSetup as cs
####################################################################
### Get a calibration image from each camera
####################################################################
def getCalibrationImages():
'''
Get images so I can do background subtraction.
Note that two cameras must be connected.
- This can be checked by running cameraSetup first
'''
try:
bus = fc2.BusManager()
numCams = bus.getNumOfCameras()
cam1 = fc2.Camera()
cam1.connect(bus.getCameraFromIndex(0))
cam2 = fc2.Camera()
cam2.connect(bus.getCameraFromIndex(1))
# start capture
cs.enableEmbeddedTimeStamp(cam1, True)
cam1.startCapture()
cs.enableEmbeddedTimeStamp(cam1, True)
cam2.startCapture()
image = cam1.retrieveBuffer()
image2 = cam2.retrieveBuffer()
# show still image
img = np.concatenate((cs.img2array(image), cs.img2array(image2)), axis = 1)
io.imshow(img)
        # return the images (as int16 arrays) for later background subtraction
return(cs.img2array(image).astype(np.int16), cs.img2array(image2).astype(np.int16))
except:
print("error on calibration images")
####################################################################
### Resize images (for speed)
####################################################################
def reduceSize(dat, originalShape = [1024, 1280], proportion = 1/10):
    # reduce resolution to make per-frame processing faster (callers use proportion = 1/10)
def downsample_to_proportion(rows, proportion=1):
return(list(islice(rows, 0, len(rows), int(1/proportion))))
def writeArr(ctr, proportion = 0.25):
return(downList[int(ctr*originalShape[1]*proportion):\
int((ctr+1)*originalShape[1]*proportion)])
downList = downsample_to_proportion(dat, proportion = proportion)
lstLst = [writeArr(ctr, proportion = proportion)\
for ctr in range(originalShape[0])]
new_list = downsample_to_proportion(lstLst, proportion =proportion)
smallImg = np.array(new_list)
return(smallImg)
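# Illustrative note (added): with the default 1024 x 1280 frame and
# proportion = 1/10, reduceSize keeps roughly every 10th pixel in each dimension,
# returning an array of about 103 x 128 -- small enough to keep the per-frame
# blob detection fast, as the section header above notes.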
####################################################################
### Check if there is a bee close to the flower
####################################################################
def beeInImage(calImg, frame, blurAmt = (5,5), areaThreshold= 5):
'''
Returns True if a bee is detected in the image
Detects bees by size of dark blobs in the image
--if it doesn't work, try including light blobs.
Parameters
----------
calImg : np.array(int16) -- note that it is NOT uint8, which is default
Calibration image (no bee visible)
frame : np.array(int16)
frame of current image to compare with calibration image
Returns
-------
bool
True if there is a bee in the frame
'''
# check dtype
if calImg.dtype != "int16":
calImg = calImg.astype('int16')
if frame.dtype != "int16":
frame = frame.astype('int16')
# get image difference
im1Diff = (calImg - frame)
height,width = im1Diff.shape
# crop image to a circle
mask_circ = np.zeros((height,width), np.uint8)
cv2.circle(mask_circ,(int(width/2),int(height/2)),int(np.min([width,height])/2),(255),thickness=-1)
imDiff_cropped = cv2.bitwise_and(im1Diff, im1Diff, mask=mask_circ)
# gaussian blur
# 121, 121 works for full sized image, 15,15 works for 4x smaller image
# 5,5 works for 1/10 size
blur = cv2.GaussianBlur(imDiff_cropped, blurAmt ,0)
# get darker sections (positive threshold gives dark areas)
ret_dark,th3_dark = cv2.threshold(blur,70,255,cv2.THRESH_BINARY)
# get areas
img, cnts, _ = cv2.findContours(th3_dark.astype('uint8'), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
mask = np.ones(th3_dark.shape[:2], dtype="uint8") * 0 # create a blank black mask
areas = np.array([cv2.contourArea(c, False) for c in cnts])
if len(areas) == 0:
areas = np.array([0])
else:
print(max(areas))
# if there is at least one area over areaThreshold, then it's a bee
return(any(areas > areaThreshold), max(areas))
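# Illustrative (hypothetical) usage with a pair of reduced-size frames:
# isBee, darkArea = beeInImage(calImg, frame, blurAmt=(5, 5), areaThreshold=5)
# 'isBee' is True when at least one dark blob larger than areaThreshold remains after
# background subtraction, and 'darkArea' is the largest blob area found in the frame.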
####################################################################
### Save data and write csv file from each image
####################################################################
def saveAviHelper2_process(conn, cam, camCal1, cam2, camCal2,
fileFormat, fileName, fileName2, csvFileName,
frameRate, maxImgs = 500):
'''
Saves video as .avi
    Saves dataset -- each row is a single timestep, when frames were retrieved
Arguments:
conn (connection): a child_connection for multiprocessing
cam (camera): camera 1
camCal1 (np.array): background image for cam
cam2 (camera): camera 2
camCal2 (np.array): background image for cam2
fileFormat (string): should be avi
filename, filename2: (str) filenames for recorded videos
csvFileName (str): filename for writing data about frames
frameRate (int): should be set to max (the arduino is actually in charge of fps)
maxImgs (int): number of images before quitting.
'''
numImages = 0
tmpDat = np.empty(5, dtype = '<U26')
avi = fc2.FlyCapture2Video()
avi2 = fc2.FlyCapture2Video()
# resize calibration images 4x
camCal1 = camCal1[::10,::10]
camCal2 = camCal2[::10,::10]
# OPEN WINDOW
cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 1280,512)
for i in range(maxImgs):
try:
tmpDat[0] = str(datetime.datetime.now().strftime("%Y_%m_%d__%H_%M_%S_%f")[:-3])
image = cam.retrieveBuffer()
image2 = cam2.retrieveBuffer()
dat1,dat2 = image.getData(), image2.getData()
# make images smaller
frame1 = reduceSize(dat1, (image.getRows(), image.getCols()), proportion = 1/10)
frame2 = reduceSize(dat2, (image2.getRows(), image2.getCols()), proportion = 1/10)
tmpDat[1], tmpDat[2] = beeInImage(camCal1, frame1)
tmpDat[3], tmpDat[4] = beeInImage(camCal2, frame2)
# write to file
with open(csvFileName, 'a+', newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
if i == 0:
wr.writerow(["datetime", "beeInImage1", "darkArea1", "beeInImage2", "darkArea2"]) #write header
wr.writerow(tmpDat)
except fc2.Fc2error as fc2Err:
print("Error retrieving buffer : ", fc2Err)
continue
#print("Grabbed image {}".format(i))
        # check connection, and break if something is received
if conn.poll():
print(str(i) + str(conn.recv()))
for jj in range(10):
cv2.destroyAllWindows()
break
if (i == 0):
if fileFormat == "AVI":
avi.AVIOpen(fileName, frameRate)
avi2.AVIOpen(fileName2, frameRate)
elif fileFormat == "MJPG":
avi.MJPGOpen(fileName, frameRate, 75)
avi2.MJPGOpen(fileName2, frameRate, 75)
elif fileFormat == "H264":
avi.H264Open(fileName, frameRate, image.getCols(), image.getRows(), 1000000)
avi2.H264Open(fileName2, frameRate, image2.getCols(), image2.getRows(), 1000000)
else:
print("Specified format is not available.")
return
# show still image
img = np.concatenate((cs.img2array(image), cs.img2array(image2)), axis = 1)
# Display the resulting frame
cv2.imshow('image', img)
# break when "q" is pressed on keyboard
k = cv2.waitKey(1) & 0xFF
if (k == ord('q')) or (k == 27):
for jj in range(10):
cv2.destroyAllWindows()
break
# refref add image timestamp
avi.append(image)
avi2.append(image2)
numImages += 1
#print("Appended image {}...".format(i))
# close windows if loop ends
for jj in range(10):
cv2.destroyAllWindows()
print("Appended {} images to {} file: {}...".format(numImages, fileFormat, fileName))
avi.close()
avi2.close()
####################################################################
### Save data but do no processing
####################################################################
def saveAviHelper2(conn, cam, cam2, fileFormat, fileName, fileName2, frameRate, maxImgs = 500):
numImages = 0
avi = fc2.FlyCapture2Video()
avi2 = fc2.FlyCapture2Video()
for i in range(maxImgs):
try:
image = cam.retrieveBuffer()
image2 = cam2.retrieveBuffer()
except fc2.Fc2error as fc2Err:
print("Error retrieving buffer : ", fc2Err)
continue
print("Grabbed image {}".format(i))
        # check connection, and break if something is received
if conn.poll():
print(str(i) + str(conn.recv()))
for jj in range(10):
cv2.destroyAllWindows()
break
if (i == 0):
if fileFormat == "AVI":
avi.AVIOpen(fileName, frameRate)
avi2.AVIOpen(fileName2, frameRate)
elif fileFormat == "MJPG":
avi.MJPGOpen(fileName, frameRate, 75)
avi2.MJPGOpen(fileName2, frameRate, 75)
elif fileFormat == "H264":
avi.H264Open(fileName, frameRate, image.getCols(), image.getRows(), 1000000)
avi2.H264Open(fileName2, frameRate, image2.getCols(), image2.getRows(), 1000000)
else:
print("Specified format is not available.")
return
# show still image
img = np.concatenate((cs.img2array(image), cs.img2array(image2)), axis = 1)
# Display the resulting frame
cv2.imshow('image', img)
# break when "q" is pressed on keyboard
k = cv2.waitKey(1) & 0xFF
if (k == ord('q')) or (k == 27):
for jj in range(10):
cv2.destroyAllWindows()
break
# refref add image timestamp
avi.append(image)
avi2.append(image2)
numImages += 1
print("Appended image {}...".format(i))
# close windows if loop ends
for jj in range(10):
cv2.destroyAllWindows()
print("Appended {} images to {} file: {}...".format(numImages, fileFormat, fileName))
avi.close()
avi2.close()
################################################################
#### MAIN
################################################################
def main(conn, camCal1, camCal2, directory = "C:\\Users\\cswitzer.BEES\\Desktop\\TempVids"):
# avi recording function
bus = fc2.BusManager()
numCams = bus.getNumOfCameras()
c = fc2.Camera()
c.connect(bus.getCameraFromIndex(0))
d = fc2.Camera()
d.connect(bus.getCameraFromIndex(1))
# start capture
cs.enableEmbeddedTimeStamp(c, True)
c.startCapture()
cs.enableEmbeddedTimeStamp(c, True)
d.startCapture()
if not os.path.exists(directory):
os.makedirs(directory)
movieID = str(datetime.datetime.now().strftime("%Y_%m_%d__%H_%M_%S_%f")[:-3])
fileName = os.path.join(directory, movieID + "_cam1" + ".avi")
fileName2 = os.path.join(directory, movieID + "_cam2" + ".avi")
csvFileName = os.path.join(directory, movieID + ".csv")
conn.send(os.path.join(directory, movieID))
saveAviHelper2_process(conn, c, camCal1, d, camCal2,
"MJPG", fileName.encode("utf-8"), fileName2.encode("utf-8"),
csvFileName,
10, maxImgs = 10000)
# MJPG is slower than AVI
if __name__ == "__main__":
main(directory)
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: AC_simulation_n.py
# Create Date: 2015-07-29 20:57:10
# Usage: AC_simulation_n.py
# Description:
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param {TreeNode} root
# @param {TreeNode} p
# @param {TreeNode} q
# @return {TreeNode}
def lowestCommonAncestor(self, root, p, q):
if p.val > q.val:
p, q = q, p
cur = root
while p.val > cur.val or q.val < cur.val:
if p.val > cur.val:
cur = cur.right
else:
cur = cur.left
return cur
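# Illustrative usage (editorial addition, not part of the original solution):
# build the BST
#       6
#      / \
#     2   8
# and query the lowest common ancestor of the nodes 2 and 8, which is the root 6.
if __name__ == '__main__':
    root = TreeNode(6)
    root.left = TreeNode(2)
    root.right = TreeNode(8)
    print(Solution().lowestCommonAncestor(root, root.left, root.right).val)  # expects 6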
|
# Assumes OBDLink MX on Bluetooth COM port, attached to 2019 Audi e-tron. Should also work with other ELM327 adapters.
import collections
import numpy as np
import serial
import time
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
import logging
import sys
import requests
liveplot = False
voltage = 0
current = 0
soc = 0
batt_temp_min = 0
batt_temp_max = 0
ignition_on = False
charging = False
fast_charging = False
curtime = int(time.time())
def send_elm_cmd(command):
logging.debug(b'Sending ' + command)
adapter.write(command + b'\r')
# read response
response = adapter.read_until(expected=b'\r')
logging.debug(response)
# wait for CLI
logging.debug(adapter.read_until(expected=b'>'))
return response
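# Decoding sketch (editorial; the reply bytes below are hypothetical). The UDS reads in
# get_data() send '03 22 <DID> 55 55 55 55' and slice the hex digits of the positive
# response after stripping spaces. For the pack-voltage DID 0x1E3B, a reply that strips
# down to b'05621E3B0FA0...' would give int(b'0FA0', 16) = 4000, i.e. 400.0 V after the
# /10 scaling applied below.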
def get_data(i=0):
global liveplot
global voltage
global current
global soc
global batt_temp_min
global batt_temp_max
global ignition_on
global charging
global fast_charging
global curtime
try:
voltage = int((send_elm_cmd(b'03221e3b55555555').replace(b' ', b''))[8:12], 16) / 10
except ValueError:
logging.error("Unexpected value received from ECU")
try:
current = -1 * (int((send_elm_cmd(b'03221e3d55555555').replace(b' ', b''))[8:14], 16) - 150000) / 100
except ValueError:
logging.error("Unexpected value received from ECU")
try:
soc = int((send_elm_cmd(b'0322028C55555555').replace(b' ', b''))[8:10], 16)
except ValueError:
logging.error("Unexpected value received from ECU")
try:
        batt_temp_max = (int((send_elm_cmd(b'03221e0e55555555').replace(b' ', b''))[8:10], 16) - 100)
except ValueError:
logging.error("Unexpected value received from ECU")
try:
batt_temp_min = (int((send_elm_cmd(b'03221e0f55555555').replace(b' ', b''))[8:10], 16) - 100)
except ValueError:
logging.error("Unexpected value received from ECU")
try:
state = int((send_elm_cmd(b'0322744855555555').replace(b' ', b''))[8:10], 16)
ignition_on = bool(state & 0x1)
charging = bool(state & 0x4)
fast_charging = bool(state & 0x2)
except ValueError:
logging.error("Unexpected value received from ECU")
curtime = int(time.time())
print("Voltage: " + str(voltage) + "V")
print("Current: %.2fA" % current)
print("Power: %.2fkW" % (voltage * current / 1000))
print("SoC: " + str(soc) + "%")
print("Batt Temp min/max: " + str(batt_temp_min) + "/" + str(batt_temp_max) + "°C")
print("Ignition: " + str(ignition_on))
print("Charging: " + str(charging))
print("DCFC: " + str(fast_charging))
print("Time: " + str(curtime))
print()
if liveplot:
# append power and voltage for plotting
powers.popleft()
powers.append(voltage * current / 1000)
voltages.popleft()
voltages.append(voltage)
# configure power plot
ax.cla()
ax.plot(powers)
ax.scatter(len(powers) - 1, powers[-1])
ax.text(len(powers) - 1, powers[-1], "{:.2f}kW".format(powers[-1]))
ax.set_ylim(min(0, min(powers)), max(0, max(powers)))
# configure voltage plot
ax1.cla()
ax1.plot(voltages)
ax1.scatter(len(voltages) - 1, voltages[-1])
ax1.text(len(voltages) - 1, voltages[-1], "{:.2f}V".format(voltages[-1]))
ax1.set_ylim(0, max(voltages))
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
print('Running!')
adapter = serial.Serial(port='COM5', timeout=1)
if adapter.isOpen():
logging.info("Interface Open")
logging.info("Sending init commands")
send_elm_cmd(b'ATD') # defaults
send_elm_cmd(b'ATZ') # reset
send_elm_cmd(b'ATE0') # echo off
send_elm_cmd(b'ATL0') # linefeeds off
send_elm_cmd(b'ATSP7') # set protocol 7
send_elm_cmd(b'ATBI') # bypass initialization
send_elm_cmd(b'ATSH FC007B') # set header FC 00 7B
send_elm_cmd(b'ATCP 17') # can priority 17
send_elm_cmd(b'ATCAF0') # can automatic formatting off
send_elm_cmd(b'ATCF 17F') # can id filter set to 17F
send_elm_cmd(b'ATCRA 17FE007B') # can receive address to 17FE007B
if liveplot:
# set up graphing
powers = collections.deque(np.zeros(60))
voltages = collections.deque(np.zeros(60))
# define and adjust figure
fig = plt.figure(figsize=(6, 6), facecolor='#DEDEDE')
ax = plt.subplot(121)
ax1 = plt.subplot(122)
ax.set_facecolor('#DEDEDE')
ax1.set_facecolor('#DEDEDE')
ani = FuncAnimation(fig, get_data, interval=1000)
plt.show()
else:
while 1:
get_data()
time.sleep(1)
print("closing!")
adapter.close()
|
"""JSON osid metadata configurations for proxy service."""
from .. import types
from ..primitives import Type
DEFAULT_LANGUAGE_TYPE = Type(**types.Language().get_type_data("DEFAULT"))
DEFAULT_SCRIPT_TYPE = Type(**types.Script().get_type_data("DEFAULT"))
DEFAULT_FORMAT_TYPE = Type(**types.Format().get_type_data("DEFAULT"))
DEFAULT_GENUS_TYPE = Type(**types.Genus().get_type_data("DEFAULT"))
|
def activitySelect(start, finish):
    activities = []
    finTime = max(finish)
    for i in range(len(start)):
        activity = (start[i], finish[i])
        activities.append(activity)
    # greedy choice: always take the compatible activity that finishes earliest
    activities.sort(key=lambda x: x[1])
    time = 0
    selected = []
    while time < finTime:
        activity = None
        for act in activities:
            if act[0] >= time:
                activity = act
                break
        if activity is None:
            # no remaining activity starts at or after the current finish time
            break
        selected.append(activity)
        activities.remove(activity)
        time = activity[1]
    return selected
print(activitySelect([1,3,0,5,8,5], [2,4,6,7,9,9]))
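# For the sample input above, the greedy selection yields
# [(1, 2), (3, 4), (5, 7), (8, 9)].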
|
import keras
import skimage.transform as tf
import numpy as np
import random
import scipy.special
class BaseGenerator(keras.utils.Sequence):
def __init__(self, batch_size, input_shape, augment):
self.batch_size = batch_size
self.input_shape = input_shape
self.augment = augment
self._mid = input_shape[0] // 2
# initialize augmentation ranges
self.translations = [f for f in range(-3, 4)]
self.rotations = ([f for f in range(330, 365, 5)]
+ [f for f in range(0, 35, 5)])
super().__init__()
def _augment_img(self, img, x_trans, y_trans, angle, flip=False):
if flip:
img = np.fliplr(img)
trans = tf.AffineTransform(translation=[x_trans, y_trans])
img = tf.rotate(img, angle, center=(self._mid, self._mid))
return tf.warp(img, trans, preserve_range=True)
def _augment(self, img):
"""Add the same random rotation and translation, chosen from the
predefined ranges, to both given images"""
# choose random variation
x_trans = random.sample(self.translations, 1)[0]
y_trans = random.sample(self.translations, 1)[0]
angle = random.sample(self.rotations, 1)[0]
return self._augment_img(img, x_trans, y_trans, angle)
def __next__(self):
inputs, targets = self.__getitem__(None)
return inputs, targets
class SiameseGenerator(BaseGenerator):
def __init__(self, distances, images, mapping, batch_size, input_shape,
augment=True):
self.distances = distances
self.images = np.copy(images)
self.mapping = mapping
self.rotations = [f for f in range(0, 365, 5)]
super().__init__(batch_size, input_shape, augment)
def __len__(self):
# number of batches to cover all possible image pairs
return int(np.ceil(scipy.special.comb(len(self.images), 2)
/ self.batch_size))
def _augment_pair(self, img1, img2):
# choose random variation
x_trans = random.sample(self.translations, 1)[0]
y_trans = random.sample(self.translations, 1)[0]
angle = random.sample(self.rotations, 1)[0]
flip = random.sample([True, False], 1)[0]
i1 = self._augment_img(img1, x_trans, y_trans, angle, flip)
i2 = self._augment_img(img2, x_trans, y_trans, angle, flip)
return i1, i2
def __getitem__(self, idx):
pairs = [np.zeros((self.batch_size, *self.input_shape))
for i in range(2)]
targets = np.zeros((self.batch_size,))
        g_is = random.sample(list(self.mapping.keys()), self.batch_size)
        g_js = random.sample(list(self.mapping.keys()), self.batch_size)
for i, (g_i, g_j) in enumerate(zip(g_is, g_js)):
# extract distance from distance matrix & get corresponding images
targets[i] = self.distances[g_i, g_j]
if self.augment:
i1, i2 = self._augment_pair(self.images[self.mapping[g_i]],
self.images[self.mapping[g_j]])
else:
i1 = self.images[self.mapping[g_i]]
i2 = self.images[self.mapping[g_j]]
pairs[0][i] = i1
pairs[1][i] = i2
return pairs, targets
class KerasGenerator(BaseGenerator):
def __init__(self, src_generator, batch_size, input_shape, augment=False):
self.generator = src_generator
super().__init__(batch_size, input_shape, augment)
def __len__(self):
return len(self.generator)
def __getitem__(self, idx):
inputs = np.zeros((self.batch_size, *self.input_shape))
batch = next(self.generator)
if self.augment:
for i in range(self.batch_size):
inputs[i] = self._augment(batch[0][i])
else:
inputs = batch[0]
return inputs, batch[1]
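# Illustrative sketch (editorial addition; every input below is a made-up placeholder):
# how a SiameseGenerator might be wired up for square single-channel images.
def _example_siamese_generator():
    n, side = 10, 64
    distances = np.random.rand(n, n)            # hypothetical pairwise distance matrix
    images = np.random.rand(n, side, side, 1)   # hypothetical image stack
    mapping = {i: i for i in range(n)}          # identity id -> row mapping
    gen = SiameseGenerator(distances, images, mapping,
                           batch_size=4, input_shape=(side, side, 1))
    (left, right), targets = gen[0]             # one batch of image pairs and their distances
    return left.shape, right.shape, targets.shape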
|
"""
Convex Contracts for Starfish
"""
__version__ = '0.0.2'
|
# @lc app=leetcode id=583 lang=python3
#
# [583] Delete Operation for Two Strings
#
# https://leetcode.com/problems/delete-operation-for-two-strings/description/
#
# algorithms
# Medium (52.97%)
# Likes: 2154
# Dislikes: 39
# Total Accepted: 93.1K
# Total Submissions: 174.8K
# Testcase Example: '"sea"\n"eat"'
#
# Given two strings word1 and word2, return the minimum number of steps
# required to make word1 and word2 the same.
#
# In one step, you can delete exactly one character in either string.
#
#
# Example 1:
#
#
# Input: word1 = "sea", word2 = "eat"
# Output: 2
# Explanation: You need one step to make "sea" to "ea" and another step to make
# "eat" to "ea".
#
#
# Example 2:
#
#
# Input: word1 = "leetcode", word2 = "etco"
# Output: 4
#
#
#
# Constraints:
#
#
# 1 <= word1.length, word2.length <= 500
# word1 and word2 consist of only lowercase English letters.
#
#
#
# @lc tags=string
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Make the two strings identical by deleting characters from either one.
# Dynamic programming: dp[i][j] is the minimum number of deletions needed to
# make word1[:i] and word2[:j] equal.
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
rows, cols = len(word1) + 1, len(word2) + 1
dp = [[0 for _ in range(cols)] for _ in range(rows)]
for i in range(rows):
dp[i][0] = i
for j in range(cols):
dp[0][j] = j
for i in range(1, rows):
for j in range(1, cols):
if word1[i - 1] == word2[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = (1 + min(dp[i - 1][j], dp[i][j - 1]))
return dp[-1][-1]
pass
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('word1 = "sea", word2 = "eat"')
    print('Expected :')
print('2')
print('Output :')
print(str(Solution().minDistance("sea", "eat")))
print()
print('Example 2:')
print('Input : ')
print('word1 = "leetcode", word2 = "etco"')
    print('Expected :')
print('4')
print('Output :')
print(str(Solution().minDistance("leetcode", "etco")))
print()
pass
# @lc main=end
|
import numpy as np
import cv2
def det_banana(filename):
cap = cv2.VideoCapture(filename)
# take first frame of the video
ret,frame = cap.read()
cv2.imwrite(filename+".jpg",frame)
# setup initial location of window
r,h,c,w = 250,90,400,125 # simply hardcoded the values
track_window = (c,r,w,h)
# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 50.,50.)), np.array((32.,255.,255.)))
roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
    # Setup the termination criteria, either 10 iterations or move by at least 1 pt
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
while(1):
ret ,frame = cap.read()
if ret == True:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
# apply meanshift to get the new location
ret, track_window = cv2.CamShift(dst, track_window, term_crit)
# Draw it on image
pts = cv2.boxPoints(ret)
pts = np.int0(pts)
print(pts)
print('\n')
img2 = cv2.polylines(frame,[pts],True, 255,5)
cv2.imshow('img2',img2)
k = cv2.waitKey(60) & 0xff
if k == 27:
break
else:
cv2.imwrite(chr(k)+".jpg",img2)
else:
break
cv2.destroyAllWindows()
cap.release()
# filename = 'bananaSem.mp4'
# det_banana(filename)
# filename = 'banana1.mp4'
# det_banana(filename)
# filename = 'banana2.mp4'
# det_banana(filename)
filename = 'banana3.mp4'
det_banana(filename)
|
from gitlab import Gitlab
def create(key, repository, title, description, head, base='master', close_source_branch=False, reviewer=None,
label=None, url='https://gitlab.com/', **_):
if not reviewer:
reviewer = []
gl = Gitlab(url=url, private_token=key)
assignee_ids = [
gl_id(r, gl.users, 'username')
for r
in reviewer
]
project_id = gl_id(repository, gl.projects)
project = gl.projects.get(project_id)
mr = project.mergerequests.create({
'title': title,
'description': description,
'source_branch': head,
'target_branch': base,
'label': label,
'assignee_ids': assignee_ids,
'remove_source_branch': close_source_branch,
})
return str(mr)
def is_integral(var):
try:
int(var)
return True
except ValueError:
return False
def gl_id(name, objects, field='name'):
if is_integral(name):
return name
results = objects.list(**{field: name})
return results[0].id if results else name
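# Illustrative (hypothetical) call; every value below is a placeholder:
# create(key='glpat-xxxxxxxx', repository='group/project', title='Fix typo',
#        description='Small docs fix', head='feature-branch', base='master',
#        reviewer=['some.username'], url='https://gitlab.com/')
# gl_id() lets both 'repository' and each reviewer be given either as a numeric id
# or as a name/username, resolving names through the corresponding list() lookup.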
|
""""""
import sys
import time
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os
import pandas as pd
from calc_execwise_features import calc_simulation_metrics as load_and_calc_simulation_metrics # Borrowed function
from nodewise_features_metapop import calc_node_metric, load_and_process_sim_data
from toolbox.file_tools import make_folder
from toolbox.plot_tools import set_color_cycle, colorbrewer_pf_02, stdfigsize
def main():
# ---------------------------------------
# INPUT PARAMETERS
# ---------------------------------------
# use_metrics = ("a_impulse", "peak_size", "outb_size", "rel_peak_time", "abs_herd_time", "rel_herd_time",
# "time_above_val") # ST pair + extras
use_metrics = ("num_outb_threshold", )
num_exec = 50
# sim_prefix = "outputs/sir_1k_outputs/reset/" \
# "rgn0p25-04-trav1e-3/k05p00_10cycles_eps1e-4/sir-reset"
# sim_prefix = "outputs/sir_1k_outputs/reset/" \
# "rgn0p25-04-trav1e-3/condit_k05p00_10cycles_eps1e-4_histher0p20/sir-reset"
# sim_prefix = "outputs/tests/resets_trav1p0e-2/sir-reset/"
sim_prefix = "outputs/reset_sensitivity/01/sir-reset_k20p00_tau1p0e-3/"
# --- Sim prefix can be overridden by an argv input
if len(sys.argv) > 1:
sim_prefix = sys.argv[1]
act_thres = 1.E-3
histher = 0.5
yscale = "log" # linear, log
plot_style = "designer" # "designer", "default"
calc_and_export_df = False # True = recalculate all and export / False = try to load from df.
keep_exec_bunches = False # If True, all sim data is loaded before calculation. Horribly bad for large sets.
save_plots = True
show_plots = False
    check_executions = True  # Remove executions in which fewer than a given number of nodes had any outbreak
inv_thres = 2 # Must have at least this number of outbreaks to be valid (if check_executions is True).
should_remove_seed = True
seed_node = 40 # ALWAYS CHECK HERE IF SOMETHING CHANGES
should_remove_zero_outbs = False # Removes entries with zero outbreaks
report_each_exec = False # Whether to print a number for each finished execution.
plot_average = True
#
#
#
# -----------------------------------------
# EXECUTION
# -----------------------------------------
#
#
#
df_path = os.path.join(os.path.dirname(sim_prefix), "reset_metrics_df.csv")
sim_name = os.path.basename(sim_prefix[:-1])
if calc_and_export_df:
# --- // Load and calculation - COMMENT THE WHOLE ELIF BLOCK TO LOAD FROM PREMADE DF FILE and 'export/import'
print("Calculating metrics from raw data...")
if keep_exec_bunches:
xt0 = time.time()
exec_bunches, i_exec_list = load_execution_bunches(sim_prefix, num_exec)
xtf = time.time()
print("Time importing exec data: {:0.7f} s".format(xtf - xt0))
xt0 = time.time()
sim_metrics_df = calc_simulation_metrics(exec_bunches, use_metrics)
xtf = time.time()
print("Time calculating metrics: {:0.7f} s".format(xtf - xt0))
else:
# exec_bunches = None
xt0 = time.time()
sim_metrics_df = load_and_calc_simulation_metrics(sim_prefix, num_exec, use_metrics,
critical_value=act_thres, histher=histher,
report=report_each_exec)
xtf = time.time()
print("Time importing data and calculating metrics: {:0.7f} s".format(xtf - xt0))
# ---- \\
# Checkpoint the executionwise calculated metrics
export_df(sim_metrics_df, df_path)
else: # if not calc_and_export_df:
sim_metrics_df = import_df(df_path)
# ------ ------------
# Extra (as of Alberto's request/suggestion)
if check_executions:
        # Remove executions in which fewer than inv_thres nodes had at least one outbreak
invalid_executions = pick_invalid_executions(sim_metrics_df, minimum_attacked_nodes=inv_thres)
remove_executions_from_df(sim_metrics_df, invalid_executions)
print("Invalid executions (removed from dataset):")
print(invalid_executions)
if should_remove_seed:
remove_nodes_from_df(sim_metrics_df, seed_node)
if should_remove_zero_outbs:
remove_zero_outbreaks(sim_metrics_df)
# # --- Display the entire final df
# pd.set_option("display.max_rows", None)
# print(sim_metrics_df)
# --- Statistic aggregation
# Simple mean and std
# test_df = aggregate_executions_with_statistics(sim_metrics_df, "exec")
# AVERAGE OVER EXECUTIONS
# A data frame with the average metrics over all nodes for each execution
# avg_over_execs = sim_metrics_df.mean(level="exec") # Deprecation warning
avg_over_execs = sim_metrics_df.groupby(level="exec").mean()
# A list of execution histograms for each node.
# Signature: hist_list[i_ni] = pd.Series with the counts of each number of outbreaks.
hist_list = make_integer_histogram_executions(sim_metrics_df, use_metrics[0], level_name="exec")
# Overall occurrence histogram, for all nodes and executions
total_hist = pd.concat(hist_list).groupby(level=0).sum()
# --- Some cool feedback
print("Overall histogram entries")
print(total_hist)
print("Average num outbreaks = {:0.4f}".format(calc_avg_num_outb_from_hist(total_hist)))
# TODO - checkpoint here?
# --- PLOTS AND STUFF
setup_pyplot_style(plot_style)
# fig and ax are generated inside the function for better control of each figure's style
    fig, ax = plot_overall_histogram(total_hist, yscale=yscale)
    # fig, ax = plot_overall_histogram(hist_list[0])
# PLOTS THE HISTOGRAM OF AVERAGE NUMBER OF OUTBREAKS OVER ALL EXECUTIONS
if "num_outb_threshold" in use_metrics:
ea_fig, ea_ax = plot_exec_averaged_histogram(avg_over_execs, yscale=yscale, plot_average=plot_average)
if save_plots:
figname = os.path.join("tmp_figs", "num-outb_exec-avg") + "_" + sim_name
if should_remove_seed:
figname += "_noseed"
if should_remove_zero_outbs:
figname += "_nozeros"
ea_fig.savefig(figname + ".png")
ea_fig.savefig(figname + ".pdf")
# # Prints average number of outbreaks in each execution
# for a in avg_over_execs.iterrows():
# print(a[1])
# print()
fig.tight_layout()
if save_plots:
figname = os.path.join("tmp_figs", "num-outb_overall") + "_" + sim_name
if should_remove_seed:
figname += "_noseed"
if should_remove_zero_outbs:
figname += "_nozeros"
fig.savefig(figname + ".png")
fig.savefig(figname + ".pdf")
if show_plots:
plt.show()
def load_execution_bunches(sim_prefix, num_exec):
""" Loads the executions of a single simulation.
Returns a list of execution bunches (SimBunch objects).
If calc_metrics
"""
# Either takes all executions from 0 to num_exec-1 or assumes num_exec is an iterable with desired indexes.
if isinstance(num_exec, int):
i_exec_list = list(range(num_exec))
else:
i_exec_list = list(num_exec)
# Dummy execution loading, to get some constant features and save time.
i_exec = i_exec_list[0]
exec_bunch = load_and_process_sim_data(sim_prefix, i_exec=i_exec)
g = exec_bunch.g
nodes = exec_bunch.nodes
# Main execution loading
bunches = []
for i_exec in i_exec_list:
bunches.append(load_and_process_sim_data(sim_prefix, i_exec=i_exec, g=g, nodes=nodes))
return bunches, i_exec_list
def calc_simulation_metrics(exec_bunches, use_metrics, i_exec_list=None):
""" Calculates, for each execution dataset of a simulation, the required epidemic metrics.
Parameters
----------
exec_bunches : list
List of pre-loaded execution bunches from a simulation. List of SimBunch objects.
use_metrics : sequence of str
Names of the metrics to be calculated, as accepted in 'calc_node_metric'.
i_exec_list : list
Optional. The sequence of indexes of the executions. Must match the size of exec_bunches.
If not informed, it is simply set to [0, 1, 2, ..., num_exec], extracted from len(exec_bunches).
"""
# If not informed, i_exec_list is set to the first integers.
if i_exec_list is None:
i_exec_list = list(range(len(exec_bunches)))
# Gets node id list from first execution
nodes = exec_bunches[0].nodes
# Allocates containers for the calculated metrics
index = pd.MultiIndex.from_product((i_exec_list, nodes), names=("exec", "node"))
# noinspection PyTypeChecker
sim_metrics_df = pd.DataFrame(index=index, columns=use_metrics, dtype=(float, float)) # Signature: df[i_exec, ni]
# Loops over execution files.
for i_exec, exec_bunch in enumerate(exec_bunches):
# For each metric, calculates in all nodes
for metric in use_metrics:
# Metric calculation command
d = calc_node_metric(exec_bunch, metric, monitor_states=("I", "R"), monitor_value=0.01,
critical_value=1.E-3, histher=0.8) # For num_outbreaks, critical_value is used.
# Storing of the metrics into the multiindex dataframe
# sim_metrics_df.xs(i_exec, level="exec")[metric] = d["array"]
sim_metrics_df.loc[(i_exec, ), metric][:] = d["array"][:]
return sim_metrics_df
def aggregate_executions_with_statistics(df, level_name="exec"):
"""
For each simulation, calculates the average over a given level, as well as other statistical metrics.
Returns a data frame with mean and std of each metrics, as a multiindex column structure:
Returns
-------
Expected signature:
out_df(strategy, sim_prefix, nodeset)[(metric, score)]
... where score is "mean", "std".
"""
levels_to_groupby = list(df.index.names)
levels_to_groupby.remove(level_name)
grouped = df.groupby(level=levels_to_groupby)
return grouped.agg([np.mean, np.std])
def make_integer_histogram_executions(df, metric, level_name="exec"):
"""
For an execution set, calculates a histogram of the data over a given level and for a given metrics.
Assumes unique values, so this is NOT A CONTINUOUS VARIABLE HISTOGRAM.
Returns
-------
A list of data pd.Series with the counts of occurrences of each number of outbreaks. Each item is a node.
Expected signature:
count_series_list[i_ni] = pd.Series of {num_outbreak: occurrence_count}
"""
xt0 = time.time()
levels_to_groupby = list(df.index.names)
levels_to_groupby.remove(level_name)
grouped = df.groupby(level=levels_to_groupby)
# Counts unique occurrences for each node.
count_series_list = []
for ni, df in grouped:
dropped = df[metric].droplevel(level=levels_to_groupby) # This is an agnostic removal of grouped levels.
count_series_list.append(dropped.value_counts(sort=False))
xtf = time.time()
print("Time making nodewise histograms: {:0.6f} s".format(xtf - xt0))
return count_series_list
def setup_pyplot_style(style="default"):
    if style == "default":
# --- First style used, green bars, etc
plt.style.use("mystyle_02")
set_color_cycle(colorbrewer_pf_02)
# Specific style parameters
plt.rcParams["xtick.top"] = "off"
plt.rcParams["xtick.bottom"] = "off"
# plt.rcParams["ytick.right"] = "off"
elif type == "designer":
# --- To match Yamir's designer style
plt.style.use("mystyle_02")
set_color_cycle(["#f492a5"])
# Hide the right and top spines
mpl.rcParams["axes.spines.right"] = False
mpl.rcParams["axes.spines.top"] = False
# Only show ticks on the left spines
mpl.rcParams["xtick.top"] = False
mpl.rcParams["xtick.bottom"] = False
mpl.rcParams["ytick.right"] = False
# Sets width of remaining spines and ticks
spines_width = 0.896
mpl.rcParams["axes.linewidth"] = spines_width
mpl.rcParams["ytick.major.width"] = spines_width
mpl.rcParams["ytick.minor.width"] = spines_width
# Bar
def plot_overall_histogram(total_df, yscale="linear", normalize=True):
"""Histogram of outbreak counts for all executions and nodes."""
fig, ax = plt.subplots(figsize=stdfigsize(scale=0.8, xtoy_ratio=1.61))
if normalize:
norm = total_df.sum()
ylabel = "Normalized frequency"
else:
norm = 1.
ylabel = "Frequency"
# Overall average over nodes and executions plot
ax.bar(total_df.index, total_df / norm)
ax.set_xticks(total_df.index)
ax.tick_params(axis="x", which="both", length=0) # Removes x ticks, leaving the labels on
ax.set_xlabel("Number of outbreaks")
ax.set_ylabel(ylabel)
ax.set_yscale(yscale)
return fig, ax
def plot_exec_averaged_histogram(avg_over_execs, bins=10, yscale="linear", normalize=True, plot_average=True):
"""
Parameters
----------
    avg_over_execs : pd.DataFrame
"""
fig, ax = plt.subplots(figsize=stdfigsize(scale=0.8, xtoy_ratio=1.61))
# ----------- USES NUMPY HISTOGRAM , THEN MATPLOTLIB BAR PLOT
# Calc histogram using numpy
hist_array, bin_edges = np.histogram(avg_over_execs["num_outb_threshold"], bins)
if normalize:
norm = np.sum(hist_array)
ylabel = "Normalized frequency"
else:
norm = 1.
ylabel = "Frequency"
ax.bar(bin_edges[:-1], hist_array.astype(np.float32) / norm, align="edge",
width=bin_edges[1:]-bin_edges[:-1] - 0.002) # Tiny gap between them
# Shows the average on the plot
if plot_average:
avg = avg_over_execs["num_outb_threshold"].to_numpy().mean()
ax.text(0.8, 0.88, "mean = {:0.1f}".format(avg), transform=ax.transAxes, fontdict={"size": 18})
# plt.text()
    # ------------ DIRECTLY USES pd.DataFrame.hist (no normalization possible, only 'density', which is not the same)
# # Can't normalize this way:
# hist = avg_over_execs.hist(column="num_outb_threshold", bins=bins, ax=ax, grid=False)
ax.set_title(None)
ax.set_xlabel("Average number of outbreaks")
ax.set_ylabel(ylabel)
ax.set_yscale(yscale)
fig.tight_layout()
return fig, ax
def export_df(df, fname):
make_folder(os.path.dirname(fname), silent=True)
df.to_csv(fname, sep=";")
def import_df(fname):
"""Reads data written with 'export_df'."""
return pd.read_csv(fname, index_col=[0, 1], sep=";", skipinitialspace=True, header=[0])
def pick_invalid_executions(metrics_df, minimum_attacked_nodes=1):
"""Detects executions whose number of nodes that had at least one outbreak is smaller than a given
threshold.
"""
# Count the number of nodes that had at least one outbreak for each exec.
num_attacked_nodes = np.empty(len(metrics_df), dtype=int)
i_exec_array = np.empty(len(metrics_df), dtype=int)
invalid_executions = list()
print("------------\nCounting the nodes that had at least one outbreak")
for i, (i_exec, exec_df) in enumerate(metrics_df.groupby(level="exec")):
count = np.sum(exec_df["num_outb_threshold"] > 0) # Performs the count of attacked nodes
num_attacked_nodes[i] = count
i_exec_array[i] = i_exec # Just in case i_exec is not sequential
# Criterion to determine the validity of an execution
if count < minimum_attacked_nodes:
invalid_executions.append(i_exec)
return invalid_executions
def remove_executions_from_df(df, i_exec_list):
"""Removes all entries of a df corresponding to a given set of execution indexes.
Changes are made in place.
Assumes a df with a multiindex structure with levels: (exec, node)
"""
df.drop(labels=i_exec_list, level="exec", inplace=True)
def remove_nodes_from_df(df, nodes_list):
"""
Assumes a df with a multiindex structure with levels: (exec, node)
"""
df.drop(labels=nodes_list, level="node", inplace=True)
def remove_zero_outbreaks(df):
"""
Assumes a column named "num_outb_threshold"
"""
to_drop = df[df["num_outb_threshold"] == 0].index
df.drop(to_drop, inplace=True)
def calc_avg_num_outb_from_hist(hist_df, field="num_outb_threshold"):
"""
Parameters
----------
    hist_df : pd.Series
"""
vals = hist_df.index.values
weights = hist_df.values
norm = np.sum(weights)
if norm == 0.0:
raise ValueError("Hey, total number of outbreaks on the histogram is zero. This could cause a math error")
return np.sum(vals * weights) / norm
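# Worked example (editorial): for a histogram pd.Series({0: 5, 1: 3, 2: 2}),
# calc_avg_num_outb_from_hist returns (0*5 + 1*3 + 2*2) / 10 = 0.7.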
if __name__ == "__main__":
main()
|
from polygraphy.tools.data.data import Data
|
from igramscraper.instagram import Instagram
from time import sleep
import os
from os import path
import datetime
import discord_webhook
import ast
import sys
from pytz import timezone
FOLLOWER_LIMIT = 10**6
#Your instagram bot account username
insta_username = 'Enter your bot insta ID'
#Your instagram bot account password
insta_password = 'Enter Your bot Insta ID Password'
#Username of the real instagram account which you want to monitor
username = 'Enter Your Original Account ID'
#Change this at your own risk
MINS_TO_SLEEP = 40
discord_webhook_url = 'Paste your Discord url here...'
def check_unfollowers(current,old):
return list(set(old) - set(current))
def check_followers(current,old):
return list(set(current) - set(old))
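# Example (editorial): check_unfollowers(['a', 'b'], ['a', 'b', 'c']) -> ['c']
# (present in the old list but missing now), while
# check_followers(['a', 'b', 'c'], ['a', 'b']) -> ['c'] (a newly gained follower).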
def start():
while True:
try:
print("iter")
instagram = Instagram()
instagram.with_credentials(insta_username, insta_password)
instagram.login(force=False,two_step_verificator=True)
sleep(2) # Delay to mimic user
followers = []
account = instagram.get_account(username)
sleep(1)
curr_time = datetime.datetime.now(timezone('Asia/Kolkata'))
curr_time = curr_time.strftime("%b %d, %Y - %H:%M:%S")
            followers = instagram.get_followers(account.identifier, FOLLOWER_LIMIT, 100, delayed=True) # Get up to FOLLOWER_LIMIT followers of the monitored account, 100 at a time with a random delay between requests
# print(followers)
current_followers = []
for follower in followers['accounts']:
current_followers.append(follower.username)
del followers
if not path.exists("follower_list.txt"):
f = open("follower_list.txt","w")
f.write(str(current_followers))
f.close()
else:
f = open("follower_list.txt","r+")
old_followers = f.read()
f.close()
old_followers = ast.literal_eval(old_followers)
unfollowers = check_unfollowers(current_followers,old_followers)
followers = check_followers(current_followers,old_followers)
follower_change = len(current_followers)-len(old_followers)
follow_count = len(followers)
unfollow_count = len(unfollowers)
discord_webhook.send_msg(username,follower_change,followers,unfollowers,follow_count,unfollow_count,curr_time,discord_webhook_url)
f = open("follower_list.txt","w")
f.write(str(current_followers))
f.close()
except KeyboardInterrupt:
print("Exiting...")
sys.exit(0)
except Exception as e:
print(e)
sleep(MINS_TO_SLEEP*60)
if __name__ == '__main__':
if not os.path.exists('config_file.txt'):
print("You have not configured your details yet.\nRun config.py first")
sys.exit(0)
f = open('config_file.txt','r')
config = f.read()
f.close()
config = ast.literal_eval(config)
insta_username = config['insta_username']
insta_password = config['insta_password']
username = config['username']
discord_webhook_url = config['discord_webhook_url']
start()
|
from django.contrib import admin
from notenrechner.models import Fach
admin.site.register(Fach)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class RencaiItem(scrapy.Item):
job_name=scrapy.Field()
company = scrapy.Field()
salary = scrapy.Field()
degree = scrapy.Field()
experience = scrapy.Field()
want_numbers = scrapy.Field()
category = scrapy.Field()
tag = scrapy.Field()
workplace = scrapy.Field()
update_time = scrapy.Field()
address = scrapy.Field()
url = scrapy.Field()
|
#!/usr/bin/env python
import sys
import os
def split(delimiters, string, maxsplit=0):
import re
regexPattern = '|'.join(map(re.escape, delimiters))
return re.split(regexPattern, string, maxsplit)
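# Example (editorial): split(["+!", "+%", "##"], "a+!b##c") returns ['a', 'b', 'c'],
# i.e. the string is cut at every occurrence of any of the listed delimiters.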
def toHtml(input, outputPart="full"):
head=input.split("$content")[0]
body=input.split("$content")[1]
head=split(["+!","+%","##"], head.strip("\n").replace("+%","+%~mv").replace("+!", "+!~v").replace("##","##~c"))
for i in range(len(head)):
head[i]=head[i].strip("\n")
head.pop(0)
variables={}
metaVariables={}
for i in range(len(head)):
if head[i][0]=="~" and head[i][1]=="v":
variables[head[i].split("=")[0][2::]]=head[i].split("=")[1]
elif head[i][0]=="~" and head[i][1]=="m" and head[i][2]=="v":
metaVariables[head[i].split("=")[0][3::]]=head[i].split("=")[1]
elif head[i][0]=="~" and head[i][1]=="c":
print(f"Comment at line {i+2}: '{head[i][2::]}'")
else:
print(f"!! Daze Syntax Error: Invalid Character in $variables at line {i+2}")
metahtml=""
for i in list(variables):
contents=variables[i]
for j in variables.keys():
if variables[j][0]=="'"or variables[j][0]=='"':
contents=contents.replace(f"!{j} ",variables[j][1:-1])
else:
contents=contents.replace(f"!{j} ",variables[j])
variables[i]=contents
for i in list(metaVariables):
contents=metaVariables[i]
for j in metaVariables.keys():
if metaVariables[j][0]=="'"or metaVariables[j][0]=='"':
contents=contents.replace(f"%{j} ",metaVariables[j][1:-1])
else:
contents=contents.replace(f"%{j} ",metaVariables[j])
metaVariables[i]=contents
for i in list(metaVariables):
contents=metaVariables[i]
for j in variables.keys():
if variables[j][0]=="'"or variables[j][0]=='"':
contents=contents.replace(f"!{j} ",variables[j][1:-1])
else:
contents=contents.replace(f"!{j} ",variables[j])
metaVariables[i]=contents
for i in range(len(metaVariables)):
if list(metaVariables)[i]=="title":
metahtml=metahtml+f'<title>{metaVariables[list(metaVariables)[i]][1:-1]}</title>\n'
else:
            metahtml=metahtml+f'<meta name="{list(metaVariables)[i]}" content="{metaVariables[list(metaVariables)[i]][1:-1]}">\n'
body=body.strip("\n").split("(")
body.pop(0)
bodyhtml=""
for i in range(len(body)):
part=body[i].split(")")
element=part[0].strip(" ").split(": ")
contents=element[1]
if contents[0]=='"' and contents[-1]=='"' or contents[0]=="'" and contents[-1]=="'":
contents=contents[1:-1]
for i in variables.keys():
if variables[i][0]=="'"or variables[i][0]=='"':
contents=contents.replace(f"!{i} ",variables[i][1:-1])
else:
contents=contents.replace(f"!{i} ",variables[i])
attributes_unprocessed=part[1].strip("\n").split("+")
attributes={}
for i in range(len(attributes_unprocessed)):
attributes_unprocessed[i]=attributes_unprocessed[i].strip("\n")
attributes_unprocessed.pop(0)
for i in range(len(attributes_unprocessed)):
attributes[attributes_unprocessed[i].split("=")[0]]=attributes_unprocessed[i].split("=")[1]
strAttributes=""
for i in attributes.keys():
strAttributes=strAttributes+f' {i}="{attributes[i][1:-1]}"'
if element[0]=="img":
bodyhtml=bodyhtml+f'<img src="{contents}" {strAttributes}>\n'
elif element[0]=="linkScript":
bodyhtml=bodyhtml+f'<script src="{contents}" {strAttributes}></script>\n'
elif element[0]=="linkStyle":
bodyhtml=bodyhtml+f'<link rel="stylesheet" href="{contents}" {strAttributes}>\n'
elif element[0]=="link":
bodyhtml=bodyhtml+f'<link href="{contents}" {strAttributes}>\n'
elif element[0]=="script":
bodyhtml=bodyhtml+f'<script {strAttributes}>\n{contents}\n</script>\n'
elif element[0]=="style":
bodyhtml=bodyhtml+f'<style {strAttributes}>\n{contents}\n</style>\n'
else:
bodyhtml=bodyhtml+f'<{element[0]}{strAttributes}>{contents}</{element[0]}>\n'
if outputPart=="full":
return(f"""<!DOCTYPE html>
<html>
<!-- Site compiled from Daze -->
<head>
{metahtml}
</head>
<body>
{bodyhtml}
</body>
</html>""")
elif outputPart=="head":
return(f"""<!-- Part compiled from Daze -->
{metahtml}
<!-- End of Part -->
""")
elif outputPart=="body":
return(f"""<body>
<!-- Part compiled from Daze -->
{bodyhtml}
<!-- End of Part -->
""")
if len(sys.argv)>=2:
if sys.argv[1]=="compile":
if len(sys.argv)==4:
inFile = sys.argv[2]
outFile = sys.argv[3]
with open(inFile,'r') as i:
lines = i.read()
with open(outFile,'w') as o:
o.write(toHtml(lines))
elif len(sys.argv)>4:
print("daze: Too many arguments")
elif len(sys.argv)==3:
print("daze: No output file specified")
elif len(sys.argv)==2:
print("daze: No input file specified")
else:
print("daze: Internal Error 0x01")
elif sys.argv[1]=="help":
print("""
Daze: A declarative programming language
compile: Compiles the input file into an html file (daze compile <input> <output>)
help: Shows this help message
""")
else:
print(f"daze: invalid command {sys.argv[1]}. Try 'daze help' to see available commands.")
else:
print("daze: no command. Try 'daze help' to see available commands.")
|
from torch import nn
import torch.nn.functional as F
import torch
import numpy as np
from torch.quantization.default_mappings import DEFAULT_QAT_MODULE_MAPPING
from torch.quantization.observer import MinMaxObserver
import warnings
_NBITS = 8
_ACTMAX = 4.0
class MovingAverageQuantileObserver(MinMaxObserver):
def __init__(self, averaging_constant=0.01, q_min=0.0, q_max=1.0, dtype=torch.quint8,
qscheme=torch.per_tensor_affine, reduce_range=8):
self.averaging_constant = averaging_constant
super(MovingAverageQuantileObserver, self).__init__(dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range)
self.q_min = q_min
self.q_max = q_max
def forward(self, x_orig):
x = x_orig.detach() # avoid keeping autograd tape
min_val = self.min_val
max_val = self.max_val
if self.q_min == 0.0:
min_now = torch.min(x)
else:
min_now = torch.tensor([np.quantile(x.cpu().numpy(), q=self.q_min)]).to(device=x.device)
if self.q_max == 1.0:
max_now = torch.max(x)
else:
max_now = torch.tensor([np.quantile(x.cpu().numpy(), q=self.q_max)]).to(device=x.device)
if min_val.numel() == 0 or max_val.numel() == 0:
min_val = min_now
max_val = max_now
else:
max_val = max_val + self.averaging_constant * (max_now - max_val)
min_val = min_val + self.averaging_constant * (min_now - min_val)
self.min_val = min_val
self.max_val = max_val
return x_orig
@torch.jit.export
def _calculate_qparams(self, min_val, max_val):
# type: (Tensor, Tensor) -> Tuple[Tensor, Tensor]
r"""Calculates the quantization parameters, given min and max
value tensors. Works for both per tensor and per channel cases
Args:
min_val: Minimum values per channel
max_val: Maximum values per channel
Returns:
scales: Scales tensor of shape (#channels,)
zero_points: Zero points tensor of shape (#channels,)
"""
if min_val.numel() == 0 or max_val.numel() == 0:
warnings.warn(
"must run observer before calling calculate_qparams.\
Returning default scale and zero point "
)
return torch.tensor([1.0]), torch.tensor([0])
if min_val.dim() == 0 or max_val.dim() == 0:
assert min_val <= max_val, "min {} should be less than max {}".format(
min_val, max_val
)
else:
assert torch.sum(min_val <= max_val) == len(min_val), "min {} should be less than max {}".format(
min_val, max_val
)
if self.dtype == torch.qint8:
qmin, qmax = -2**(self.reduce_range - 1), 2**(self.reduce_range - 1) - 1
# if self.reduce_range:
# qmin, qmax = -64, 63
# else:
# qmin, qmax = -128, 127
else:
qmin, qmax = 0, 2**(self.reduce_range) - 1
# if self.reduce_range:
# qmin, qmax = 0, 127
# else:
# qmin, qmax = 0, 255
min_val = torch.min(min_val, torch.zeros_like(min_val))
max_val = torch.max(max_val, torch.zeros_like(max_val))
scale = torch.ones(min_val.size(), dtype=torch.float32)
zero_point = torch.zeros(min_val.size(), dtype=torch.int64)
device = 'cuda' if min_val.is_cuda else 'cpu'
if self.qscheme == torch.per_tensor_symmetric or self.qscheme == torch.per_channel_symmetric:
max_val = torch.max(-min_val, max_val)
scale = max_val / (float(qmax - qmin) / 2)
scale = torch.max(scale, torch.tensor(self.eps, device=device, dtype=scale.dtype))
if self.dtype == torch.quint8:
zero_point = zero_point.new_full(zero_point.size(), 128)
else:
scale = (max_val - min_val) / float(qmax - qmin)
scale = torch.max(scale, torch.tensor(self.eps, device=device, dtype=scale.dtype))
zero_point = qmin - torch.round(min_val / scale)
zero_point = torch.max(zero_point, torch.tensor(qmin, device=device, dtype=zero_point.dtype))
zero_point = torch.min(zero_point, torch.tensor(qmax, device=device, dtype=zero_point.dtype))
# For scalar values, cast them to Tensors of size 1 to keep the shape
# consistent with default values in FakeQuantize.
if len(scale.shape) == 0:
# TODO: switch to scale.item() after adding JIT support
scale = torch.tensor([float(scale)], dtype=scale.dtype)
if len(zero_point.shape) == 0:
# TODO: switch to zero_point.item() after adding JIT support
zero_point = torch.tensor([int(zero_point)], dtype=zero_point.dtype)
return scale, zero_point
class ConstantObserver(MinMaxObserver):
def __init__(self, q_min=0.0, q_max=1.0, dtype=torch.quint8,
qscheme=torch.per_tensor_affine, reduce_range=8):
super(ConstantObserver, self).__init__(dtype=dtype,
qscheme=qscheme,
reduce_range=reduce_range)
self.q_min = q_min
self.q_max = q_max
def forward(self, x_orig):
        self.min_val = torch.tensor([0.0])
        self.max_val = torch.tensor([_ACTMAX])
return x_orig
@torch.jit.export
def _calculate_qparams(self, min_val, max_val):
# type: (Tensor, Tensor) -> Tuple[Tensor, Tensor]
r"""Calculates the quantization parameters, given min and max
value tensors. Works for both per tensor and per channel cases
Args:
min_val: Minimum values per channel
max_val: Maximum values per channel
Returns:
scales: Scales tensor of shape (#channels,)
zero_points: Zero points tensor of shape (#channels,)
"""
if min_val.numel() == 0 or max_val.numel() == 0:
warnings.warn(
"must run observer before calling calculate_qparams.\
Returning default scale and zero point "
)
return torch.tensor([1.0]), torch.tensor([0])
if min_val.dim() == 0 or max_val.dim() == 0:
assert min_val <= max_val, "min {} should be less than max {}".format(
min_val, max_val
)
else:
assert torch.sum(min_val <= max_val) == len(min_val), "min {} should be less than max {}".format(
min_val, max_val
)
if self.dtype == torch.qint8:
qmin, qmax = -2**(self.reduce_range - 1), 2**(self.reduce_range - 1) - 1
# if self.reduce_range:
# qmin, qmax = -64, 63
# else:
# qmin, qmax = -128, 127
else:
qmin, qmax = 0, 2**(self.reduce_range) - 1
# if self.reduce_range:
# qmin, qmax = 0, 127
# else:
# qmin, qmax = 0, 255
min_val = torch.zeros_like(min_val)#torch.min(min_val, torch.zeros_like(min_val))
max_val = torch.ones_like(min_val) * _ACTMAX##torch.max(max_val, torch.zeros_like(max_val))
scale = torch.ones(min_val.size(), dtype=torch.float32)
zero_point = torch.zeros(min_val.size(), dtype=torch.int64)
device = 'cuda' if min_val.is_cuda else 'cpu'
if self.qscheme == torch.per_tensor_symmetric or self.qscheme == torch.per_channel_symmetric:
max_val = torch.max(-min_val, max_val)
scale = max_val / (float(qmax - qmin) / 2)
scale = torch.max(scale, torch.tensor(self.eps, device=device, dtype=scale.dtype))
if self.dtype == torch.quint8:
zero_point = zero_point.new_full(zero_point.size(), 128)
else:
scale = (max_val - min_val) / float(qmax - qmin)
scale = torch.max(scale, torch.tensor(self.eps, device=device, dtype=scale.dtype))
zero_point = qmin - torch.round(min_val / scale)
zero_point = torch.max(zero_point, torch.tensor(qmin, device=device, dtype=zero_point.dtype))
zero_point = torch.min(zero_point, torch.tensor(qmax, device=device, dtype=zero_point.dtype))
# For scalar values, cast them to Tensors of size 1 to keep the shape
# consistent with default values in FakeQuantize.
if len(scale.shape) == 0:
# TODO: switch to scale.item() after adding JIT support
scale = torch.tensor([float(scale)], dtype=scale.dtype)
if len(zero_point.shape) == 0:
# TODO: switch to zero_point.item() after adding JIT support
zero_point = torch.tensor([int(zero_point)], dtype=zero_point.dtype)
return scale, zero_point
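# Illustrative sketch (editorial addition; assumes the torch.quantization observer API
# imported above, where calculate_qparams() wraps the _calculate_qparams() override):
def _example_quantile_observer():
    obs = MovingAverageQuantileObserver(averaging_constant=0.01, q_min=0.01, q_max=0.99)
    for _ in range(10):
        obs(torch.randn(1024))          # update the running clipped min/max estimates
    scale, zero_point = obs.calculate_qparams()
    return scale, zero_point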
|
import scipy.io as sio
import math
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
from matplotlib import pyplot as plt
from datetime import datetime
import WetLab_NewportDelayStage as dstage
import time
import os
from ctypes import *
import ctypes as ct
ADQAPI = cdll.LoadLibrary("ADQAPI.dll")
#
ADQAPI.CreateADQControlUnit.restype = c_void_p
#ADQAPI.ADQ14_GetRevision.restype = c_void_p
ADQAPI.ADQControlUnit_FindDevices.argtypes = [c_void_p]
dt=datetime
thislogfilename='%i%s%s_%s%s%s.log' % (dt.today().year,str(dt.today().month).zfill(2),str(dt.today().day).zfill(2), \
str(dt.today().hour).zfill(2), \
str(dt.today().minute).zfill(2), \
str(dt.today().second).zfill(2))
class HEADER(ct.Structure):
_fields_ = [("RecordStatus", ct.c_ubyte),
("UserID", ct.c_ubyte),
("Channel", ct.c_ubyte),
("DataFormat", ct.c_ubyte),
("SerialNumber", ct.c_uint32),
("RecordNumber", ct.c_uint32),
("SamplePeriod", ct.c_int32),
("Timestamp", ct.c_int64),
("RecordStart", ct.c_int64),
("RecordLength", ct.c_uint32),
("Reserved", ct.c_uint32)]
class ADC_DAQ():
def __init__(self,f):
self.f=f
self.buffers_filled=c_uint(0)
#self.ProgressBar=[]
# Conversion factors
self.mv_conv=(2**16)/300.0
self.ns_conv=2
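        # e.g. with the +/-150 mV adjustable range, 150 mV * mv_conv = 150 * 65536/300 = 32768
        # codes, i.e. half of the 16-bit span; ns_conv converts ns to samples at 2 samples/ns.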
#Acquisition parameters
self.acqmode=1 # Choose either SINGLE_SHOT(0) or WAVEFORM_AVG(1)
        self.ltofA=2000 # Record length per shot in ns
        self.ltofB=2000 # Record length per shot in ns
self.analogbiasA_mv = 0.0 # Adjustable analog bias (DC offset) in mV. Range is +/-150mV
self.analogbiasA=np.round(self.analogbiasA_mv*self.mv_conv)
self.analogbiasB_mv =0.0 # Adjustable analog bias (DC offset) in mV. Range is +/-150mV
self.analogbiasB=np.round(self.analogbiasB_mv*self.mv_conv)
self.channel=2
self.threshold=150
#Stream options
self.baserefreshrate = 0.05
self.buffers_filled = ct.c_uint(0)
self.target_buffers = 0
self.headerbufp_list = 0
self.samples_added=0
self.headers_added=0
self.header_status=0
self.collect_result=0
#Delay Scan parameters
self.dscanmode=0
self.dscanstart=0
self.dscanstop=80
self.dscanstep=1
self.dscanrange=np.arange(self.dscanstart,self.dscanstop,self.dscanstep)
# Recording options
self.nbuffrecords=2 # Number of buffer records
self.nrecords=1000 # Number of records per sample
self.nstreamrecords = 100
self.nsaverecords=1000
self.progressrecords=0.0000001
self.progressflag=0
self.nsamplesA=self.ltofA*2 #samples per buffer record
self.nsamplesB=self.ltofB*2 #samples per buffer record
self.buffer_sizeA = self.nsaverecords*self.nsamplesA
self.buffer_sizeB = self.nsaverecords*self.nsamplesB
self.bytes_per_sample = 2 #2 for 16bits
        self.data_chA=np.zeros((self.nsamplesA)).astype(np.int64)
        self.data_chB=np.zeros((self.nsamplesB)).astype(np.int64)
# Trigger options
self.triggermode=2 # choose: 'CH_A(3)','CH_B(3)' or 'EXTERNAL_TRIGGER (2) or SOFTWARE_TRIGGER (1)'
self.trigchannel=1 # Choose 'CH_A(1)','CH_B(2)'
self.trig_edge = 1 #RISING_EDGE(1) or FALLING EDGE(0)
self.triglevel_mv =500.0 #Trigger threshold in mV => For a level trigger this must be in the range +/-150mV. For external trigger this must be in the range -500mV to +3300mV.
self.record_start_shift = 'NONE' #choose 'PRETRIGGER', 'HOLDOFF' OR 'NONE'
self.pretrigger_ns=0 #only applicable if 'PRETRIGGER' is selected.
self.holdoff_ns=0 #only applicable if 'HOLDOFF' is selected.
self.f.write('nsamplesA: %i, nrecords: %i, buffer size: %i, channel: %i, Triggermode:%i, dscanmode: %i, acqmode: %i\n'\
% (self.nsamplesA,self.nrecords,self.buffer_sizeA,self.channel,self.triggermode,self.dscanmode,self.acqmode))
# Connect with the digitizer
self.adq_cu = c_void_p(ADQAPI.CreateADQControlUnit())
ADQAPI.ADQControlUnit_FindDevices(self.adq_cu)
n_of_ADQ = ADQAPI.ADQControlUnit_NofADQ(self.adq_cu)
err2=ADQAPI.ADQControlUnit_GetLastFailedDeviceError(self.adq_cu)
n_of_ADQ14 = ADQAPI.ADQControlUnit_NofADQ14(self.adq_cu)
err3=ADQAPI.ADQControlUnit_GetLastFailedDeviceError(self.adq_cu)
self.f.write('initialisation values: %i,%i,%i,%i \n' % (n_of_ADQ,n_of_ADQ14,err2,err3))
# Adjustable input range and bias
self.VrangeChA=500.0
self.VrangeChB=500.0
self.VbiasChA=0.0
self.VbiasChB=0.0
setVrangeA=ct.c_float(self.VrangeChA)
setVrangeB=ct.c_float(self.VrangeChB)
setVbiasA=ct.c_float(self.VbiasChA)
setVbiasB=ct.c_float(self.VbiasChB)
if ADQAPI.ADQ_HasAdjustableInputRange(self.adq_cu, 1):
success=ADQAPI.ADQ_SetInputRange(self.adq_cu, 1,1,ct.c_float(self.VrangeChA),ct.byref(setVrangeA))
            success=ADQAPI.ADQ_SetInputRange(self.adq_cu, 1,2,ct.c_float(self.VrangeChB),ct.byref(setVrangeB))
self.f.write('Vrange CHA =%0.2f (mVpp)\n' % setVrangeA.value)
self.headerbuf_list=[]
if (n_of_ADQ14 != 0):
self.f.write('found ADQ device \n')
#ADQAPI.ADQControlUnit_EnableErrorTraceAppend(self.adq_cu,3,'C:/Documents/...')
self.f.write('enable ADQ log trace \n')
def __del__(self):
success = ADQAPI.DeleteADQControlUnit(self.adq_cu)
if (success == 0):
self.f.write('Delete ADQ control failed.\n')
self.f.close()
# GUI interaction functions
def setExtTriggerlevel(self,triglvl):
self.triglevel_mv=triglvl*1000.0
def setSignalThreshold(self,signalthresh):
self.threshold=signalthresh
def setDigitizerParameters(self,ParametersArray):
self.f.write('set Dig Params\n')
self.nsamplesA=ParametersArray[0]*2
self.nrecords=ParametersArray[1] # Number of records per sample
self.buffer_sizeA = self.nsaverecords*self.nsamplesA
#
self.channel=ParametersArray[2]
self.triggermode=ParametersArray[3]
#
self.dscanmode=ParametersArray[4]
self.dscanstart=ParametersArray[5]
self.dscanstop=ParametersArray[6]
self.dscanstep=ParametersArray[7]
self.acqmode=ParametersArray[8]
if (len(ParametersArray[9])!=0):
self.dscanrange=ParametersArray[9]
else:
self.dscanrange=np.arange(self.dscanstart,self.dscanstop,self.dscanstep)
if ParametersArray[10]!=self.VrangeChA:
self.VrangeChA=ParametersArray[10]
setVrangeA=ct.c_float(self.VrangeChA)
if ADQAPI.ADQ_HasAdjustableInputRange(self.adq_cu, 1):
try:
success=ADQAPI.ADQ_SetInputRange(self.adq_cu, 1,1,ct.c_float(self.VrangeChA),ct.byref(setVrangeA))
self.f.write('Vrange CHA =%0.2f (mVpp)\n' % setVrangeA.value)
except:
self.f.write('error Vrange CHA =%0.2f (mVpp)\n' % setVrangeA.value)
success=ADQAPI.ADQ_GetInputRange(self.adq_cu, 1,1,ct.byref(setVrangeA))
self.f.write('get Vrange CHA =%0.2f (mVpp)\n' % setVrangeA.value)
if not ParametersArray[11]==self.VrangeChB:
self.VrangeChB=ParametersArray[11]
setVrangeB=ct.c_float(self.VrangeChB)
if ADQAPI.ADQ_HasAdjustableInputRange(self.adq_cu, 1):
                success=ADQAPI.ADQ_SetInputRange(self.adq_cu, 1,2,ct.c_float(self.VrangeChB),ct.byref(setVrangeB))
if not ParametersArray[12]==self.VbiasChA:
self.VbiasChA=ParametersArray[12]
setVbiasA=ct.c_float(self.VbiasChA)
if ADQAPI.ADQ_HasAdjustableInputRange(self.adq_cu, 1):
                success=ADQAPI.ADQ_SetInputRange(self.adq_cu, 1,1,ct.c_float(self.VbiasChA),ct.byref(setVbiasA))
if not ParametersArray[13]==self.VbiasChB:
self.VbiasChB=ParametersArray[13]
setVbiasB=ct.c_float(self.VbiasChB)
if ADQAPI.ADQ_HasAdjustableInputRange(self.adq_cu, 1):
                success=ADQAPI.ADQ_SetInputRange(self.adq_cu, 1,1,ct.c_float(self.VbiasChB),ct.byref(setVbiasB))
if (ParametersArray[14]!=self.pretrigger_ns and ParametersArray[14]!=0):
self.pretrigger_ns=ParametersArray[14]
else:
self.pretrigger_ns=0
if (ParametersArray[15]!=self.holdoff_ns and ParametersArray[15]!=0):
self.holdoff_ns=ParametersArray[15]
self.f.write('Hold off val:%i\n'%self.holdoff_ns)
else:
self.holdoff_ns=0
self.f.write('nsamplesA: %i, nrecords: %i, buffer size: %i, channel: %i, Triggermode:%i, dscanmode: %i, acqmode: %i\n'\
% (self.nsamplesA,self.nrecords,self.buffer_sizeA,self.channel,self.triggermode,self.dscanmode,self.acqmode))
def StartRecording(self,foldername):
#StartProgressBar()
#try:
#self.ProgressBar=QtGui.QProgressDialog('Acquisition in progress','Abort',0,100)
#self.ProgressBar.show()
#self.ProgressBar.setValue(0)
#except:
#print 'ERROR starting progress bar dialog box'
success = ADQAPI.ADQ_SetSampleSkip(self.adq_cu,1,1)
if (success == 0):
self.f.write('ADQ_SetSampleSkip failed.\n')
self.f.write('bp3\n')
#success = ADQAPI.ADQ_SetAdjustableBias(self.adq_cu,1,0,self.analogbiasA)
#if (success == 0):
# print('ADQ_SetAdjustableBias failed.')
#success = ADQAPI.ADQ_SetAdjustableBias(self.adq_cu,1,1,self.analogbiasB)
#if (success == 0):
# print('ADQ_SetAdjustableBias failed.')
success = ADQAPI.ADQ_SetTriggerMode(self.adq_cu,1, self.triggermode)
if (success == 0):
self.f.write('ADQ_SetTriggerMode failed.\n')
self.f.write('bp4\n')
#trigth=0.6
if self.triggermode==1:
success = ADQAPI.ADQ_SetLvlTrigLevel(self.adq_cu, 1, 0)
if (success == 0):
self.f.write('ADQ_SetLvlTrigLevel failed.')
success = ADQAPI.ADQ_SetTrigLevelResetValue(self.adq_cu,1, 1000)
if (success == 0):
self.f.write('ADQ_SetTrigLevelResetValue failed.')
success = ADQAPI.ADQ_SetLvlTrigChannel(self.adq_cu,1, 1)
if (success == 0):
self.f.write('ADQ_SetLvlTrigChannel failed.')
success = ADQAPI.ADQ_SetLvlTrigEdge(self.adq_cu,1, self.trig_edge)
if (success == 0):
self.f.write('ADQ_SetLvlTrigEdge failed.')
if self.triggermode==2:
success = ADQAPI.ADQ_SetExtTrigThreshold(self.adq_cu,1,1,c_double(self.triglevel_mv/1000.0))
if (success == 0):
self.f.write('ADQ_SetExternTrigLevel failed.\n')
success = ADQAPI.ADQ_SetExternTrigEdge(self.adq_cu,1, self.trig_edge)
if (success == 0):
self.f.write('ADQ_SetExternTrigEdge failed.\n')
if self.triggermode==3:
triglvl=int(round(self.triglevel_mv*self.mv_conv))
success = ADQAPI.ADQ_SetLvlTrigChannel(self.adq_cu,1, self.trigchannel)
if (success == 0):
self.f.write('DParam: ADQ_SetLvlTrigChannel failed.\n')
success = ADQAPI.ADQ_SetLvlTrigLevel(self.adq_cu,1, triglvl)
if (success == 0):
self.f.write('DParam: ADQ_SetLvlTrigLevel failed.\n')
success = ADQAPI.ADQ_SetLvlTrigEdge(self.adq_cu,1, self.trig_edge)
if (success == 0):
self.f.write('DParam: ADQ_SetLvlTrigEdge failed.\n')
### HOLDOFF SAMPLES ###
#HOLDOFFSAMPLE=int(self.holdoff_ns*2)
#success=ADQAPI.ADQ_SetTriggerHoldOffSamples(ct.c_uint(self.holdoff_ns*2))
#if success==0:
# self.f.write('DParam: ADQ_SetTriggerHoldOffSamples failed.\n')
### REINIT THE SIZE OF THE DATA STORAGE FOR CHA AND B
self.data_chA=np.zeros((self.nsamplesA),dtype=np.int64)
self.data_chB=np.zeros((self.nsamplesA),dtype=np.int64)
self.progressrecords=0.0000001
self.progressflag=0
### DSCAN OFF ###
if self.dscanmode==0:
try:
avgtraceA=np.zeros((self.nsamplesA),dtype=np.int64)
avgtraceB=np.zeros((self.nsamplesA),dtype=np.int64)
except:
self.f.write('Initialisation of average scan matrix failed.\n')
success=ADQAPI.ADQ_MultiRecordSetup(self.adq_cu,1,self.nrecords,self.nsamplesA)
if (success == 0):
self.f.write('Recording: ADQ_MultiRecordSetup failed.\n')
else:
self.f.write('Recording: ADQ_MultiRecordSetup SUCCESS.\n')
self.f.write('bp7\n')
acquiredrecord=0
savestart= 0
NumberOfRecords = self.nsaverecords
ChannelsMask = 0xF
StartSample = 0
saveend=self.nsaverecords
success=ADQAPI.ADQ_DisarmTrigger(self.adq_cu,1)
if (success == 0):
self.f.write('Recording: ADQ_DisarmTrigger failed.\n')
success=ADQAPI.ADQ_ArmTrigger(self.adq_cu,1)
if (success == 0):
self.f.write('Recording: ADQ_ArmTrigger failed.\n')
i=0
if self.acqmode==1:
while (acquiredrecord<self.nrecords):
acquiredrecord=ADQAPI.ADQ_GetAcquiredRecords(self.adq_cu,1)
max_number_of_channels = 2
target_buffers=(POINTER(c_int16*self.nsamplesA*self.nsaverecords)*max_number_of_channels)()
for bufp in target_buffers:
bufp.contents = (c_int16*self.nsamplesA*self.nsaverecords)()
#self.f.write('bp10; nofacq: %i\n' % acquiredrecord)
if (acquiredrecord>=saveend):
savestart=saveend-self.nsaverecords
#4try:
ADQAPI.ADQ_GetData(self.adq_cu,1,target_buffers,self.buffer_sizeA,self.bytes_per_sample,savestart,NumberOfRecords,ChannelsMask,StartSample,self.nsamplesA,0x00)
data_16bit_ch0 = np.reshape(np.frombuffer(target_buffers[0].contents,dtype=np.int16),(self.nsaverecords,self.nsamplesA))
baselineCh0=np.median(data_16bit_ch0[:,:500],axis=1).astype(np.int64)
data_16bit_ch0=((data_16bit_ch0.T-baselineCh0).T)
data_16bit_ch0[data_16bit_ch0>=self.threshold]=0
data_16bit_ch1 = np.frombuffer(target_buffers[1].contents,dtype=np.int16)
self.data_chA+=data_16bit_ch0.sum(0)
self.data_chB+=np.reshape(data_16bit_ch1,(self.nsaverecords,self.nsamplesA)).sum(0)
#except:
# self.f.write('failed recording average trace\n')
i+=1
saveend+=self.nsaverecords
self.progressrecords=acquiredrecord/np.float(self.nrecords)
#self.ProgressBar.setValue(np.round(100*acquiredrecord/np.float(self.nrecords)))
dataavg={'Scan_ChA':self.data_chA, \
'Scan_ChB':self.data_chB}
path_mat='%s/ScanAvg.mat' % (foldername)
try:
sio.savemat(path_mat,dataavg)
except:
self.f.write('failed saving average trace\n')
else:
if not os.path.exists('%s/SShot' % (foldername)):
os.makedirs('%s/SShot' % (foldername))
while (acquiredrecord<self.nrecords):
acquiredrecord=ADQAPI.ADQ_GetAcquiredRecords(self.adq_cu,1)
max_number_of_channels = 2
#target_headers=(POINTER(c_int64*self.nsaverecords))()
#for headp in target_headers:
# headp.contents= (c_int64*self.nsaverecords)()
target_buffers=(POINTER(c_int16*self.nsamplesA*self.nsaverecords)*max_number_of_channels)()
for bufp in target_buffers:
bufp.contents = (c_int16*self.nsamplesA*self.nsaverecords)()
#self.f.write('bp10; nofacq: %i\n' % acquiredrecord)
if (acquiredrecord>=saveend):
savestart=saveend-self.nsaverecords
try:
ADQAPI.ADQ_GetData(self.adq_cu,1,target_buffers,self.buffer_sizeA,self.bytes_per_sample,savestart,NumberOfRecords,ChannelsMask,StartSample,self.nsamplesA,0x00)
data_16bit_ch0 = np.frombuffer(target_buffers[0].contents,dtype=np.int16)
data_16bit_ch1 = np.frombuffer(target_buffers[1].contents,dtype=np.int16)
tmp=np.copy(data_16bit_ch0)
tmp=np.reshape(tmp,(self.nsaverecords,self.nsamplesA))
baseline_tmp=np.median(tmp[:,:500],axis=1).astype(np.int64)
tmp=((tmp.T-baseline_tmp).T)
tmp[tmp>=self.threshold]=0
self.data_chA+=tmp.sum(0)
self.data_chB+=np.reshape(data_16bit_ch1,(self.nsaverecords,self.nsamplesA)).sum(0)
data={'specmat_ChA':data_16bit_ch0,'specmat_ChB':data_16bit_ch1}#,'timestamps':timestamps}
path_mat='%s/SShot/specfile_%s.mat' % (foldername,str(i).zfill(3))
#path_npz='%s/specfile_%i.npz' % (foldername,i)
try:
sio.savemat(path_mat,data)
#np.savez(path_npz,**data)
except:
self.f.write('failed saving singleshot trace\n')
except:
self.f.write('failed recording singleshot trace\n')
i+=1
saveend+=self.nsaverecords
self.progressrecords=acquiredrecord/np.float(self.nrecords)
#self.ProgressBar.setValue(np.round(100*acquiredrecord/np.float(self.nrecords)))
dataavg={'Scan_ChA':self.data_chA, \
'Scan_ChB':self.data_chB}
path_mat='%s/ScanAvg.mat' % (foldername)
try:
sio.savemat(path_mat,dataavg)
except:
self.f.write('failed saving average trace\n')
success=ADQAPI.ADQ_DisarmTrigger(self.adq_cu,1)
if (success == 0):
self.f.write('Recording: ADQ_DisarmTrigger failed.\n')
success=ADQAPI.ADQ_MultiRecordClose(self.adq_cu,1);
if (success == 0):
self.f.write('Recording: ADQ_MultiRecordClose failed.\n')
self.f.write('Acquisition finished at %s:%s:%s' % (str(dt.today().hour).zfill(2), \
str(dt.today().minute).zfill(2), \
str(dt.today().second).zfill(2)))
### DSCAN ON ###
elif self.dscanmode==1:
#if self.acqmode==1:
try:
avgscanA=np.zeros((len(self.dscanrange),self.nsamplesA),dtype=np.int64)
avgscanB=np.zeros((len(self.dscanrange),self.nsamplesA),dtype=np.int64)
except:
self.f.write('Initialisation of average scan matrix failed.\n')
for j,delayval in enumerate(self.dscanrange):
# Change the delay on the delaystage (humongium computer)
#dstage('//155.198.197.13/CEP_remotecontrol/',delayval)
if self.acqmode==0:
if not os.path.exists('%s/SSdelay%s' % (foldername,str(j).zfill(2))):
os.makedirs('%s/SSdelay%s' % (foldername,str(j).zfill(2)))
#if self.acqmode==1:
try:
self.data_chA=np.zeros((self.nsamplesA),dtype=np.int64)
self.data_chB=np.zeros((self.nsamplesA),dtype=np.int64)
except:
self.f.write('Initialisation of average trace failed.\n')
# Wait for 1 second that the stage has moved
time.sleep(1.0)
success=ADQAPI.ADQ_MultiRecordSetup(self.adq_cu,1,self.nrecords,self.nsamplesA)
if (success == 0):
self.f.write('Recording: ADQ_MultiRecordSetup failed.\n')
else:
self.f.write('Recording: ADQ_MultiRecordSetup SUCCESS.\n')
self.f.write('bp7\n')
acquiredrecord=0
savestart= 0
NumberOfRecords = self.nsaverecords
ChannelsMask = 0xF
StartSample = 0
saveend=self.nsaverecords
success=ADQAPI.ADQ_DisarmTrigger(self.adq_cu,1)
if (success == 0):
self.f.write('Recording: ADQ_DisarmTrigger failed.\n')
success=ADQAPI.ADQ_ArmTrigger(self.adq_cu,1)
if (success == 0):
self.f.write('Recording: ADQ_ArmTrigger failed.\n')
i=0
if self.acqmode==1: #Average trace case
while (acquiredrecord<self.nrecords):
acquiredrecord=ADQAPI.ADQ_GetAcquiredRecords(self.adq_cu,1)
max_number_of_channels = 2
target_buffers=(POINTER(c_int16*self.nsamplesA*self.nsaverecords)*max_number_of_channels)()
for bufp in target_buffers:
bufp.contents = (c_int16*self.nsamplesA*self.nsaverecords)()
#self.f.write('bp10; nofacq: %i\n' % acquiredrecord)
if (acquiredrecord>=saveend):
savestart=saveend-self.nsaverecords
try:
ADQAPI.ADQ_GetData(self.adq_cu,1,target_buffers,self.buffer_sizeA,self.bytes_per_sample,savestart,NumberOfRecords,ChannelsMask,StartSample,self.nsamplesA,0x00)
data_16bit_ch0 = np.reshape(np.frombuffer(target_buffers[0].contents,dtype=np.int16),(self.nsaverecords,self.nsamplesA))
baselineCh0=np.median(data_16bit_ch0[:,:500],axis=1).astype(np.int64)
data_16bit_ch0=((data_16bit_ch0.T-baselineCh0).T)
data_16bit_ch0[data_16bit_ch0>=self.threshold]=0
data_16bit_ch1 = np.reshape(np.frombuffer(target_buffers[1].contents,dtype=np.int16),(self.nsaverecords,self.nsamplesA))
#baselineCh1=np.median(data_16bit_ch1[:,:500],axis=1)
#data_16bit_ch1=((data_16bit_ch1.T-baselineCh1).T)
#data_16bit_ch1[data_16bit_ch1>=self.threshold]=0
self.data_chA+=(data_16bit_ch0).sum(0)
self.data_chB+=(data_16bit_ch1).sum(0)
except:
self.f.write('failed recording average trace\n')
i+=1
saveend+=self.nsaverecords
self.progressrecords=(acquiredrecord/np.float(self.nrecords))*(j/len(self.dscanrange))
try:
avgscanA[j,:]=self.data_chA
avgscanB[j,:]=self.data_chB
except:
self.f.write('failed building average scan\n')
else: #Single Shot trace case (With Averaging)
while (acquiredrecord<self.nrecords):
acquiredrecord=ADQAPI.ADQ_GetAcquiredRecords(self.adq_cu,1)
max_number_of_channels = 2
#target_headers=(POINTER(c_int64*self.nsaverecords))()
#for headp in target_headers:
# headp.contents= (c_int64*self.nsaverecords)()
target_buffers=(POINTER(c_int16*self.nsamplesA*self.nsaverecords)*max_number_of_channels)()
for bufp in target_buffers:
bufp.contents = (c_int16*self.nsamplesA*self.nsaverecords)()
#self.f.write('bp10; nofacq: %i\n' % acquiredrecord)
if (acquiredrecord>=saveend):
savestart=saveend-self.nsaverecords
try:
ADQAPI.ADQ_GetData(self.adq_cu,1,target_buffers,self.buffer_sizeA,self.bytes_per_sample,savestart,NumberOfRecords,ChannelsMask,StartSample,self.nsamplesA,0x00)
data_16bit_ch0 = np.frombuffer(target_buffers[0].contents,dtype=np.int16)
data_16bit_ch1 = np.frombuffer(target_buffers[1].contents,dtype=np.int16)
tmp=np.copy(data_16bit_ch0)
tmp=np.reshape(tmp,(self.nsaverecords,self.nsamplesA))
baseline_tmp=np.median(tmp[:,:500],axis=1).astype(np.int64)
tmp=((tmp.T-baseline_tmp).T)
tmp[tmp>=self.threshold]=0
self.data_chA+=tmp.sum(0)
self.data_chB+=np.reshape(data_16bit_ch1,(self.nsaverecords,self.nsamplesA)).sum(0)
#timestamps=np.frombuffer(target_headers.Timestamp,dtype=np.int64)
data={'specmat_ChA':data_16bit_ch0,'specmat_ChB':data_16bit_ch1}#'timestamps':timestamps}
path_mat='%s/SSdelay%s/specfile_%s.mat' % (foldername,str(j).zfill(2),str(i).zfill(3))
#path_npz='%s/SSdelay%i/specfile_%i.npz' % (foldername,j,i)
try:
sio.savemat(path_mat,data)
#np.savez(path_npz,**data)
except:
self.f.write('failed saving singleshot trace\n')
except:
self.f.write('failed recording singleshot trace\n')
i+=1
saveend+=self.nsaverecords
try:
avgscanA[j,:]=self.data_chA
avgscanB[j,:]=self.data_chB
except:
self.f.write('failed building average scan\n')
self.progressrecords=(acquiredrecord/np.float(self.nrecords))*(j/len(self.dscanrange))
#self.ProgressBar.setValue(np.round(100*j/np.float(len(self.dscanrange))))
#if self.acqmode==1:
dataavg={'Scan_ChA':avgscanA, \
'Scan_ChB':avgscanB,\
'Delay':self.dscanrange}
path_mat='%s/ScanAvg.mat' % (foldername)
try:
sio.savemat(path_mat,dataavg)
#path_npz='%s/ScanAvg.npz' % (foldername,i)
#np.savez(path_npz,**data)
except:
self.f.write('failed saving avg trace\n')
success=ADQAPI.ADQ_DisarmTrigger(self.adq_cu,1)
if (success == 0):
self.f.write('Recording: ADQ_DisarmTrigger failed.\n')
success=ADQAPI.ADQ_MultiRecordClose(self.adq_cu,1);
if (success == 0):
self.f.write('Recording: ADQ_MultiRecordClose failed.\n')
self.f.write('Acquisition finished at %s:%s:%s' % (str(dt.today().hour).zfill(2), \
str(dt.today().minute).zfill(2), \
str(dt.today().second).zfill(2)))
self.progressflag=1
        self.StopRecording()
##
def StopRecording(self):
#
ADQAPI.ADQ_DisarmTrigger(self.adq_cu,1)
ADQAPI.ADQ_MultiRecordClose(self.adq_cu,1);
#
def StartStream(self):
self.nstreamrecords = 100
max_number_of_channels = 2
## Initiate the data and header buffer
success = ADQAPI.ADQ_SetSampleSkip(self.adq_cu,1,0)
if (success == 0):
self.f.write('ADQ_SetSampleSkip failed.\n')
success = ADQAPI.ADQ_SetTriggerMode(self.adq_cu,1, self.triggermode)
if (success == 0):
self.f.write('ADQ_SetTriggerMode failed.\n')
if self.triggermode==1:
success = ADQAPI.ADQ_SetLvlTrigLevel(self.adq_cu, 1, 0)
if (success == 0):
self.f.write('ADQ_SetLvlTrigLevel failed.\n')
success = ADQAPI.ADQ_SetTrigLevelResetValue(self.adq_cu, 1, 1000)
if (success == 0):
self.f.write('ADQ_SetTrigLevelResetValue failed.\n')
success = ADQAPI.ADQ_SetLvlTrigChannel(self.adq_cu, 1, 1)
if (success == 0):
self.f.write('ADQ_SetLvlTrigChannel failed.\n')
success = ADQAPI.ADQ_SetLvlTrigEdge(self.adq_cu, 1, self.trig_edge)
if (success == 0):
self.f.write('ADQ_SetLvlTrigEdge failed.\n')
if self.triggermode==2:
success = ADQAPI.ADQ_SetExtTrigThreshold(self.adq_cu,1,1,c_double(self.triglevel_mv/1000.0))
if (success == 0):
                print('ADQ_SetExternTrigLevel failed.\n')
success = ADQAPI.ADQ_SetExternTrigEdge(self.adq_cu,1, self.trig_edge)
if (success == 0):
                print('ADQ_SetExternTrigEdge failed.\n')
if self.triggermode==3:
triglvl=int(round(self.triglevel_mv*self.mv_conv))
success = ADQAPI.ADQ_SetLvlTrigChannel(self.adq_cu,1, self.trigchannel)
if (success == 0):
                print('DParam: ADQ_SetLvlTrigChannel failed.\n')
success = ADQAPI.ADQ_SetLvlTrigLevel(self.adq_cu,1, triglvl)
if (success == 0):
                print('DParam: ADQ_SetLvlTrigLevel failed.\n')
success = ADQAPI.ADQ_SetLvlTrigEdge(self.adq_cu,1, self.trig_edge)
if (success == 0):
                print('DParam: ADQ_SetLvlTrigEdge failed.\n')
ADQAPI.ADQ_TriggeredStreamingSetup(self.adq_cu,1,self.nstreamrecords,self.nsamplesA,0,0,0xf)
ADQAPI.ADQ_SetTransferBuffers(self.adq_cu,1,8,131072*8)
self.target_buffers=(POINTER(c_int16*self.nsamplesA*self.nstreamrecords)*max_number_of_channels)()
for bufp in self.target_buffers:
bufp.contents = (c_int16*self.nsamplesA*self.nstreamrecords)()
self.headerbuf_list = [(HEADER*self.nstreamrecords)() for ch in range(max_number_of_channels)]
# Create an C array of pointers to header buffers
self.headerbufp_list = ((ct.POINTER(HEADER*self.nstreamrecords))*max_number_of_channels)()
# Initiate pointers with allocated header buffers
for ch,headerbufp in enumerate(self.headerbufp_list):
headerbufp.contents = self.headerbuf_list[ch]
# Allocate length output variable
self.samples_added = (4*ct.c_uint)()
for ind in range(len(self.samples_added)):
self.samples_added[ind] = 0
self.headers_added = (4*ct.c_uint)()
for ind in range(len(self.headers_added)):
self.headers_added[ind] = 0
self.header_status = (4*ct.c_uint)()
for ind in range(len(self.header_status)):
self.header_status[ind] = 0
#print 'Start Streaming.\n'
ADQAPI.ADQ_StopStreaming(self.adq_cu,1)
if (success == 0):
print('Stop Streaming failed\n')
success=ADQAPI.ADQ_StartStreaming(self.adq_cu,1)
if (success == 0):
print('Start Streaming failed\n')
def RunStream(self):
self.buffers_filled.value = 0
collect_results = 1
ChannelsMask = 0xf
#if ADQAPI.ADQ_GetStreamOverflow()==1:
poll_time_diff_prev = time.time()
while ((self.buffers_filled.value==0) and (collect_results)):
collect_results = ADQAPI.ADQ_GetTransferBufferStatus(self.adq_cu,1,ct.byref(self.buffers_filled))
poll_time_diff=time.time()-poll_time_diff_prev
if poll_time_diff>0.05:
ADQAPI.ADQ_FlushDMA(self.adq_cu,1)
poll_time_diff_prev=time.time()
status = ADQAPI.ADQ_GetDataStreaming(self.adq_cu,1,\
self.target_buffers,\
self.headerbufp_list,\
0xf,\
ct.byref(self.samples_added),\
ct.byref(self.headers_added),\
ct.byref(self.header_status))
#print status
if (status == 0):
print('Data grab failed\n')
data_16bit_ch0 = np.frombuffer(self.target_buffers[0].contents,dtype=np.int16).reshape(self.nstreamrecords,self.nsamplesA).copy()#[:self.nsamplesA]
data_16bit_ch1 = np.frombuffer(self.target_buffers[1].contents,dtype=np.int16).reshape(self.nstreamrecords,self.nsamplesA).copy()#[:self.nsamplesA]
baselineCh0 = np.median(data_16bit_ch0[:500]).astype(np.int16)
baselineCh1 = np.median(data_16bit_ch1[:500]).astype(np.int16)
header=[]
for i in range(self.nstreamrecords):
header.append(self.headerbuf_list[0][i].Timestamp*0.125*1e-3)
return data_16bit_ch0.mean(0),data_16bit_ch1.mean(0),header
# ChannelsMask = 0xF
# StartSample = 0
#
# target_buffersStream=(POINTER(c_int16*self.nsamplesA*Navg)*2)()
# for bufp in target_buffersStream:
# bufp.contents = (c_int16*self.nsamplesA*Navg)()
#
# #if Navg<=1000:
# time.sleep(self.baserefreshrate)
# #else:
# #time.sleep(self.baserefreshrate*(np.floor(Navg/1000)+1))
#
# source_bufferStream=Navg*self.nsamplesA
# try:
# self.data_chA=np.zeros((self.nsamplesA),dtype=np.int64)
# self.data_chB=np.zeros((self.nsamplesA),dtype=np.int64)
# except:
# self.f.write('Initialisation of average trace failed.\n')
#
# try:
#
# success=ADQAPI.ADQ_GetData(self.adq_cu,1,target_buffersStream,source_bufferStream,self.bytes_per_sample,0,Navg,ChannelsMask,StartSample,self.nsamplesA,0x00)
#
# data_16bit_ch0 = np.reshape(np.frombuffer(target_buffersStream[0].contents,dtype=np.int16),(Navg,self.nsamplesA))
# data_16bit_ch1 = np.reshape(np.frombuffer(target_buffersStream[1].contents,dtype=np.int16),(Navg,self.nsamplesA))
#
# if Navg==1:
# baselineCh0=np.median(data_16bit_ch0[:,:500],axis=1).astype(np.int64)
# data_16bit_ch0=(data_16bit_ch0-baselineCh0)
# data_16bit_ch0[data_16bit_ch0>=self.threshold]=0
# self.data_chA=(data_16bit_ch0).sum(0)
#
# baselineCh1=np.median(data_16bit_ch1[:,:500],axis=1).astype(np.int64)
# data_16bit_ch1=(data_16bit_ch1-baselineCh1)
# data_16bit_ch1[data_16bit_ch1>=self.threshold]=0
# self.data_chB=(data_16bit_ch1).sum(0)
# else:
# baselineCh0=np.median(data_16bit_ch0[:,:500],axis=1).astype(np.int64)
# data_16bit_ch0=((data_16bit_ch0.T-baselineCh0).T)
# data_16bit_ch0[data_16bit_ch0>=self.threshold]=0
# self.data_chA=(data_16bit_ch0).sum(0)/np.float(Navg)
#
# baselineCh1=np.median(data_16bit_ch1[:,:500],axis=1).astype(np.int64)
# data_16bit_ch1=((data_16bit_ch1.T-baselineCh1).T)
# data_16bit_ch1[data_16bit_ch1>=self.threshold]=0
# self.data_chB=(data_16bit_ch1).sum(0)/np.float(Navg)
#
# if success==0:
# self.data_chA = np.zeros((self.nsamplesA),dtype=np.int64)
# self.data_chB = np.zeros((self.nsamplesA),dtype=np.int64)
#
# except:
# self.data_chA = np.zeros((self.nsamplesA),dtype=np.int64)
# self.data_chB = np.zeros((self.nsamplesA),dtype=np.int64)
# self.f.write('error reading buffer\n')
#
# if len(self.data_chA)==0 or len(self.data_chB)==0:
# self.data_chA = np.zeros((self.nsamplesA),dtype=np.int64)
# self.data_chB = np.zeros((self.nsamplesA),dtype=np.int64)
#
#
# ADQAPI.ADQ_DisarmTrigger(self.adq_cu,1)
# ADQAPI.ADQ_MultiRecordClose(self.adq_cu,1);
#
def StopStream(self):
ADQAPI.ADQ_StopStreaming(self.adq_cu,1)
success=ADQAPI.ADQ_DisarmTrigger(self.adq_cu,1)
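# Typical call sequence (sketch only; assumes the host GUI creates an instance of
# this digitizer class, here called dig):
#   dig.setDigitizerParameters(params)                     # record length, trigger, dscan, Vrange
#   dig.StartRecording(folder)                             # multi-record acquisition, saves .mat files
#   dig.StopRecording()                                    # disarm trigger, close multi-record mode
#   dig.StartStream(); chA, chB, hdr = dig.RunStream(); dig.StopStream()   # triggered streaming path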
|
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': env.db('DEV_DB_URL')
}
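# env.db() (django-environ) parses DEV_DB_URL as a database URL, e.g. a placeholder
# value such as postgres://user:password@localhost:5432/mydb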
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/domain/'
|
# 17269
cnt = list(map(int,input().split(" ")))
names = list(map(str, str(input()).upper().split(" ") ))
line = [3,2,1,2,4,3,1,3,1,1,3,1,3,2,1,2,2,2,1,2,1,1,1,2,2,1]
length = len(names[0]) if len(names[0]) >= len(names[1]) else len(names[1])
fit_num = []
for i in range(0, length) :
if i < cnt[0] :
fit_num.append(line[ord(names[0][i])-65])
if i < cnt[1] :
fit_num.append(line[ord(names[1][i])-65])
# build the letters
tmp_num = []
while len(fit_num) > 2 :
tmp_len = len(fit_num)-1
# print("tmp_len", tmp_len)
for i in range(0, tmp_len) :
# print(i)
tmp_num.append( (fit_num[i] + fit_num[i+1])%10 )
fit_num = []
fit_num = tmp_num
tmp_num = []
fit_str = str(fit_num[1]) if fit_num[0] == 0 else str(fit_num[0])+str(fit_num[1])
print("{}%".format(fit_str))
|
#!/usr/bin/env python3
"""
A set of utilities for handling rr graph GSB data.
"""
import os
from collections import namedtuple
import lxml.etree as ET
# =============================================================================
class GsbEntry:
"""
Stores a single GSB entry which corresponds to a single N:1 mux
"""
Node = namedtuple("Node", "id type side segment_id grid_side index")
def __init__(self, xml_entry):
"""
Builds a GsbEntry object from an XML ElementTree object
"""
# Check type
assert xml_entry.tag in ["CHANX", "CHANY", "IPIN"], xml_entry.tag
# Sink node info
segment_id = xml_entry.get("segment_id", None)
if segment_id is not None:
segment_id = int(segment_id)
self.node = GsbEntry.Node(
id = int(xml_entry.attrib["node_id"]),
type = xml_entry.tag,
side = xml_entry.attrib["side"],
segment_id = segment_id,
grid_side = None,
index = int(xml_entry.attrib["index"]),
)
GsbEntry.check_node(self.node)
self.mux_size = int(xml_entry.attrib["mux_size"])
# Driver nodes
self.drivers = []
for xml_driver in xml_entry.findall("driver_node"):
segment_id = xml_driver.get("segment_id", None)
if segment_id is not None:
segment_id = int(segment_id)
node = GsbEntry.Node(
id = int(xml_driver.attrib["node_id"]),
type = xml_driver.attrib["type"],
side = xml_driver.attrib["side"],
segment_id = segment_id,
grid_side = xml_driver.get("grid_side", None),
index = int(xml_driver.attrib["index"]),
)
GsbEntry.check_node(node)
self.drivers.append(node)
@staticmethod
def check_node(node):
"""
Throws an assertion if some node data is incorrect
"""
assert node.type in ["CHANX", "CHANY", "IPIN", "OPIN"], node
assert node.side in ["left", "top", "right", "bottom"], node
def dump(self):
"""
Dumps GSB data.
"""
print("GSB: {}".format(self.node))
for i, driver in enumerate(self.drivers):
print(" {:2d} {}".format(i, driver))
# =============================================================================
def load_gsb_data(path, pbar=lambda x: x):
"""
Loads GSB data for a routing graph stored in files under the given path
"""
gsb_data = {}
# Loop over all XML files found and read them
for fname in pbar(os.listdir(path)):
# Check if this looks like an XML file
_, ext = os.path.splitext(fname)
if ext.lower() != ".xml":
continue
# Must be a file
fname = os.path.join(path, fname)
if not os.path.isfile(fname):
continue
# Read and parse the XML
xml_tree = ET.parse(fname, ET.XMLParser(remove_blank_text=True))
xml_root = xml_tree.getroot()
# Check if this is a GSB
if xml_root.tag != "rr_gsb":
continue
# Read and parse GSB entries
gsbs = []
for xml_element in xml_root:
gsbs.append(GsbEntry(xml_element))
# Store them
loc = (
int(xml_root.attrib["x"]),
int(xml_root.attrib["y"]),
)
gsb_data[loc] = gsbs
return gsb_data
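# Minimal usage sketch (the directory name below is an assumed example, not part of
# this module): load every rr_gsb XML file from a folder and dump the first location.
if __name__ == "__main__":
    gsb_data = load_gsb_data("gsb_xml")  # assumed path to a folder of *.xml GSB dumps
    for loc, entries in sorted(gsb_data.items()):
        print("GSB at x={}, y={} with {} entries".format(loc[0], loc[1], len(entries)))
        for entry in entries:
            entry.dump()
        break  # only show the first location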
|
import unittest
import numpy as np
from openaerostruct.aerodynamics.wave_drag import WaveDrag
from openaerostruct.utils.testing import run_test, get_default_surfaces
from openmdao.api import Group, IndepVarComp, BsplinesComp
class Test(unittest.TestCase):
def test(self):
surface = get_default_surfaces()[0]
surface['with_wave'] = True
surface['t_over_c_cp'] = np.array([0.15, 0.21, 0.03, 0.05])
nx = surface['mesh'].shape[0]
ny = surface['mesh'].shape[1]
n_cp = len(surface['t_over_c_cp'])
group = Group()
indep_var_comp = IndepVarComp()
indep_var_comp.add_output('t_over_c_cp', val=surface['t_over_c_cp'])
indep_var_comp.add_output('Mach_number', val=.95)
indep_var_comp.add_output('CL', val=0.7)
indep_var_comp.add_output('widths', val = np.array([12.14757848, 11.91832712, 11.43730892]),units='m')
indep_var_comp.add_output('cos_sweep', val = np.array([10.01555924, 9.80832351, 9.79003729]),units='m')
indep_var_comp.add_output('chords', val = np.array([ 2.72835132, 5.12528179, 7.88916016, 13.6189974]),units='m')
group.add_subsystem('indep_var_comp', indep_var_comp, promotes=['*'])
group.add_subsystem('t_over_c_bsp', BsplinesComp(
in_name='t_over_c_cp', out_name='t_over_c',
num_control_points=n_cp, num_points=int(ny-1),
bspline_order=min(n_cp, 4), distribution='uniform'),
promotes_inputs=['t_over_c_cp'], promotes_outputs=['t_over_c'])
comp = WaveDrag(surface=surface)
group.add_subsystem('wavedrag', comp, promotes=['*'])
run_test(self, group, complex_flag=True)
if __name__ == '__main__':
unittest.main()
|
VERSION = (0, 5, 3,)
__version__ = '.'.join(map(str, VERSION))
default_app_config = 'django_rest_messages.apps.DjangoMessagesConfig'
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def printTree(self, root):
"""
:type root: TreeNode
:rtype: List[List[str]]
"""
def get_height(node):
return 0 if not node else 1 + max(get_height(node.left), get_height(node.right))
def update_output(node, row, left, right):
if not node:
return
mid = (left + right) // 2
self.output[row][mid] = str(node.val)
update_output(node.left, row + 1 , left, mid - 1)
update_output(node.right, row + 1 , mid + 1, right)
height = get_height(root)
width = 2 ** height - 1
self.output = [[''] * width for i in range(height)]
update_output(node=root, row=0, left=0, right=width - 1)
return self.output
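# Minimal local test sketch (not part of the original solution): a TreeNode class
# matching the commented definition above is assumed here.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

if __name__ == "__main__":
    root = TreeNode(1)
    root.left = TreeNode(2)
    # height 2 gives a 2 x (2 ** 2 - 1) grid
    print(Solution().printTree(root))  # [['', '1', ''], ['2', '', '']]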
|
from fastapi import APIRouter, Query, Path, Body, HTTPException, Depends
from fastapi.openapi.utils import get_openapi
from uuid import UUID
from typing import List
from starlette.status import HTTP_201_CREATED, HTTP_204_NO_CONTENT, HTTP_200_OK
from starlette.responses import JSONResponse, HTMLResponse
# model libraries
from app.models.schemas import Face, Model, ModelOut
from app.models.responses import (
created_header_schema, CreatedContent, paging_header_schema,
additional_error_post_responses, additional_error_responses
)
from app.models.security import User
from app.core.jwt import get_user_info
from app.core.config import HOST_NAME
# TODO(): change input parameters to dependency
from app.models.parameters import per_page_param, page_param
from app.crud.database import (create_model_db, get_models_db, get_model_db,
delete_model_db, get_faces_db, create_faces_db, count_models_db
)
import os
import json
current = os.path.dirname(os.path.dirname(__file__))
root = os.path.split(current)[0]
target_folder = os.path.join(root, 'app', 'models', 'samples')
file_path = os.path.join(target_folder, 'model_complete_multi_zone_office.json')
with open(file_path) as json_file:
model_sample = json.load(json_file)
from app.crud.util import generate_paging_link
router = APIRouter()
@router.get(
"/",
operation_id='models_welcome',
include_in_schema=False,
content_type=None
)
def say_hi_models():
"""Landing page."""
response = HTMLResponse(
'<html><body>'
'<h1>Welcome!</h1>'
'<h2>See the interactive <a href="/docs">API documentation</a></h2>'
'<h2>See the redoc <a href="/redoc">API documentation</a></h2>'
'</body></html>'
)
return response
@router.get(
"/models/",
operation_id='get_models',
tags=['Model'],
summary='Get Models',
status_code=HTTP_200_OK,
response_description='Retrieved',
response_model=List[ModelOut],
responses={
**additional_error_responses,
200: {
'headers': paging_header_schema
}
})
def get_models(
page: int = page_param,
per_page: int = per_page_param,
user: User = Depends(get_user_info)
):
"""Retrieve a list of models."""
link = 'http://%s/models' % (HOST_NAME)
total_count = count_models_db(user)
models = get_models_db(page, per_page, user)
header_links = generate_paging_link(link, page, total_count, per_page)
return JSONResponse(
status_code=200,
headers={'Link': str(header_links)},
content=[model.to_model_out() for model in models]
)
@router.post(
"/models/",
operation_id='create_model',
tags=['Model'],
summary='Create a Model',
status_code=HTTP_201_CREATED,
response_description='Created successfully',
response_model=CreatedContent,
responses={
**additional_error_post_responses,
201: {
'headers': created_header_schema
}
}
)
def create_model(
model: Model = Body(
...,
description = "A Pollination model",
title = "Model",
example = model_sample),
user: User = Depends(get_user_info)
):
"""Create a new model."""
nid = create_model_db(model, user)
location = '%s/models/%s' % (HOST_NAME, nid)
return JSONResponse(
status_code=201,
headers={'Location': location},
content={
'id': nid,
'message': 'Use Location in headers to access the new object.'
}
)
@router.get(
"/models/{id}",
operation_id='get_model',
tags=['Model'],
summary='Get a Model',
status_code=HTTP_200_OK,
response_description='Retrieved',
response_model=ModelOut,
responses={**additional_error_responses}
)
def get_model(
id: UUID = Path(..., title = "Model id."),
user: User = Depends(get_user_info)
):
"""Retrieve a sensor grid."""
return ModelOut.parse_obj(get_model_db(id, user).to_model_out())
@router.delete(
"/models/{id}",
operation_id='del_model',
tags=['Model'],
summary='Delete a Model',
status_code=HTTP_204_NO_CONTENT,
response_description='Success',
responses={**additional_error_responses}
)
def delete_model(
id: UUID = Path(
...,
title = "Model id",
description = "Model id."),
user: User = Depends(get_user_info)
):
"""Delete a sensor grid."""
delete_model_db(id, user)
@router.get(
"/models/{id}/faces",
operation_id='get_faces',
tags=['Model'],
summary='Get Model Faces',
status_code=HTTP_200_OK,
response_description='Retrieved',
response_model=List[Face],
responses={**additional_error_responses}
)
def get_faces(
id: UUID = Path(..., title = "model id."),
page: int = page_param,
per_page: int = per_page_param,
user: User = Depends(get_user_info)
):
"""Retrieve list of sensors for a sensor grid.
See Location in response headers for paging links.
"""
link = 'http://%s/models' % (HOST_NAME)
faces, face_count = get_faces_db(id, page, per_page, user)
links = generate_paging_link(link, page, face_count, per_page)
return JSONResponse(
status_code=200,
headers={'Link': str(links)},
content= [face.to_face_out() for face in faces]
)
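# Minimal wiring sketch (assumption: in this project the router is mounted from a
# separate application module; shown here only to illustrate how these endpoints
# become reachable).
def build_demo_app():
    from fastapi import FastAPI
    demo_app = FastAPI()
    demo_app.include_router(router)
    return demo_app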
|
#!/usr/bin/env python
# coding: utf-8
"""
this script automates the process of slicing the same stl file with many possible combinations of command line arguments
that can be passed to slic3r
"""
import glob
import itertools as it
import logging
import os
from logging.config import dictConfig # pylint: disable=ungrouped-imports
from subprocess import CalledProcessError, check_output
import pandas as pd
from stldeli import config
def flag2placeholder(flag):
"""
convert a flag into valid commandline argument
:param flag:
:return:
"""
logging.debug("flag2placeholder")
flag_str = str(flag)
flag_str_clean = flag_str.strip("-").replace("-", "_")
return flag_str_clean + "[" + flag_str_clean + "]"
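# Worked example (for reference, not called anywhere): flag2placeholder("--layer-height")
# returns "layer_height[layer_height]", which matches slic3r's --output-filename-format
# placeholder syntax.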
def get_combinations_from_configurations(configurations):
"""
convert configured dict into a generator of all possible tuples
:param configurations:
:return:
"""
logging.info("get_combinations_from_configurations")
return it.product(*(configurations[Name] for Name in configurations))
def get_series_from_gcode(gcode_file_path):
    """
    parse the ';' comment lines of a gcode file into a pandas Series of metadata
    :param gcode_file_path:
    :return:
    """
    metarow = pd.Series()
with open(gcode_file_path) as gcode_file:
for line in gcode_file.readlines():
if line.startswith(';'):
datum = line.strip('; \n').split('=')
if len(datum) == 2:
metarow[datum[0]] = datum[1]
return metarow
# pylint: disable=too-many-locals
def main():
"""
main function
:return: dataframe of metadata
"""
logging.info("main")
combinations = get_combinations_from_configurations(config.slic3r_configurations)
total = len(list(combinations))
logging.info("{} possible slices".format(total))
count = 0
_metadata = pd.DataFrame()
input_file = os.path.abspath("stl_files/largecube.stl")
for configuration in list(it.product(*config.slic3r_configurations.values())):
logging.debug("configuration = {}".format(configuration))
metarow = pd.Series(configuration, index=config.slic3r_configurations.keys())
output_file_format = "[input_filename_base]"
print("{} out of {}".format(count + 1, total))
cmd = ["slic3r"]
for key, value in zip(config.slic3r_configurations.keys(), configuration):
logging.debug("adding {} with value of {} to cmd".format(key, value))
metarow[key] = value
if value:
cmd.append(str(key))
if not isinstance(value, bool):
cmd.append(str(value))
output_file_format += "_" + flag2placeholder(key)
cmd.append("--output-filename-format")
gcode_file_path = "{count}_{output_file_format}_.gcode" \
.format(
count=count,
output_file_format=output_file_format
)
cmd.append(gcode_file_path)
cmd.append(input_file)
metarow = metarow.append(pd.Series(count, index=["filenumber"]))
cmd_str = ''
for arg in cmd:
cmd_str += ' ' + str(arg)
print(cmd_str)
try:
check_output(cmd)
            metarow = pd.concat([metarow, get_series_from_gcode(gcode_file_path)], axis=0)
os.remove(gcode_file_path)
_metadata = _metadata.append(metarow, ignore_index=True)
count += 1
except CalledProcessError as error_message:
print("unable to slice with error: {}".format(error_message))
continue
return _metadata
if __name__ == '__main__':
os.makedirs(config.LOG_DIR, exist_ok=True)
dictConfig(config.LOG_DICT_CONFIG)
metadata = main() # pylint: disable=invalid-name
metadata.to_csv('metadata.csv')
|
# Under MIT License, see LICENSE.txt
from math import ceil, sqrt
import numpy
from RULEngine.Util.constant import *
from RULEngine.Util.Position import Position
from ai.Algorithm.IntelligentModule import IntelligentModule
__author__ = 'RoboCupULaval'
class InfluenceMap(IntelligentModule):
"""
Une classe représentant une influence map d'un terrain de robosoccer. C'est une matrice de cellule représentant le
terrain auquelle on ajoute des points avec des influences qui se propagent sur le reste du terrain.
La matrice et le code respecte le format suivant:
- axe X ou axe 0 est l'axe représentant les rangées / l'axe 0 de la matrice /
la largeur (axe Y) du terrain physique
- axe Y ou axe 1 est l'axe représentant les colonnes / l'axe 1 de la matrice /
la longueur (axe X) du terrain physique
- Le point d'origine (0, 0) se situe au coin supérieur gauche du terrain physique.
- Ce qui veut dire que c'est tableaux sont row-major.
L'algorithm de propagation de l'influence est:
(force appliqué à la case) = (force du point d'origine de l'influence) * ((facteur de réduction) ** (distance))
transfomé en int arrondie vers 0.
"""
def __init__(self, game_state, resolution=100, strength_decay=0.90, strength_peak=100, effect_radius=40,
have_static=False, have_it_executed=False):
"""
Constructeur de la classe InfluenceMap
:param game_state: référence vers le game state
:param resolution: résolution des cases (défaut = 100)
:param strength_decay: facteur de réduction de l'influence par la distance (défaut = 0.8)
:param strength_peak: maximum de la force appliquable par un point (est aussi le min) (défaut = 100)
:param effect_radius: distance qui borne la propagation de l'influence autour de l'origine (défaut = 40)
"""
assert isinstance(resolution, int), "Creation InfluenceMap avec param resolution autre que int"
assert isinstance(strength_decay, float), "Creation InfluenceMap avec param strength_decay autre que int"
assert isinstance(effect_radius, int), "Creation InfluenceMap avec param effect_radius autre que int"
assert isinstance(strength_peak, int), "Creation InfluenceMap avec param strength_peak autre que int"
assert isinstance(have_static, bool), "Creation InfluenceMap avec param have_static autre que bool"
assert isinstance(have_it_executed, bool), "Creation InfluenceMap avec param have_it_executed autre que bool"
assert 0 < resolution, "Creation InfluenceMap avec param resolution <= 0"
assert 0 < strength_decay < 1, "Creation InfluenceMap avec param strength_decay pas dans intervalle ]0, 1["
assert 0 < strength_peak, "Creation InfluenceMap avec param strength_decay <= à 0"
assert 0 < effect_radius, "Creation InfluenceMap avec param effect_radius <= 0"
super().__init__(game_state)
# board parameters
self._resolution = resolution
self._strength_decay = strength_decay
self._strength_peak = strength_peak
self._effect_radius = effect_radius
self._border_strength = - strength_peak * 0.03 # TODO change this variable for something not out of thin air!
        # things on the board parameters
self._ball_position_on_board = ()
self._number_of_rows, self._number_of_columns = self._calculate_rows_and_columns()
if self.state is not None:
self.have_it_executed = have_it_executed
self._last_updated = self.state.get_timestamp()
self._adjust_effect_radius()
        # different boards to store the different representations
self._static_boards = None
self._starterboard = self._create_standard_influence_board()
self._board = self._create_standard_influence_board()
        # these boards are optional
self._borders_board = self._create_standard_influence_board()
self._goals_board = self._create_standard_influence_board()
# todo determine how to choose if you want different kinds of static boards (ex: borders, goals, to determine..)
if have_static:
self._create_static_board()
def update(self):
if self.have_it_executed:
if self.state.get_timestamp() - self._last_updated > 1:
# purge the board with a new one (static or not)
if self._static_boards is not None:
self._board = numpy.copy(self._static_boards)
else:
self._board = self._create_standard_influence_board()
self._update_and_draw_robot_position()
self._update_ball_position()
self.state.debug_manager.add_influence_map(self.export_board())
self._last_updated = self.state.timestamp
def export_board(self):
return self._board.tolist()
def find_points_over_strength_square(self, top_left_position, bottom_right_position, strength):
"""
Retourne les points qui se trouve au-dessus ou égale à la force demandé dans le tableau principal(_board)
:param Positon top_left_position: la rangé supérieure
:param Position bottom_right_position: la rangé inférieure
:param int strength: le force à trouvé qui est égale ou au dessus.
:return: un liste de point qui se trouve au dessus ou égale à la force demandé
"""
assert isinstance(top_left_position, Position), "Cette méthode requiert un object Position"
assert isinstance(bottom_right_position, Position), "Cette méthode requiert un object Position"
top_row, left_column = self._transform_field_to_board_position(top_left_position)
bot_row, right_column = self._transform_field_to_board_position(bottom_right_position)
ind_x, ind_y = numpy.where(self._board[top_row:bot_row, left_column:right_column] >= strength)
ind_x = ind_x.tolist()
ind_x = [x+top_row for x in ind_x]
ind_y = ind_y.tolist()
ind_y = [x+left_column for x in ind_y]
indices = zip(ind_x, ind_y)
return list(indices)
def find_points_under_strength_square(self, top_left_position, bottom_right_position, strength):
"""
Retourne les points qui se trouve au-dessous ou égale à la force demandé dans le tableau principal(_board)
:param Positon top_left_position: la rangé supérieure
:param Position bottom_right_position: la rangé inférieure
:param int strength: le force à trouvé qui est égale ou au dessous.
:return: un liste de point qui se trouve au-dessous ou égale à la force demandé
:rtype: une liste de tuple rangée * colonne (int * int) list
"""
assert isinstance(top_left_position, Position), "Cette méthode requiert un object Position"
assert isinstance(bottom_right_position, Position), "Cette méthode requiert un object Position"
top_row, left_column = self._transform_field_to_board_position(top_left_position)
bot_row, right_column = self._transform_field_to_board_position(bottom_right_position)
ind_x, ind_y = numpy.where(self._board[top_row:bot_row, left_column:right_column] <= strength)
ind_x = ind_x.tolist()
ind_x = [x+top_row for x in ind_x]
ind_y = ind_y.tolist()
ind_y = [x+left_column for x in ind_y]
indices = zip(ind_x, ind_y)
return list(indices)
def find_max_value_in_board(self):
"""
Permet de trouver les points du tableau qui ont la plus grande valeur du tableau
:return: la valeur maximale du tableau et la liste de point (rangée * colonne) des point qui ont la valeur max
:rtype: tuple (int * (int * int) list)
"""
uniques = numpy.unique(self._board)
max_in_board = uniques[-1]
x, y = numpy.where(self._board >= max_in_board)
x = x.tolist()
y = y.tolist()
indices = zip(x, y)
return max_in_board, indices
def find_min_value_in_board(self):
"""
Permet de trouver les points du tableau qui ont la plus petite valeur du tableau
:return: la valeur minimale du tableau et la liste de point (rangée * colonne) des point qui ont la valeur min
:rtype: tuple (int * (int * int) list)
"""
uniques = numpy.unique(self._board)
min_in_board = uniques[0]
x, y = numpy.where(self._board <= min_in_board)
x = x.tolist()
y = y.tolist()
indices = zip(x, y)
return min_in_board, indices
def find_max_value_in_circle(self, center, radius):
pass
def get_influence_at_position(self, position):
assert isinstance(position, Position), "accessing this function require a Position object"
row_column_in_board = self._transform_field_to_board_position(position)
return self._board[row_column_in_board[0], row_column_in_board[1]]
def get_ball_influence(self):
return self._board[self._ball_position_on_board[0], self._ball_position_on_board[1]]
def get_number_of_cells(self):
return self._number_of_rows * self._number_of_columns
def print_board_to_file(self):
"""
Create a file in the same running directory with a representation of the current board.
"""
numpy.savetxt("IMBoard", self._board, fmt='%4i')
# todo remove this line while not in debug mode
print(self._starterboard.shape[0], " x ", self._starterboard.shape[1])
def str(self):
        # todo comment and make sure this is right! Embellish?
        return "Influence Map - " + str(self._number_of_rows) + " x " + str(self._number_of_columns)
def _update_and_draw_robot_position(self):
"""
Fetch la position des robots dans le gamestate et les applique sur le tableau principal
"""
robots_position = []
for i in range(self.state.get_count_player()):
try:
friend_position = self._transform_field_to_board_position(self.state.get_player_position(i))
robots_position.append(friend_position)
self._add_point_and_propagate_influence(friend_position[0], friend_position[1], self._board, 100)
except:
pass
try:
enemy_position = self._transform_field_to_board_position(self.state.get_enemy_position(i))
robots_position.append(enemy_position)
self._add_point_and_propagate_influence(enemy_position[0], enemy_position[1], self._board, -100)
except:
pass
self._clamp_board(self._board)
def _update_ball_position(self):
self._ball_position_on_board = self._transform_field_to_board_position(self.state.get_ball_position())
def _calculate_rows_and_columns(self):
"""
Utilise la resolution pour calculer le nombre de rangée(rows) et de colonne(columns) pour le terrain
:return: le nombre de rangées et le nombre de colonnes
:rtype: tuple (int * int)
"""
numberofrow = int(ceil((abs(FIELD_Y_BOTTOM) + FIELD_Y_TOP) / self._resolution))
numberofcolumn = int(ceil((abs(FIELD_X_LEFT) + FIELD_X_RIGHT) / self._resolution))
return numberofrow, numberofcolumn
def _create_standard_influence_board(self):
"""
Crée un objet numpy.ndarray, une liste à 2 dimenson de self._number_of_rows par self._number_of_columns d'int16.
:return: Un numpy.ndarray d'int16 de self._number_of_rows par self._number_of_columns.
:rtype: numpy.ndarray dtype=numpy.int16
"""
return numpy.zeros((self._number_of_rows, self._number_of_columns), numpy.int16)
def _adjust_effect_radius(self):
"""
Permet d'ajuster le rayon d'effet de l'influence (self._effect_radius) si celui-ci éxède la propagation normale.
Si la propagation de l'influence se retrouve à zéro avant d'avoir atteint le rayon d'effet défini à la création
de la classe, cette méthode l'ajuste pour quelle donne exactement le rayon qui permet d'afficher tous les points
qui ne donnent pas zéro.
influence dans les cases:
100 91 86 ... 6 5 4 3 2 1 0 0 0 0 0 0 0 sans effect_radius ajusté (si effect_radius est trop gros)
v
100 91 86 ... 6 5 4 3 2 1 avec effect_radius ajusté
Coupe le nombre d'itérations des boucles de mise en place de l'influence lorsque le calcul donnerait une
influence à 0.
"""
distance_from_center = 0
strenght_value_at_current_distance = self._strength_peak
while strenght_value_at_current_distance >= 1:
distance_from_center += 1
decay = self._compute_value_by_distance(self._strength_peak, distance_from_center)
strenght_value_at_current_distance = decay
if self._effect_radius > distance_from_center:
self._effect_radius = distance_from_center
def _initialize_borders_board(self):
"""
Ajoute des bordures sur le board dédie _borders_board
"""
temp_array = self._create_standard_influence_board()
self._put_and_propagate_borders(temp_array)
numpy.add(temp_array, self._borders_board, out=self._borders_board)
def _put_and_propagate_borders(self, board_to_apply):
"""
Mets des bordures sur un array numpy.ndarray vierge.
:param board_to_apply: Un numpy.ndarray un array vierge pour y ajouter des bordures.
:type board_to_apply: numpy.ndarray dtype=numpy.int16
"""
assert(isinstance(board_to_apply, numpy.ndarray))
assert(board_to_apply.shape[0] == self._number_of_rows)
assert(board_to_apply.shape[1] == self._number_of_columns)
assert(board_to_apply.dtype == numpy.int16)
board_to_apply[0] = self._border_strength
board_to_apply[:, 0] = self._border_strength
        # keeping the effect radius low while drawing the borders greatly speeds up the
        # initialization, and that much border is not needed anyway
# TODO see if this variable is okay, maybe change the way it is determined
border_variance = 2
temp_effectradius = int(ceil(ROBOT_RADIUS / self._resolution)) + border_variance
# TODO make sure this does what you think it does.
# Top border
for border in range(0, self._number_of_columns):
# only for the rows affected by the change.
for x in range(0, temp_effectradius):
columnmin = max(0, (border - temp_effectradius - 1))
columnmax = min(self._number_of_columns, (border + temp_effectradius + 1))
# for every columns affected
for y in range(columnmin, columnmax):
decay = self._compute_value_by_distance(self._border_strength, self._distance(0, border, x, y))
board_to_apply[x, y] += decay
# left border
for border in range(0, self._number_of_rows):
for y in range(0, temp_effectradius):
rowmin = max(0, (border - temp_effectradius - 1))
rowmax = min(self._number_of_rows, border + temp_effectradius + 1)
for x in range(rowmin, rowmax):
decay = self._compute_value_by_distance(self._border_strength, self._distance(border, 0, x, y))
board_to_apply[x, y] += decay
        # Take the image created above, flip it left-right and up-down, then add it
temp_inverse_board = numpy.copy(board_to_apply)
temp_inverse_board = temp_inverse_board[::-1, ::-1]
board_to_apply += temp_inverse_board
def _initialize_goals_board(self, v_h_goal_offset):
"""
Mets des buts sur le starterboard et le board
"""
# todo fetch which side we are on.
temp_array = self._create_standard_influence_board()
self._put_goals_and_propagate(v_h_goal_offset, temp_array)
numpy.add(temp_array, self._goals_board, out=self._goals_board)
def _calculate_goal_vertical_offset(self):
"""
Calcule la dimension vertical du but p/r à la résolution.
:return: la valeur verticale du but ajusté à la résolution
:rtype: int
"""
return int(ceil(FIELD_GOAL_Y_TOP / self._resolution))
def _calculate_goal_horizontal_offset(self):
"""
Calcule la dimension horizontale du but p/r à la résolution.
:return: la valeur horizontale du but ajusté à la résolution
:rtype: int
"""
return int(ceil(FIELD_GOAL_SEGMENT / self._resolution))
def _put_goals_and_propagate(self, v_h_offset, board_to_apply):
"""
Mets des buts sur le numpy.ndarray fourni.
:param v_h_offset: la dimension verticale et horizontale des buts.
:type v_h_offset: tuple (int * int)
:param board_to_apply: Un numpy.ndarray un array vierge pour y ajouter des buts.
:type board_to_apply: numpy.ndarray dtype=numpy.int16
"""
# TODO take into account what team you are ie: orientation and strength adjustment
for x in range(int(self._number_of_rows / 2 - v_h_offset[0]),
int(self._number_of_rows / 2 + v_h_offset[0]) + 1,
6):
self._add_point_and_propagate_influence(x, 0, board_to_apply, 100)
numpy.add((numpy.negative(board_to_apply[:, ::-1])), board_to_apply, out=board_to_apply)
def _create_static_board(self):
"""
Crée des tableaux pour les bordures et les buts et les enregistres dans les variables de classes déjà présente.
Il reste à déterminer comment on s'occupe de choisir quel tableau static à créer.
"""
# TODO see to make flags to control what goes into the static board
self._static_boards = self._create_standard_influence_board()
self._initialize_borders_board()
numpy.add(self._borders_board, self._static_boards, out=self._static_boards)
v_h_goal_offset = (self._calculate_goal_vertical_offset(), self._calculate_goal_horizontal_offset())
self._initialize_goals_board(v_h_goal_offset)
numpy.add(self._goals_board, self._static_boards, out=self._static_boards)
self._clamp_board(self._static_boards)
# numpy.savetxt("Debug", self._static_boards, fmt='%5i')
def _compute_value_by_distance(self, strength, distance):
"""
Calcule la valeur qu'une recoit d'un point de force strength à distance distance.
:param strength: la force du point central
:type strength: int
:param distance: la distance entre le point central et le point où calculer la value
:type distance: int or float
:return: la valeur calculé
:rtype: int
"""
return int(strength * (self._strength_decay ** distance))
def _clamp_board(self, board_to_clamp):
"""
Arrondis toutes les cellules du tableau pour qu'ils soient dans [-self._strength_peak, self._strength_peak].
:param board_to_clamp: Un numpy.ndarray à clampé
:type board_to_clamp: numpy.ndarray dtype=numpy.int16
"""
numpy.clip(board_to_clamp, -self._strength_peak, self._strength_peak, out=board_to_clamp)
def _add_point_and_propagate_influence(self, row, column, board_to_apply, strength=0):
"""
Pose un point et propage son influence sur le tableau donné.
Il est important de clamper le board après avoir utiliser cette méthode!
:param int row: la rangée du point d'origine de la force
:param int column: int La colonne du point d'origine de la force
:param board_to_apply: numpy.ndarray l'array sur lequel on ajoute un point et on le propage
:type board_to_apply: numpy.ndarray dtype=numpy.int16
:param int strength: la force du point à appliquer
"""
assert (isinstance(row, int))
assert (isinstance(column, int))
assert (0 <= row < self._number_of_rows)
assert (0 <= column < self._number_of_columns)
assert (isinstance(board_to_apply, numpy.ndarray))
assert (board_to_apply.shape[0] == self._number_of_rows)
assert (board_to_apply.shape[1] == self._number_of_columns)
assert (isinstance(strength, int))
assert (-self._strength_peak <= strength <= self._strength_peak)
rowmin = max(0, (row - self._effect_radius))
rowmax = min((self._number_of_rows - 1), (row + self._effect_radius))
columnmin = max(0, (column - self._effect_radius))
columnmax = min((column + self._effect_radius), self._number_of_columns)
cases_iterator = numpy.nditer(board_to_apply[rowmin:(rowmax + 1), columnmin:(columnmax + 1)],
flags=['multi_index'], op_flags=['readwrite'])
while not cases_iterator.finished:
to_put = self._compute_value_by_distance(strength, self._distance(cases_iterator.multi_index[0],
cases_iterator.multi_index[1],
(row - rowmin),
(column - columnmin)))
influence_already_in_case = cases_iterator[0]
influence_to_put_instead = to_put + influence_already_in_case
cases_iterator[0] = influence_to_put_instead
cases_iterator.iternext()
def _transform_field_to_board_position(self, position):
assert(isinstance(position, Position))
assert(FIELD_X_LEFT <= position.x <= FIELD_X_RIGHT)
assert(FIELD_Y_BOTTOM <= position.y <= FIELD_Y_TOP)
# this should hold up
xpos = -position.x + ((abs(FIELD_X_LEFT) + FIELD_X_RIGHT) / 2)
ypos = position.y + ((abs(FIELD_Y_BOTTOM) + FIELD_Y_TOP) / 2)
xpos = int(xpos / self._resolution)
ypos = int(ypos / self._resolution)
if ypos == self._number_of_rows:
ypos -= 1
if xpos == self._number_of_columns:
xpos -= 1
return ypos, xpos
def _transform_board_to_field_position(self, row, column):
assert(isinstance(row, int))
assert(isinstance(column, int))
assert(0 <= row <= self._number_of_rows)
assert(0 <= column <= self._number_of_columns)
# This should hold up
ypos = row * self._resolution
xpos = column * self._resolution
ypos = (ypos - ((abs(FIELD_Y_BOTTOM) + FIELD_Y_TOP) / 2)) + (self._resolution / 2)
xpos = (xpos - ((abs(FIELD_X_LEFT) + FIELD_X_RIGHT) / 2)) + (self._resolution / 2)
tempposition = Position(xpos, ypos)
return tempposition
def _distance(self, x1, y1, x2, y2):
        assert (isinstance(x1, (int, float)))
assert (isinstance(x2, int))
assert (isinstance(y1, int))
assert (isinstance(y2, int))
return sqrt(((x2 - x1) ** 2) + ((y2 - y1) ** 2))
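# Worked example of the propagation rule described in the class docstring (default
# parameters assumed): with strength_peak = 100 and strength_decay = 0.90, a cell at
# distance 3 from the origin receives int(100 * 0.9 ** 3) = int(72.9) = 72; the value
# first truncates to 0 at distance 44, so _adjust_effect_radius would cap
# self._effect_radius at 44 if a larger radius were requested.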
|
import sys
from rply.token import BaseBox, Token
from rply import ParsingError
# -hacking-
def _token_init(self, name, value, lineno=0):
self.name = name
self.value = value
self.lineno = lineno
def _basebox_getsourcepos(self):
return self.lineno
def _basebox_append_item(self, item):
raise NotImplementedError(self)
BaseBox._attrs_ = ['lineno']
BaseBox.getsourcepos = _basebox_getsourcepos
BaseBox.append_item = _basebox_append_item
Token._attrs_ = ['name', 'value']
Token.__init__ = _token_init
del Token.getsourcepos
# -done-
KEYWORDS = {
"print": 'T_PRINT',
"echo": 'T_ECHO',
"instanceof": 'T_INSTANCEOF',
"new": 'T_NEW',
"clone": 'T_CLONE',
"exit": 'T_EXIT',
"die": 'T_EXIT',
"if": 'T_IF',
"elseif": 'T_ELSEIF',
"else": 'T_ELSE',
"endif": 'T_ENDIF',
"array": 'T_ARRAY',
"Array": "T_ARRAY",
"include": 'T_INCLUDE',
"include_once": 'T_INCLUDE_ONCE',
"eval": 'T_EVAL',
"require": 'T_REQUIRE',
"require_once": 'T_REQUIRE_ONCE',
"or": 'T_LOGICAL_OR',
"xor": 'T_LOGICAL_XOR',
"and": 'T_LOGICAL_AND',
"foreach": 'T_FOREACH',
"endforeach": 'T_ENDFOREACH',
"do": 'T_DO',
"while": 'T_WHILE',
"endwhile": 'T_ENDWHILE',
"for": 'T_FOR',
"endfor": 'T_ENDFOR',
"declare": 'T_DECLARE',
"enddeclare": 'T_ENDDECLARE',
"as": 'T_AS',
"switch": 'T_SWITCH',
"endswitch": 'T_ENDSWITCH',
"case": 'T_CASE',
"default": 'T_DEFAULT',
"break": 'T_BREAK',
"continue": 'T_CONTINUE',
"goto": 'T_GOTO',
"function": 'T_FUNCTION',
"const": 'T_CONST',
"return": 'T_RETURN',
"try": 'T_TRY',
"catch": 'T_CATCH',
"throw": 'T_THROW',
"use": 'T_USE',
#"insteadof": 'T_INSTEADOF',
"global": 'T_GLOBAL',
"static": 'T_STATIC',
"abstract": 'T_ABSTRACT',
"final": 'T_FINAL',
"private": 'T_PRIVATE',
"protected": 'T_PROTECTED',
"public": 'T_PUBLIC',
"var": 'T_VAR',
"unset": 'T_UNSET',
"isset": 'T_ISSET',
"empty": 'T_EMPTY',
"class": 'T_CLASS',
#"trait": 'T_TRAIT',
"interface": 'T_INTERFACE',
"extends": 'T_EXTENDS',
"implements": 'T_IMPLEMENTS',
"list": 'T_LIST',
"__halt_compiler": 'T_HALT_COMPILER',
"__FILE__": 'T_FILE',
"__CLASS__": 'T_CLASS_C',
#"__TRAIT__": 'T_TRAIT_C',
"__METHOD__": 'T_METHOD_C',
"__FUNCTION__": 'T_FUNC_C',
"__LINE__": 'T_LINE',
"__NAMESPACE__": 'T_NS_C',
"__DIR__": 'T_DIR',
}
RULES = [(r'\b%s\b' % keyword, name) for keyword, name in KEYWORDS.items()]
RULES += [
("b?\<\<\<.*?\n", 'T_START_HEREDOC'),
("\x00", 'T_END_HEREDOC'), # generated artificially
("\x00", 'T_ENCAPSED_AND_WHITESPACE'), # generated artificially
("\x00", 'T_IGNORE_THIS_TOKEN'), # generated artificially
(r'b?"([^"\\]|\\.)*"|' +
r"b?'([^'\\]|\\.)*'", 'T_CONSTANT_ENCAPSED_STRING'),
("[a-zA-Z_][a-zA-Z_0-9]*", 'T_STRING'),
("\?\>", 'B_END_OF_CODE_BLOCK'),
("\x00", 'B_LITERAL_BLOCK'),
("\+\=", 'T_PLUS_EQUAL'),
("\-\=", 'T_MINUS_EQUAL'),
("\*\=", 'T_MUL_EQUAL'),
("\/\=", 'T_DIV_EQUAL'),
("\.\=", 'T_CONCAT_EQUAL'),
("\%\=", 'T_MOD_EQUAL'),
("\&\=", 'T_AND_EQUAL'),
("\|\=", 'T_OR_EQUAL'),
("\^\=", 'T_XOR_EQUAL'),
("\<\<\=", 'T_SL_EQUAL'),
("\>\>\=", 'T_SR_EQUAL'),
("\|\|", 'T_BOOLEAN_OR'),
("\&\&", 'T_BOOLEAN_AND'),
("\=\=\=", 'T_IS_IDENTICAL'),
("\!\=\=", 'T_IS_NOT_IDENTICAL'),
("\=\=", 'T_IS_EQUAL'),
("\!\=", 'T_IS_NOT_EQUAL'),
("\<\>", 'T_IS_NOT_EQUAL'),
("\<\=", 'T_IS_SMALLER_OR_EQUAL'),
("\>\=", 'T_IS_GREATER_OR_EQUAL'),
("\<\<", 'T_SL'),
("\>\>", 'T_SR'),
("\+\+", 'T_INC'),
("\-\-", 'T_DEC'),
("\((int|integer)\)", 'T_INT_CAST'),
("\((real|double|float)\)", 'T_DOUBLE_CAST'),
("\((string|binary)\)", 'T_STRING_CAST'),
("\(array\)", 'T_ARRAY_CAST'),
("\(object\)", 'T_OBJECT_CAST'),
("\((bool|boolean)\)", 'T_BOOL_CAST'),
("\(unset\)", 'T_UNSET_CAST'),
("\(unicode\)", 'T_UNICODE_CAST'),
("([0-9]*\.[0-9]*|[0-9][0-9]*)[eE](\+|\-)?[0-9][0-9]*", 'T_DNUMBER'),
("[0-9]+\.[0-9]+", 'T_DNUMBER'),
("\.[0-9]+", 'T_DNUMBER'),
("[0-9]+\.", 'T_DNUMBER'),
("0x([0-9a-fA-F])*", 'T_LNUMBER'),
("0X([0-9a-fA-F])*", 'T_LNUMBER'),
("[0-9]+", 'T_LNUMBER'),
("\$[a-zA-Z_][0-9a-zA-Z_]*", 'T_VARIABLE'),
#("\$\{[a-zA-Z]*\}", 'T_STRING_VARNAME'),
("(//[^\n]*)|(#[^\n]*)|(/\*(.|\n)*?\*/)", 'T_COMMENT'),
("\-\>", 'T_OBJECT_OPERATOR'),
("\=\>", 'T_DOUBLE_ARROW'),
("comment", 'T_COMMENT'),
("doc comment", 'T_DOC_COMMENT'),
#("open tag", 'T_OPEN_TAG'),
#("open tag with echo", 'T_OPEN_TAG_WITH_ECHO'),
#("close tag", 'T_CLOSE_TAG'),
("whitespace", 'T_WHITESPACE'),
("namespace", 'T_NAMESPACE'),
("\\\\", 'T_NS_SEPARATOR'),
("\:\:", 'T_PAAMAYIM_NEKUDOTAYIM'),
("\&", '&'),
("\,", ','),
("\;", ';'),
("\:", ':'),
("\=", '='),
("\?", '?'),
("\|", '|'),
("\^", '^'),
("\<", '<'),
("\>", '>'),
("\+", '+'),
("\-", '-'),
("\.", '.'),
("\*", '*'),
("\/", '/'),
("\%", '%'),
("\!", '!'),
("\[", '['),
("\]", ']'),
('\(', '('),
('\)', ')'),
("\{", '{'),
("\}", '}'),
("\~", '~'),
("\@", '@'),
("\$", '$'),
("\"", '"'),
("`", '`'),
("\\n", 'H_NEW_LINE'),
(r"\r\n", 'H_NEW_LINE'),
("\\t", 'H_TABULATURE'),
(" ", 'H_WHITESPACE')]
RULES_FOR_DOUBLE_QUOTE = [
("\$[a-zA-Z_][0-9a-zA-Z_]*", 'T_VARIABLE'),
("->", "T_OBJECT_OPERATOR"),
(r"\{\$|\$\{", "T_DOLLAR_OPEN_CURLY_BRACES"),
(r"([^\\\"$\{]|\\.|\$[^a-zA-Z\"{]|\$(?=\")|{[^$])+", "T_ENCAPSED_AND_WHITESPACE"),
('"', '"'),
]
RULES_FOR_BACKTICK = [
(r"\{\$|\$\{", "T_DOLLAR_OPEN_CURLY_BRACES"),
("`", '`'),
("\}", '}'),
("\{", '{'),
("\$[a-zA-Z_][0-9a-zA-Z_]*", 'T_VARIABLE'),
("->", "T_OBJECT_OPERATOR"),
(r"([^\\$\{\`\}]|\\.|\$[^a-zA-Z\"{]|\$(?=\")|{[^$])+", "T_ENCAPSED_AND_WHITESPACE"),
]
RULES_FOR_HEREDOC = [
("\$[a-zA-Z_][0-9a-zA-Z_]*", 'T_VARIABLE'),
(r"\{\$|\$\{", "T_DOLLAR_OPEN_CURLY_BRACES"),
(r"([^\\$\{]|\\.|\$[^a-zA-Z\"{]|\$(?=\")|{[^$]|\\$)+", "T_ENCAPSED_AND_WHITESPACE"),
]
RULES_FOR_BRACKETS = [
("\]", "]"),
("\[", "["),
("\d+", "T_NUM_STRING"),
("\$[a-zA-Z_][0-9a-zA-Z_]*", 'T_VARIABLE'),
("[a-zA-Z_][0-9a-zA-Z_]*", 'T_STRING'),
]
ALL_RULES = RULES + RULES_FOR_DOUBLE_QUOTE + RULES_FOR_BRACKETS
PRECEDENCES = [
("left", ["T_INCLUDE", "T_INCLUDE_ONCE", "T_EVAL",
"T_REQUIRE", "T_REQUIRE_ONCE"]),
("left", [","]),
("left", ["T_LOGICAL_OR", ]),
("left", ["T_LOGICAL_XOR", ]),
("left", ["T_LOGICAL_AND", ]),
("right", ["T_PRINT", ]),
#("right", ["T_YIELD",]),
("left", ['=', "T_PLUS_EQUAL", "T_MINUS_EQUAL", "T_MUL_EQUAL",
"T_DIV_EQUAL", "T_CONCAT_EQUAL", "T_MOD_EQUAL",
"T_AND_EQUAL", "T_OR_EQUAL", "T_XOR_EQUAL",
"T_SL_EQUAL", "T_SR_EQUAL"]),
("left", ["?", ":"]),
("left", ["T_BOOLEAN_OR"]),
("left", ["T_BOOLEAN_AND"]),
("left", ["!"]),
("left", ["|"]),
("left", ["^"]),
("left", ["&"]),
("nonassoc", ["T_IS_EQUAL", "T_IS_NOT_EQUAL",
"T_IS_IDENTICAL", "T_IS_NOT_IDENTICAL"]),
("nonassoc", ['<', "T_IS_SMALLER_OR_EQUAL", '>', "T_IS_GREATER_OR_EQUAL"]),
("left", ["T_SL", "T_SR"]),
("left", ["+", "-", "."]),
("left", ["*", "/", "%"]),
("nonassoc", ["T_INSTANCEOF"]),
("right", ['~', 'T_INC', 'T_DEC', 'T_INT_CAST',
'T_DOUBLE_CAST', 'T_STRING_CAST',
'T_UNICODE_CAST', 'T_BINARY_CAST', 'T_ARRAY_CAST',
'T_OBJECT_CAST', 'T_BOOL_CAST', 'T_UNSET_CAST', '@']),
("right", ["["]),
("nonassoc", ["T_NEW", "T_CLONE"]),
# XXX TODO: find out why this doesn't work
# ("left", ["T_ELSEIF"]),
# ("left", ["T_ELSE"]),
# ("left", ["T_ENDIF"]),
("right", ["T_STATIC", "T_ABSTRACT", "T_FINAL",
"T_PRIVATE", "T_PROTECTED", "T_PUBLIC"]),
]
class BaseLexer(object):
lineno = 0
buf = None
pos = 0
last_token = None
class LexerError(ParsingError):
""" Lexer error exception.
source_pos:
Line number in the input where the error occurred.
"""
def __init__(self, message, source_pos):
self.message = message
self.source_pos = source_pos
def __str__(self):
return 'LexerError("%s", %d)' % (self.message, self.source_pos)
(CONTEXT_NORMAL, CONTEXT_OBJECT_ACCESS,
CONTEXT_DOUBLEQUOTE, CONTEXT_CURLY_BRACES, CONTEXT_BRACKETS,
CONTEXT_HEREDOC, CONTEXT_BACKTICK) = range(7)
""" How this goes: we start with a normal context (CONTEXT_NORMAL) and some
tokens change the context. If we change the context to a new one, we push
the old one on the stack. Some tokens will pop stuff from the stack.
"""
class Lexer(BaseLexer):
""" A simple regex-based lexer/tokenizer.
See below for an example of usage.
"""
def __init__(self, use_rsre=False):
""" Create a lexer.
rules:
A list of rules. Each rule is a `regex, type`
pair, where `regex` is the regular expression used
to recognize the token and `type` is the type
of the token to return when it's recognized.
"""
self.use_rsre = use_rsre
# initialize rules for all the possible contexts
self.rules = [None for _ in range(7)]
self.rules[CONTEXT_NORMAL] = self._compile_rules(RULES)
rules_no_kwds = self.rules[CONTEXT_NORMAL][len(KEYWORDS):]
self.rules[CONTEXT_OBJECT_ACCESS] = rules_no_kwds
rules = self._compile_rules(RULES_FOR_DOUBLE_QUOTE)
self.rules[CONTEXT_DOUBLEQUOTE] = rules
self.rules[CONTEXT_BACKTICK] = self._compile_rules(RULES_FOR_BACKTICK)
self.rules[CONTEXT_CURLY_BRACES] = self.rules[CONTEXT_NORMAL]
self.rules[CONTEXT_BRACKETS] = self._compile_rules(RULES_FOR_BRACKETS)
self.rules[CONTEXT_HEREDOC] = self._compile_rules(RULES_FOR_HEREDOC)
self.context_stack = [CONTEXT_NORMAL]
self.var_re = self._compile("\{[a-zA-Z_][a-zA-Z_0-9]*")
self.heredoc_finish = -1
self.heredoc_lgt = 0
def _compile(self, re):
if self.use_rsre:
from rpython.rlib.rsre.rsre_re import compile, M, DOTALL, IGNORECASE
else:
from re import compile, M, DOTALL, IGNORECASE
return compile(re, IGNORECASE | M | DOTALL)
def _compile_rules(self, rules):
compiled = []
for regex, type in rules:
compiled.append((self._compile(regex), type))
return compiled
def input(self, buf, pos, lineno):
""" Initialize the lexer with a buffer as input.
"""
self.buf = buf
self.pos = pos
self.lineno = lineno
self.last_token = None
def _scan_double_quote(self, tok):
p = 1
v = tok.value
if v[0] == "b":
p += 1
backslash = False
while p < len(v):
c = v[p]
if not backslash:
if c == '"':
# not encountered anything funny, this is just T_STRING
return tok
if (((c == '$' and p < len(v) - 1 and v[p + 1].isalpha()) or
(c == "{" and p < len(v) - 1 and v[p + 1] == "$") or
(c == "$" and p < len(v) - 1 and v[p + 1] == "{"))):
p += 1
self.context_stack.append(CONTEXT_DOUBLEQUOTE)
return Token('"', '"', self.lineno)
elif c == '\\':
backslash = True
else:
backslash = False
p += 1
assert False
def _gettmpbuf(self, pos):
if self.use_rsre:
return None
if self.heredoc_finish >= 0:
return self.buf[pos:self.heredoc_finish]
else:
return self.buf[pos:]
def match(self, token_regex, tmp_buf, pos):
if self.use_rsre:
if self.heredoc_finish >= 0:
endpos = self.heredoc_finish
else:
endpos = sys.maxint
m = token_regex.match(self.buf, pos=pos, endpos=endpos)
else:
m = token_regex.match(tmp_buf)
return m
def _getstartend(self, m):
if self.use_rsre:
start = m.start()
end = m.end()
else:
start = self.pos + m.start()
end = self.pos + m.end()
assert start >= 0
assert end >= 0
return start, end
def token(self):
""" Return the next token (a Token object) found in the
input buffer. None is returned if the end of the
buffer was reached.
In case of a lexing error (the current chunk of the
buffer matches no rule), a LexerError is raised with
the position of the error.
"""
if self.pos >= len(self.buf):
if len(self.context_stack) != 1:
raise LexerError("contexts are not closed", -1)
return None
else:
if self.pos >= self.heredoc_finish and self.heredoc_finish != -1:
start = self.pos
end = self.pos + self.heredoc_lgt
assert start >= 0
assert end >= 0
tok = Token('T_END_HEREDOC', self.buf[start:end], self.lineno)
self.pos = self.heredoc_finish + self.heredoc_lgt
self.heredoc_finish = -1
self.heredoc_lgt = 0
self.context_stack.pop()
return tok
tmp_buf = self._gettmpbuf(self.pos)
ctx = self.context_stack[-1]
rules = self.rules[ctx]
for token_regex, token_type in rules:
pos = self.pos
assert pos >= 0
m = self.match(token_regex, tmp_buf, pos)
if m:
start, end = self._getstartend(m)
value = self.buf[start:end]
if token_type == 'H_NEW_LINE':
self.lineno += 1
elif token_type == 'T_COMMENT':
self.lineno += value.count('\n')
elif token_type == 'T_CONSTANT_ENCAPSED_STRING':
self.lineno += value.count("\n")
# tokens changing the context
tok = Token(token_type, value, self.lineno)
tok = self.maybe_change_context(ctx, tok, token_type,
end)
self.last_token = token_type
return tok
# if we're here, no rule matched
raise LexerError("unknown token", self.lineno)
def maybe_change_context(self, ctx, tok, token_type, endpos):
# print self.context_stack, tok.name, tok.value
if ctx == CONTEXT_OBJECT_ACCESS:
self.context_stack.pop()
elif (ctx == CONTEXT_NORMAL and
token_type == "T_CONSTANT_ENCAPSED_STRING" and
(tok.value[0] == '"' or tok.value[:2] == 'b"')):
newtok = self._scan_double_quote(tok)
if newtok.name == '"':
# we have to rewind a little
ofs = 1
if tok.value[0] == 'b':
ofs += 1
self.pos = endpos - len(tok.value) + ofs
else:
self.pos = endpos
return newtok
elif ctx == CONTEXT_BACKTICK and tok.value[0] == '`':
self.context_stack.pop()
elif ctx == CONTEXT_NORMAL and token_type == '`':
self.context_stack.append(CONTEXT_BACKTICK)
elif ctx == CONTEXT_BACKTICK and token_type == '"':
self.context_stack.append(CONTEXT_DOUBLEQUOTE)
elif ctx == CONTEXT_BACKTICK and token_type == '`':
self.context_stack.pop()
elif ctx == CONTEXT_NORMAL and token_type == "T_START_HEREDOC":
lgt = 3
if tok.value.startswith("b"):
lgt += 1
start = lgt
end = len(tok.value) - 1
while tok.value[start] in (' ', '\t'):
start += 1
while tok.value[end] in (' ', '\t'):
end -= 1
assert end >= 0
marker = tok.value[start:end]
if marker.startswith('"'):
if not marker.endswith('"'):
raise LexerError("wrong marker", self.lineno)
end = len(marker) - 1
assert end >= 0
marker = marker[1:end]
heredoc_marker = "\n" + marker + ";"
start = self.pos + len(tok.value) - 1
assert start >= 0
self.heredoc_finish = self.buf.find(heredoc_marker, start)
self.heredoc_lgt = len(heredoc_marker) - 1
if self.heredoc_finish == -1:
# XXX case where the heredoc does not end with ";":
# the heredoc is then an argument and ends like ... HEND );
heredoc_marker = "\n" + marker
self.heredoc_finish = self.buf.find(heredoc_marker, start)
if self.heredoc_finish == -1:
raise LexerError("unfinished heredoc", self.lineno)
self.heredoc_lgt = len(heredoc_marker)
self.context_stack.append(CONTEXT_HEREDOC)
elif ctx == CONTEXT_DOUBLEQUOTE and token_type == '"':
self.context_stack.pop()
elif ctx == CONTEXT_BACKTICK and token_type == '"':
self.context_stack.pop()
elif ((ctx == CONTEXT_DOUBLEQUOTE or ctx == CONTEXT_HEREDOC
or ctx == CONTEXT_BACKTICK) and
token_type == "T_DOLLAR_OPEN_CURLY_BRACES"):
self.pos = endpos - 1
self.context_stack.append(CONTEXT_CURLY_BRACES)
return tok
elif (ctx == CONTEXT_CURLY_BRACES and token_type == "{"
and self.last_token == "T_DOLLAR_OPEN_CURLY_BRACES"):
# instead, we recognize it as a variable
tmp_buf = self._gettmpbuf(self.pos)
m = self.match(self.var_re, tmp_buf, self.pos)
assert m is not None
start, end = self._getstartend(m)
tok = Token("T_VARIABLE", self.buf[start:end], tok.lineno)
self.pos = end
return tok
elif ((ctx == CONTEXT_DOUBLEQUOTE or ctx == CONTEXT_HEREDOC)
and token_type == "T_VARIABLE"):
# only if the next one is [
if self.buf[endpos] == "[":
self.context_stack.append(CONTEXT_BRACKETS)
elif ((ctx == CONTEXT_DOUBLEQUOTE or ctx == CONTEXT_HEREDOC) and
token_type == "T_OBJECT_OPERATOR"):
if (self.last_token != "T_VARIABLE" or
not self.buf[self.pos + 2].isalpha()):
tok = Token("T_ENCAPSED_AND_WHITESPACE", tok.value,
tok.lineno)
else:
self.context_stack.append(CONTEXT_OBJECT_ACCESS)
elif token_type == "T_OBJECT_OPERATOR":
self.context_stack.append(CONTEXT_OBJECT_ACCESS)
elif ctx == CONTEXT_BRACKETS and token_type == "]":
self.context_stack.pop()
elif ctx == CONTEXT_CURLY_BRACES and token_type == "}":
# XXX this is incorrect but we don't care at the moment;
# if someone inserts } inside the expression we would have to do
# something smarter, like scanning the grammar until the matching brace
self.context_stack.pop()
self.pos = endpos
return tok
def tokens(self):
""" Returns an iterator to the tokens found in the buffer.
"""
while 1:
tok = self.token()
# print tok.name, tok.value
if tok is None:
break
while tok.name in ('H_NEW_LINE', 'H_WHITESPACE',
'T_COMMENT', 'H_TABULATURE'):
tok = self.token()
if tok is None:
break
yield tok
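# Minimal usage sketch, not part of the original module: the sample snippet
# below is made up, and real callers drive the lexer through the parser, but
# this shows the input()/tokens() flow with the pure-Python re backend.
if __name__ == '__main__':
    sample = '$greeting = "hello " . $name;'
    lexer = Lexer(use_rsre=False)
    lexer.input(sample, 0, 1)
    for tok in lexer.tokens():
        print('%s %r' % (tok.name, tok.value))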
|
#!/usr/bin/env python
# coding=utf-8
# Author: Yao
# Mail: zhangyao215@mails.ucas.ac.cn
import os
import argparse
import numpy as np
import pandas as pd
import nibabel as nib
from medpy import metric
from collections import OrderedDict
from scipy.ndimage.measurements import label
from metrics import *
join = os.path.join
def print_case_results(filename, label, scores):
print('{} label: {}'.format(filename, label))
for metric in metric_list:
print(metric, scores[filename][label][metric])
def print_summary_results(label, scores):
print('{}'.format(label))
for metric in metric_list:
print('mean_'+metric, scores[label]['mean_'+metric])
print('std_'+metric, scores[label]['std_'+metric])
print('len: {}'.format(len(scores[label][metric])))
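# Hedged sketch only: `dice` and the other metric names come from the local
# metrics module via the star import above, and their real signatures differ
# (as used below they also take a ConfusionMatrix and nan_for_nonexisting);
# this stand-alone helper just shows the underlying per-label computation on
# two boolean masks.
def _dice_sketch(pred_mask, gt_mask):
    pred_mask = np.asarray(pred_mask, dtype=bool)
    gt_mask = np.asarray(gt_mask, dtype=bool)
    intersection = np.logical_and(pred_mask, gt_mask).sum()
    denom = pred_mask.sum() + gt_mask.sum()
    # return 0.0 when both masks are empty (presumably what
    # nan_for_nonexisting=False does in the real module)
    return 2.0 * intersection / denom if denom > 0 else 0.0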
if __name__ == "__main__":
'''
parser = argparse.ArgumentParser()
parser.add_argument("pred_path")
parser.add_argument("gt_path")
parser.add_argument("n_label", type=int)
args = parser.parse_args()
pred_path = args.pred_path
gt_path = args.gt_path
n_label = args.n_label
pred_path = join('./nnUNet_base/nnUNet_results/nnUNet', args.model, args.task, 'nnUNetTrainer__nnUNetPlans', 'fold_{}'.format(args.fold) if len(args.fold)==1 else 'all', 'validation')
gt_path = join('./nnUNet_base/nnUNet_raw_splitted', args.task, 'labelsTr')
'''
task = 'Task000_KiTSbaseline'
model = 'nnUNetTrainerV2_ImgDAObjAllinter10percents__nnUNetPlansv2.1'
fold = 'fold_0'
pred_path = join('../data/RESULTS_FOLDER/nnUNet/3d_fullres', task, model, fold, 'validation_raw')
gt_path = join('../data/nnUNet_raw_data', task, 'labelsTr')
n_label = 3
# metric_list = ['dice', 'jaccard', 'hausdorff_distance_95', 'avg_surface_distance_symmetric', 'precision', 'recall']
metric_list = ['dice', 'hausdorff_distance_95']
# metric_list = ['dice']
label_range = range(1, n_label) # start with 1 to exclude background
exclude_list = ['00002', '00020', '00045', '00094', '00115', '00124']
# exclude_list = []
file_list = np.sort(os.listdir(pred_path))
file_list = [x for x in file_list if x.endswith('nii.gz') and x.split('.')[0].split('_')[1] not in exclude_list]
print('files len:', len(file_list))
scores = {}
for i in label_range:
scores[i] = {}
for metric in metric_list:
scores[i][metric] = []
#################### aggregate results of each case
for filename in file_list:
scores[filename] = {}
affine = nib.load(join(gt_path, filename)).affine
ori_pred_volume = nib.load(join(pred_path, filename)).get_fdata()
ori_gt_volume = nib.load(join(gt_path, filename)).get_fdata()
for i in label_range:
scores[filename][i] = {}
confusion_matrix = ConfusionMatrix(ori_pred_volume>=i, ori_gt_volume>=i)
# label does not exist
# np.sum(ori_gt_volume[ori_gt_volume == i]) == 0:
# continue
for metric in metric_list:
scores[filename][i][metric] = eval(metric)(ori_pred_volume>=i, ori_gt_volume>=i, confusion_matrix, nan_for_nonexisting=False)
scores[i][metric].append(scores[filename][i][metric])
print_case_results(filename, i, scores)
##########################################
######### aggregate results as a summary
for i in label_range:
for metric in metric_list:
scores[i]['mean_'+metric] = np.mean(scores[i][metric])
scores[i]['std_'+metric] = np.std(scores[i][metric])
print_summary_results(i, scores)
###################################
########## save as csv
header = []
for i in label_range:
for metric in metric_list:
header.append(metric+'_for_label'+str(i))
rows = []
for k in file_list:
row = []
for i in label_range:
if len(scores[k][i].values()) > 0:
row += scores[k][i].values()
else:
row += [0] * len(metric_list)
rows.append(row)
row = []
for i in label_range:
for metric in metric_list:
row.append(scores[i]['mean_'+metric])
# row.append(scores[i]['std_'+metric])
rows.append(row)
row = []
for i in label_range:
for metric in metric_list:
# row.append(scores[i]['mean_'+metric])
row.append(scores[i]['std_'+metric])
rows.append(row)
subject_ids = file_list + ['mean', 'std']
df = pd.DataFrame.from_records(rows, columns=header, index=subject_ids)
df.to_csv(join(pred_path, 'results.csv'))
########################
|
#!/usr/bin/python
# coding: utf-8
#
# example:
# $ ./xsvf -c disasm ../xsvf/XC2C64A/idcode_simpler.xsvf
#
import JTAGTAP
import XSVFDecoder
class XSVFDisassembler(XSVFDecoder.XSVFDecoder):
"""
XSVF Disassembler
"""
@staticmethod
def add_arguments(p):
"""Adds the necessary arguments to the parser."""
p.add_argument(
'-n', '--no_bytes',
action='store_true',
# type=bool,
help='Do not output bytes'
' (default=%(default)s)')
def __init__(self, args):
XSVFDecoder.XSVFDecoder.__init__(self, args)
self._args = args
self._current_instruction = 0
self._instruction_handlers = (
self.disasm_xcomplete,
self.disasm_xtdomask,
self.disasm_xsir,
self.disasm_xsdr,
self.disasm_xruntest,
self.disasm_xreserved_5,
self.disasm_xreserved_6,
self.disasm_xrepeat,
self.disasm_xsdrsize,
self.disasm_xsdrtdo,
self.disasm_xsetsdrmasks,
self.disasm_xsdrinc,
self.disasm_xsdrb,
self.disasm_xsdrc,
self.disasm_xsdre,
self.disasm_xsdrtdob,
self.disasm_xsdrtdoc,
self.disasm_xsdrtdoe,
self.disasm_xstate,
self.disasm_xendir,
self.disasm_xenddr,
self.disasm_xsir2,
self.disasm_xcomment,
self.disasm_xwait,
)
@property
def current_instruction(self):
return self._current_instruction
@current_instruction.setter
def current_instruction(self, value):
self._current_instruction = value
def format_first_part(self, s):
"""
Breaks the instruction's formatted bytes into lines of 8 bytes
(24 characters, since each byte is rendered as two hex digits plus a space).
:param s: string of formatted hex bytes
:return: list of strings holding up to 8 bytes each
"""
l = []
while s:
l.append(s[:24])
s = s[24:]
return l
def return_zeroeth(self, l):
"""
Returns the zeroth element of the list with trailing whitespace
stripped, or an empty string when the list is empty (never None).
"""
if l:
l_0 = l[0].rstrip()
else:
l_0 = ""
return l_0
def format_byte_list(self, l):
return ' {:s}'.format(' '.join('{0:02X}'.format(x) for x in l))
def pretty_disasm(self, pars=("",)):
l1 = self.format_first_part(self.current_instruction_string)
l2 = list(pars)
l2[0] = '{:s}{:s}'.format(
self.instruction_name(self.current_instruction),
l2[0])
while l1 or l2:
l1_0 = self.return_zeroeth(l1)
l2_0 = self.return_zeroeth(l2)
if self._args.no_bytes:
if l2_0:
print('{0:s}'.format(l2_0))
else:
if l2_0:
print('{0:<24} {1:s}'.format(l1_0, l2_0))
else:
print('{0:s}'.format(l1_0))
l1 = l1[1:]
l2 = l2[1:]
def disasm_xcomplete(self):
self.pretty_disasm()
def disasm_xtdomask(self):
p = (
'',
self.format_byte_list(self.tdo_mask)
)
self.pretty_disasm(p)
def disasm_xsir(self):
p = (
' {:d} {:s}'.format(
self.sirsize_bits,
self.format_byte_list(self.tdi).strip()),
)
self.pretty_disasm(p)
def disasm_xsdr(self):
p = (
'',
self.format_byte_list(self.tdi)
)
self.pretty_disasm(p)
def disasm_xruntest(self):
p = (
' {:d}'.format(self.runtest),
)
self.pretty_disasm(p)
def disasm_xreserved_5(self):
self.pretty_disasm()
def disasm_xreserved_6(self):
self.pretty_disasm()
def disasm_xrepeat(self):
p = (
' {:d}'.format(self.repeat),
)
self.pretty_disasm(p)
def disasm_xsdrsize(self):
p = (
' {0}'.format(self.sdrsize_bits),
)
self.pretty_disasm(p)
def disasm_xsdrtdo(self):
p = (
'',
self.format_byte_list(self.tdi) + ',',
self.format_byte_list(self.tdo_expected)
)
self.pretty_disasm(p)
def disasm_xsetsdrmasks(self):
p = (
'',
self.format_byte_list(self.address_mask) + ',',
self.format_byte_list(self.data_mask)
)
self.pretty_disasm(p)
def disasm_xsdrinc(self):
p = [
'',
self.format_byte_list(self.xsdrinc_start_address) + ',',
' {:d},'.format(self.xsdrinc_num_times)
]
n = self.xsdrinc_num_times
for l in self.xsdrinc_data_list:
s = self.format_byte_list(l)
n -= 1
# Adds a comma, unless it's the last one
if n:
s += ','
p.append(s)
self.pretty_disasm(p)
def disasm_xsdrb(self):
p = (
'',
self.format_byte_list(self.tdi)
)
self.pretty_disasm(p)
def disasm_xsdrc(self):
p = (
'',
self.format_byte_list(self.tdi)
)
self.pretty_disasm(p)
def disasm_xsdre(self):
p = (
'',
self.format_byte_list(self.tdi)
)
self.pretty_disasm(p)
def disasm_xsdrtdob(self):
p = (
'',
self.format_byte_list(self.tdi) + ',',
self.format_byte_list(self.tdo_expected)
)
self.pretty_disasm(p)
def disasm_xsdrtdoc(self):
p = (
'',
self.format_byte_list(self.tdi) + ',',
self.format_byte_list(self.tdo_expected)
)
self.pretty_disasm(p)
def disasm_xsdrtdoe(self):
p = (
'',
self.format_byte_list(self.tdi) + ',',
self.format_byte_list(self.tdo_expected)
)
self.pretty_disasm(p)
def disasm_xstate(self):
p = (
' {:s}'.format(JTAGTAP.JTAGTAP.state_name(self.next_state)),
)
self.pretty_disasm(p)
def disasm_xendir(self):
p = (
' {:s}'.format(JTAGTAP.JTAGTAP.state_name(self.endir_state)),
)
self.pretty_disasm(p)
def disasm_xenddr(self):
p = (
' {:s}'.format(JTAGTAP.JTAGTAP.state_name(self.enddr_state)),
)
self.pretty_disasm(p)
def disasm_xsir2(self):
p = (
' {:d}'.format(self.sirsize_bits),
self.format_byte_list(self.tdi)
)
self.pretty_disasm(p)
def disasm_xcomment(self):
p = (
' "{:s}"'.format(self.comment),
)
self.pretty_disasm(p)
def disasm_xwait(self):
p = (
' {:s} {:s} {:d}'.format(
JTAGTAP.JTAGTAP.state_name(self.wait_start_state),
JTAGTAP.JTAGTAP.state_name(self.wait_end_state),
self.wait_time_usecs),
)
self.pretty_disasm(p)
#
def instruction_handler(self, instruction):
self.current_instruction = instruction
self._instruction_handlers[instruction]()
def disasm_all_files(self, fd_list):
return self.decode_all_files(fd_list)
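# Minimal standalone usage sketch; the real entry point is the xsvf wrapper
# script shown in the header comment. The 'files' argument and passing open
# binary file objects to disasm_all_files are assumptions about what
# XSVFDecoder.decode_all_files expects, not something taken from that module.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Disassemble XSVF files')
    XSVFDisassembler.add_arguments(parser)
    parser.add_argument('files', nargs='+', help='XSVF files to disassemble')
    args = parser.parse_args()
    disassembler = XSVFDisassembler(args)
    fd_list = [open(name, 'rb') for name in args.files]
    try:
        disassembler.disasm_all_files(fd_list)
    finally:
        for fd in fd_list:
            fd.close()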
|