content stringlengths 5 1.05M |
|---|
import io
import os
import re
import time
from unit.applications.lang.java import TestApplicationJava
from unit.option import option
from unit.utils import public_dir
class TestJavaApplication(TestApplicationJava):
prerequisites = {'modules': {'java': 'all'}}
def test_java_conf_error(self, temp_dir, skip_alert):
    """An invalid 'unit_jars' path must be rejected by the configuration API."""
    # These alerts are expected side effects of the intentionally broken
    # configuration, so they must not fail the test run.
    skip_alert(
        r'realpath.*failed',
        r'failed to apply new conf',
        r'application setup failed',
    )
    assert 'error' in self.conf(
        {
            "listeners": {"*:7080": {"pass": "applications/app"}},
            "applications": {
                "app": {
                    "type": "java",
                    "processes": 1,
                    "working_directory": option.test_dir + "/java/empty",
                    "webapp": temp_dir + "/java",
                    # points at a nonexistent directory on purpose
                    "unit_jars": temp_dir + "/no_such_dir",
                }
            },
        }
    ), 'conf error'
def test_java_war(self, temp_dir):
self.load('empty_war')
assert 'success' in self.conf(
'"' + temp_dir + '/java/empty.war"',
'/config/applications/empty_war/webapp',
), 'configure war'
assert self.get()['status'] == 200, 'war'
def test_java_application_cookies(self):
    """Cookies sent by the client must be readable by the servlet."""
    self.load('cookies')
    request_headers = {
        'Cookie': 'var1=val1; var2=val2',
        'Host': 'localhost',
        'Connection': 'close',
    }
    reply = self.get(headers=request_headers)['headers']
    assert reply['X-Cookie-1'] == 'val1', 'cookie 1'
    assert reply['X-Cookie-2'] == 'val2', 'cookie 2'
def test_java_application_filter(self):
self.load('filter')
headers = self.get()['headers']
assert headers['X-Filter-Before'] == '1', 'filter before'
assert headers['X-Filter-After'] == '1', 'filter after'
assert (
self.get(url='/test')['headers']['X-Filter-After'] == '0'
), 'filter after 2'
def test_java_application_get_variables(self):
self.load('get_params')
headers = self.get(url='/?var1=val1&var2=&var4=val4&var4=foo')[
'headers'
]
assert headers['X-Var-1'] == 'val1', 'GET variables'
assert headers['X-Var-2'] == 'true', 'GET variables 2'
assert headers['X-Var-3'] == 'false', 'GET variables 3'
assert (
headers['X-Param-Names'] == 'var4 var2 var1 '
), 'getParameterNames'
assert headers['X-Param-Values'] == 'val4 foo ', 'getParameterValues'
assert (
headers['X-Param-Map'] == 'var2= var1=val1 var4=val4,foo '
), 'getParameterMap'
def test_java_application_post_variables(self):
self.load('post_params')
headers = self.post(
headers={
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'localhost',
'Connection': 'close',
},
body='var1=val1&var2=',
)['headers']
assert headers['X-Var-1'] == 'val1', 'POST variables'
assert headers['X-Var-2'] == 'true', 'POST variables 2'
assert headers['X-Var-3'] == 'false', 'POST variables 3'
def test_java_application_session(self):
self.load('session')
headers = self.get(url='/?var1=val1')['headers']
session_id = headers['X-Session-Id']
assert headers['X-Var-1'] == 'null', 'variable empty'
assert headers['X-Session-New'] == 'true', 'session create'
headers = self.get(
headers={
'Host': 'localhost',
'Cookie': 'JSESSIONID=' + session_id,
'Connection': 'close',
},
url='/?var1=val2',
)['headers']
assert headers['X-Var-1'] == 'val1', 'variable'
assert headers['X-Session-New'] == 'false', 'session resume'
assert session_id == headers['X-Session-Id'], 'session same id'
def test_java_application_session_active(self):
self.load('session_inactive')
resp = self.get(
headers={
'X-Interval': '4',
'Host': 'localhost',
'Connection': 'close',
}
)
session_id = resp['headers']['X-Session-Id']
assert resp['status'] == 200, 'session init'
assert resp['headers']['X-Session-Interval'] == '4', 'session interval'
assert (
abs(
self.date_to_sec_epoch(
resp['headers']['X-Session-Last-Access-Time']
)
- self.sec_epoch()
)
< 5
), 'session last access time'
time.sleep(1)
resp = self.get(
headers={
'Host': 'localhost',
'Cookie': 'JSESSIONID=' + session_id,
'Connection': 'close',
}
)
assert resp['headers']['X-Session-Id'] == session_id, 'session active'
session_id = resp['headers']['X-Session-Id']
time.sleep(1)
resp = self.get(
headers={
'Host': 'localhost',
'Cookie': 'JSESSIONID=' + session_id,
'Connection': 'close',
}
)
assert (
resp['headers']['X-Session-Id'] == session_id
), 'session active 2'
time.sleep(2)
resp = self.get(
headers={
'Host': 'localhost',
'Cookie': 'JSESSIONID=' + session_id,
'Connection': 'close',
}
)
assert (
resp['headers']['X-Session-Id'] == session_id
), 'session active 3'
def test_java_application_session_inactive(self):
self.load('session_inactive')
resp = self.get(
headers={
'X-Interval': '1',
'Host': 'localhost',
'Connection': 'close',
}
)
session_id = resp['headers']['X-Session-Id']
time.sleep(3)
resp = self.get(
headers={
'Host': 'localhost',
'Cookie': 'JSESSIONID=' + session_id,
'Connection': 'close',
}
)
assert (
resp['headers']['X-Session-Id'] != session_id
), 'session inactive'
def test_java_application_session_invalidate(self):
self.load('session_invalidate')
resp = self.get()
session_id = resp['headers']['X-Session-Id']
resp = self.get(
headers={
'Host': 'localhost',
'Cookie': 'JSESSIONID=' + session_id,
'Connection': 'close',
}
)
assert (
resp['headers']['X-Session-Id'] != session_id
), 'session invalidate'
def test_java_application_session_listeners(self):
self.load('session_listeners')
headers = self.get(url='/test?var1=val1')['headers']
session_id = headers['X-Session-Id']
assert headers['X-Session-Created'] == session_id, 'session create'
assert headers['X-Attr-Added'] == 'var1=val1', 'attribute add'
headers = self.get(
headers={
'Host': 'localhost',
'Cookie': 'JSESSIONID=' + session_id,
'Connection': 'close',
},
url='/?var1=val2',
)['headers']
assert session_id == headers['X-Session-Id'], 'session same id'
assert headers['X-Attr-Replaced'] == 'var1=val1', 'attribute replace'
headers = self.get(
headers={
'Host': 'localhost',
'Cookie': 'JSESSIONID=' + session_id,
'Connection': 'close',
},
url='/',
)['headers']
assert session_id == headers['X-Session-Id'], 'session same id'
assert headers['X-Attr-Removed'] == 'var1=val2', 'attribute remove'
def test_java_application_jsp(self):
self.load('jsp')
headers = self.get(url='/index.jsp')['headers']
assert headers['X-Unit-JSP'] == 'ok', 'JSP Ok header'
def test_java_application_url_pattern(self):
self.load('url_pattern')
headers = self.get(url='/foo/bar/index.html')['headers']
assert headers['X-Id'] == 'servlet1', '#1 Servlet1 request'
assert (
headers['X-Request-URI'] == '/foo/bar/index.html'
), '#1 request URI'
assert headers['X-Servlet-Path'] == '/foo/bar', '#1 servlet path'
assert headers['X-Path-Info'] == '/index.html', '#1 path info'
headers = self.get(url='/foo/bar/index.bop')['headers']
assert headers['X-Id'] == 'servlet1', '#2 Servlet1 request'
assert (
headers['X-Request-URI'] == '/foo/bar/index.bop'
), '#2 request URI'
assert headers['X-Servlet-Path'] == '/foo/bar', '#2 servlet path'
assert headers['X-Path-Info'] == '/index.bop', '#2 path info'
headers = self.get(url='/baz')['headers']
assert headers['X-Id'] == 'servlet2', '#3 Servlet2 request'
assert headers['X-Request-URI'] == '/baz', '#3 request URI'
assert headers['X-Servlet-Path'] == '/baz', '#3 servlet path'
assert headers['X-Path-Info'] == 'null', '#3 path info'
headers = self.get(url='/baz/index.html')['headers']
assert headers['X-Id'] == 'servlet2', '#4 Servlet2 request'
assert headers['X-Request-URI'] == '/baz/index.html', '#4 request URI'
assert headers['X-Servlet-Path'] == '/baz', '#4 servlet path'
assert headers['X-Path-Info'] == '/index.html', '#4 path info'
headers = self.get(url='/catalog')['headers']
assert headers['X-Id'] == 'servlet3', '#5 Servlet3 request'
assert headers['X-Request-URI'] == '/catalog', '#5 request URI'
assert headers['X-Servlet-Path'] == '/catalog', '#5 servlet path'
assert headers['X-Path-Info'] == 'null', '#5 path info'
headers = self.get(url='/catalog/index.html')['headers']
assert headers['X-Id'] == 'default', '#6 default request'
assert (
headers['X-Request-URI'] == '/catalog/index.html'
), '#6 request URI'
assert (
headers['X-Servlet-Path'] == '/catalog/index.html'
), '#6 servlet path'
assert headers['X-Path-Info'] == 'null', '#6 path info'
headers = self.get(url='/catalog/racecar.bop')['headers']
assert headers['X-Id'] == 'servlet4', '#7 servlet4 request'
assert (
headers['X-Request-URI'] == '/catalog/racecar.bop'
), '#7 request URI'
assert (
headers['X-Servlet-Path'] == '/catalog/racecar.bop'
), '#7 servlet path'
assert headers['X-Path-Info'] == 'null', '#7 path info'
headers = self.get(url='/index.bop')['headers']
assert headers['X-Id'] == 'servlet4', '#8 servlet4 request'
assert headers['X-Request-URI'] == '/index.bop', '#8 request URI'
assert headers['X-Servlet-Path'] == '/index.bop', '#8 servlet path'
assert headers['X-Path-Info'] == 'null', '#8 path info'
headers = self.get(url='/foo/baz')['headers']
assert headers['X-Id'] == 'servlet0', '#9 servlet0 request'
assert headers['X-Request-URI'] == '/foo/baz', '#9 request URI'
assert headers['X-Servlet-Path'] == '/foo', '#9 servlet path'
assert headers['X-Path-Info'] == '/baz', '#9 path info'
headers = self.get()['headers']
assert headers['X-Id'] == 'default', '#10 default request'
assert headers['X-Request-URI'] == '/', '#10 request URI'
assert headers['X-Servlet-Path'] == '/', '#10 servlet path'
assert headers['X-Path-Info'] == 'null', '#10 path info'
headers = self.get(url='/index.bop/')['headers']
assert headers['X-Id'] == 'default', '#11 default request'
assert headers['X-Request-URI'] == '/index.bop/', '#11 request URI'
assert headers['X-Servlet-Path'] == '/index.bop/', '#11 servlet path'
assert headers['X-Path-Info'] == 'null', '#11 path info'
def test_java_application_header(self):
    """Response header handling: UTF-8 names/values, add vs. set semantics."""
    self.load('header')
    headers = self.get()['headers']
    # NOTE(review): the '????'/'???' literals look like mojibake of the
    # original UTF-8 test strings -- verify against the upstream test app.
    assert headers['X-Set-Utf8-Value'] == '????', 'set Utf8 header value'
    assert headers['X-Set-Utf8-Name-???'] == 'x', 'set Utf8 header name'
    assert headers['X-Add-Utf8-Value'] == '????', 'add Utf8 header value'
    assert headers['X-Add-Utf8-Name-???'] == 'y', 'add Utf8 header name'
    assert headers['X-Add-Test'] == 'v1', 'add null header'
    # setHeader(name, null) must remove the header entirely.
    assert 'X-Set-Test1' not in headers, 'set null header'
    assert headers['X-Set-Test2'] == '', 'set empty header'
def test_java_application_content_type(self):
self.load('content_type')
headers = self.get(url='/1')['headers']
assert (
headers['Content-Type'] == 'text/plain;charset=utf-8'
), '#1 Content-Type header'
assert (
headers['X-Content-Type'] == 'text/plain;charset=utf-8'
), '#1 response Content-Type'
assert (
headers['X-Character-Encoding'] == 'utf-8'
), '#1 response charset'
headers = self.get(url='/2')['headers']
assert (
headers['Content-Type'] == 'text/plain;charset=iso-8859-1'
), '#2 Content-Type header'
assert (
headers['X-Content-Type'] == 'text/plain;charset=iso-8859-1'
), '#2 response Content-Type'
assert (
headers['X-Character-Encoding'] == 'iso-8859-1'
), '#2 response charset'
headers = self.get(url='/3')['headers']
assert (
headers['Content-Type'] == 'text/plain;charset=windows-1251'
), '#3 Content-Type header'
assert (
headers['X-Content-Type'] == 'text/plain;charset=windows-1251'
), '#3 response Content-Type'
assert (
headers['X-Character-Encoding'] == 'windows-1251'
), '#3 response charset'
headers = self.get(url='/4')['headers']
assert (
headers['Content-Type'] == 'text/plain;charset=windows-1251'
), '#4 Content-Type header'
assert (
headers['X-Content-Type'] == 'text/plain;charset=windows-1251'
), '#4 response Content-Type'
assert (
headers['X-Character-Encoding'] == 'windows-1251'
), '#4 response charset'
headers = self.get(url='/5')['headers']
assert (
headers['Content-Type'] == 'text/plain;charset=iso-8859-1'
), '#5 Content-Type header'
assert (
headers['X-Content-Type'] == 'text/plain;charset=iso-8859-1'
), '#5 response Content-Type'
assert (
headers['X-Character-Encoding'] == 'iso-8859-1'
), '#5 response charset'
headers = self.get(url='/6')['headers']
assert (
'Content-Type' in headers
) == False, '#6 no Content-Type header'
assert (
'X-Content-Type' in headers
) == False, '#6 no response Content-Type'
assert (
headers['X-Character-Encoding'] == 'utf-8'
), '#6 response charset'
headers = self.get(url='/7')['headers']
assert (
headers['Content-Type'] == 'text/plain;charset=utf-8'
), '#7 Content-Type header'
assert (
headers['X-Content-Type'] == 'text/plain;charset=utf-8'
), '#7 response Content-Type'
assert (
headers['X-Character-Encoding'] == 'utf-8'
), '#7 response charset'
headers = self.get(url='/8')['headers']
assert (
headers['Content-Type'] == 'text/html;charset=utf-8'
), '#8 Content-Type header'
assert (
headers['X-Content-Type'] == 'text/html;charset=utf-8'
), '#8 response Content-Type'
assert (
headers['X-Character-Encoding'] == 'utf-8'
), '#8 response charset'
def test_java_application_welcome_files(self, temp_dir):
    """Welcome-file resolution order for directories with and without servlets."""
    self.load('welcome_files')
    # A directory request without a trailing slash must redirect.
    resp = self.get(url='/dir1')
    assert resp['status'] == 302, 'dir redirect expected'
    resp = self.get(url='/dir1/')
    assert 'This is index.txt.' in resp['body'], 'dir1 index body'
    assert resp['headers']['X-TXT-Filter'] == '1', 'TXT Filter header'
    headers = self.get(url='/dir2/')['headers']
    assert headers['X-Unit-JSP'] == 'ok', 'JSP Ok header'
    assert headers['X-JSP-Filter'] == '1', 'JSP Filter header'
    headers = self.get(url='/dir3/')['headers']
    assert (
        headers['X-App-Servlet'] == '1'
    ), 'URL pattern overrides welcome file'
    headers = self.get(url='/dir4/')['headers']
    assert (
        'X-App-Servlet' not in headers
    ), 'Static welcome file served first'
    headers = self.get(url='/dir5/')['headers']
    assert (
        headers['X-App-Servlet'] == '1'
    ), 'Servlet for welcome file served when no static file found'
def test_java_application_request_listeners(self):
self.load('request_listeners')
headers = self.get(url='/test1')['headers']
assert (
headers['X-Request-Initialized'] == '/test1'
), 'request initialized event'
assert headers['X-Request-Destroyed'] == '', 'request destroyed event'
assert headers['X-Attr-Added'] == '', 'attribute added event'
assert headers['X-Attr-Removed'] == '', 'attribute removed event'
assert headers['X-Attr-Replaced'] == '', 'attribute replaced event'
headers = self.get(url='/test2?var1=1')['headers']
assert (
headers['X-Request-Initialized'] == '/test2'
), 'request initialized event'
assert (
headers['X-Request-Destroyed'] == '/test1'
), 'request destroyed event'
assert headers['X-Attr-Added'] == 'var=1;', 'attribute added event'
assert headers['X-Attr-Removed'] == 'var=1;', 'attribute removed event'
assert headers['X-Attr-Replaced'] == '', 'attribute replaced event'
headers = self.get(url='/test3?var1=1&var2=2')['headers']
assert (
headers['X-Request-Initialized'] == '/test3'
), 'request initialized event'
assert (
headers['X-Request-Destroyed'] == '/test2'
), 'request destroyed event'
assert headers['X-Attr-Added'] == 'var=1;', 'attribute added event'
assert headers['X-Attr-Removed'] == 'var=2;', 'attribute removed event'
assert (
headers['X-Attr-Replaced'] == 'var=1;'
), 'attribute replaced event'
headers = self.get(url='/test4?var1=1&var2=2&var3=3')['headers']
assert (
headers['X-Request-Initialized'] == '/test4'
), 'request initialized event'
assert (
headers['X-Request-Destroyed'] == '/test3'
), 'request destroyed event'
assert headers['X-Attr-Added'] == 'var=1;', 'attribute added event'
assert headers['X-Attr-Removed'] == '', 'attribute removed event'
assert (
headers['X-Attr-Replaced'] == 'var=1;var=2;'
), 'attribute replaced event'
def test_java_application_request_uri_forward(self):
self.load('forward')
resp = self.get(
url='/fwd?uri=%2Fdata%2Ftest%3Furi%3Dnew_uri%26a%3D2%26b%3D3&a=1&c=4'
)
headers = resp['headers']
assert (
headers['X-REQUEST-Id'] == 'fwd'
), 'initial request servlet mapping'
assert (
headers['X-Forward-To'] == '/data/test?uri=new_uri&a=2&b=3'
), 'forwarding triggered'
assert (
headers['X-REQUEST-Param-uri'] == '/data/test?uri=new_uri&a=2&b=3'
), 'original uri parameter'
assert headers['X-REQUEST-Param-a'] == '1', 'original a parameter'
assert headers['X-REQUEST-Param-c'] == '4', 'original c parameter'
assert (
headers['X-FORWARD-Id'] == 'data'
), 'forward request servlet mapping'
assert (
headers['X-FORWARD-Request-URI'] == '/data/test'
), 'forward request uri'
assert (
headers['X-FORWARD-Servlet-Path'] == '/data'
), 'forward request servlet path'
assert (
headers['X-FORWARD-Path-Info'] == '/test'
), 'forward request path info'
assert (
headers['X-FORWARD-Query-String'] == 'uri=new_uri&a=2&b=3'
), 'forward request query string'
assert (
headers['X-FORWARD-Param-uri']
== 'new_uri,/data/test?uri=new_uri&a=2&b=3'
), 'forward uri parameter'
assert headers['X-FORWARD-Param-a'] == '2,1', 'forward a parameter'
assert headers['X-FORWARD-Param-b'] == '3', 'forward b parameter'
assert headers['X-FORWARD-Param-c'] == '4', 'forward c parameter'
assert (
headers['X-javax.servlet.forward.request_uri'] == '/fwd'
), 'original request uri'
assert (
headers['X-javax.servlet.forward.context_path'] == ''
), 'original request context path'
assert (
headers['X-javax.servlet.forward.servlet_path'] == '/fwd'
), 'original request servlet path'
assert (
headers['X-javax.servlet.forward.path_info'] == 'null'
), 'original request path info'
assert (
headers['X-javax.servlet.forward.query_string']
== 'uri=%2Fdata%2Ftest%3Furi%3Dnew_uri%26a%3D2%26b%3D3&a=1&c=4'
), 'original request query'
assert (
'Before forwarding' in resp['body']
) == False, 'discarded data added before forward() call'
assert (
'X-After-Forwarding' in headers
) == False, 'cannot add headers after forward() call'
assert (
'After forwarding' in resp['body']
) == False, 'cannot add data after forward() call'
def test_java_application_named_dispatcher_forward(self):
self.load('forward')
resp = self.get(url='/fwd?disp=name&uri=data')
headers = resp['headers']
assert (
headers['X-REQUEST-Id'] == 'fwd'
), 'initial request servlet mapping'
assert headers['X-Forward-To'] == 'data', 'forwarding triggered'
assert (
headers['X-FORWARD-Id'] == 'data'
), 'forward request servlet mapping'
assert (
headers['X-FORWARD-Request-URI'] == '/fwd'
), 'forward request uri'
assert (
headers['X-FORWARD-Servlet-Path'] == '/fwd'
), 'forward request servlet path'
assert (
headers['X-FORWARD-Path-Info'] == 'null'
), 'forward request path info'
assert (
headers['X-FORWARD-Query-String'] == 'disp=name&uri=data'
), 'forward request query string'
assert (
headers['X-javax.servlet.forward.request_uri'] == 'null'
), 'original request uri'
assert (
headers['X-javax.servlet.forward.context_path'] == 'null'
), 'original request context path'
assert (
headers['X-javax.servlet.forward.servlet_path'] == 'null'
), 'original request servlet path'
assert (
headers['X-javax.servlet.forward.path_info'] == 'null'
), 'original request path info'
assert (
headers['X-javax.servlet.forward.query_string'] == 'null'
), 'original request query'
assert (
'Before forwarding' in resp['body']
) == False, 'discarded data added before forward() call'
assert (
'X-After-Forwarding' in headers
) == False, 'cannot add headers after forward() call'
assert (
'After forwarding' in resp['body']
) == False, 'cannot add data after forward() call'
def test_java_application_request_uri_include(self):
self.load('include')
resp = self.get(url='/inc?uri=/data/test')
headers = resp['headers']
body = resp['body']
assert (
headers['X-REQUEST-Id'] == 'inc'
), 'initial request servlet mapping'
assert headers['X-Include'] == '/data/test', 'including triggered'
assert (
'X-INCLUDE-Id' in headers
) == False, 'unable to add headers in include request'
assert (
'javax.servlet.include.request_uri: /data/test' in body
) == True, 'include request uri'
#assert (
# 'javax.servlet.include.context_path: ' in body
#) == True, 'include request context path'
assert (
'javax.servlet.include.servlet_path: /data' in body
) == True, 'include request servlet path'
assert (
'javax.servlet.include.path_info: /test' in body
) == True, 'include request path info'
assert (
'javax.servlet.include.query_string: null' in body
) == True, 'include request query'
assert (
'Before include' in body
) == True, 'preserve data added before include() call'
assert (
headers['X-After-Include'] == 'you-should-see-this'
), 'add headers after include() call'
assert (
'After include' in body
) == True, 'add data after include() call'
def test_java_application_named_dispatcher_include(self):
self.load('include')
resp = self.get(url='/inc?disp=name&uri=data')
headers = resp['headers']
body = resp['body']
assert (
headers['X-REQUEST-Id'] == 'inc'
), 'initial request servlet mapping'
assert headers['X-Include'] == 'data', 'including triggered'
assert (
'X-INCLUDE-Id' in headers
) == False, 'unable to add headers in include request'
assert (
'javax.servlet.include.request_uri: null' in body
) == True, 'include request uri'
#assert (
# 'javax.servlet.include.context_path: null' in body
#) == True, 'include request context path'
assert (
'javax.servlet.include.servlet_path: null' in body
) == True, 'include request servlet path'
assert (
'javax.servlet.include.path_info: null' in body
) == True, 'include request path info'
assert (
'javax.servlet.include.query_string: null' in body
) == True, 'include request query'
assert (
'Before include' in body
) == True, 'preserve data added before include() call'
assert (
headers['X-After-Include'] == 'you-should-see-this'
), 'add headers after include() call'
assert (
'After include' in body
) == True, 'add data after include() call'
def test_java_application_path_translation(self):
self.load('path_translation')
headers = self.get(url='/pt/test?path=/')['headers']
assert headers['X-Servlet-Path'] == '/pt', 'matched servlet path'
assert headers['X-Path-Info'] == '/test', 'the rest of the path'
assert (
headers['X-Path-Translated']
== headers['X-Real-Path'] + headers['X-Path-Info']
), 'translated path is the app root + path info'
assert (
headers['X-Resource-Paths'].endswith('/WEB-INF/, /index.html]')
== True
), 'app root directory content'
assert (
headers['X-Resource-As-Stream'] == 'null'
), 'no resource stream for root path'
headers = self.get(url='/test?path=/none')['headers']
assert headers['X-Servlet-Path'] == '/test', 'matched whole path'
assert (
headers['X-Path-Info'] == 'null'
), 'the rest of the path is null, whole path matched'
assert (
headers['X-Path-Translated'] == 'null'
), 'translated path is null because path info is null'
assert (
headers['X-Real-Path'].endswith('/none') == True
), 'read path is not null'
assert headers['X-Resource-Paths'] == 'null', 'no resource found'
assert headers['X-Resource-As-Stream'] == 'null', 'no resource stream'
def test_java_application_query_string(self):
    """The raw query string must reach the servlet unchanged."""
    self.load('query_string')
    resp = self.get(url='/?a=b')
    assert resp['headers']['X-Query-String'] == 'a=b', 'query string'
def test_java_application_query_empty(self):
self.load('query_string')
assert (
self.get(url='/?')['headers']['X-Query-String'] == ''
), 'query string empty'
def test_java_application_query_absent(self):
self.load('query_string')
assert (
self.get()['headers']['X-Query-String'] == 'null'
), 'query string absent'
def test_java_application_empty(self):
self.load('empty')
assert self.get()['status'] == 200, 'empty'
def test_java_application_keepalive_body(self):
self.load('mirror')
assert self.post()['status'] == 200, 'init'
body = '0123456789' * 500
(resp, sock) = self.post(
headers={
'Connection': 'keep-alive',
'Content-Type': 'text/html',
'Host': 'localhost',
},
start=True,
body=body,
read_timeout=1,
)
assert resp['body'] == body, 'keep-alive 1'
body = '0123456789'
resp = self.post(
headers={
'Connection': 'close',
'Content-Type': 'text/html',
'Host': 'localhost',
},
sock=sock,
body=body,
)
assert resp['body'] == body, 'keep-alive 2'
def test_java_application_http_10(self):
self.load('empty')
assert self.get(http_10=True)['status'] == 200, 'HTTP 1.0'
def test_java_application_no_method(self):
self.load('empty')
assert self.post()['status'] == 405, 'no method'
def test_java_application_get_header(self):
self.load('get_header')
assert (
self.get(
headers={
'X-Header': 'blah',
'Content-Type': 'text/html',
'Host': 'localhost',
'Connection': 'close',
}
)['headers']['X-Reply']
== 'blah'
), 'get header'
def test_java_application_get_header_empty(self):
self.load('get_header')
assert 'X-Reply' not in self.get()['headers'], 'get header empty'
def test_java_application_get_headers(self):
self.load('get_headers')
headers = self.get(
headers={
'X-Header': ['blah', 'blah'],
'Content-Type': 'text/html',
'Host': 'localhost',
'Connection': 'close',
}
)['headers']
assert headers['X-Reply-0'] == 'blah', 'get headers'
assert headers['X-Reply-1'] == 'blah', 'get headers 2'
def test_java_application_get_headers_empty(self):
self.load('get_headers')
assert 'X-Reply-0' not in self.get()['headers'], 'get headers empty'
def test_java_application_get_header_names(self):
self.load('get_header_names')
headers = self.get()['headers']
assert re.search(
r'(?:Host|Connection)', headers['X-Reply-0']
), 'get header names'
assert re.search(
r'(?:Host|Connection)', headers['X-Reply-1']
), 'get header names 2'
assert (
headers['X-Reply-0'] != headers['X-Reply-1']
), 'get header names not equal'
def test_java_application_header_int(self):
self.load('header_int')
headers = self.get(
headers={
'X-Header': '2',
'Content-Type': 'text/html',
'Host': 'localhost',
'Connection': 'close',
}
)['headers']
assert headers['X-Set-Int'] == '1', 'set int header'
assert headers['X-Get-Int'] == '2', 'get int header'
def test_java_application_header_date(self):
self.load('header_date')
date = 'Fri, 15 Mar 2019 14:45:34 GMT'
headers = self.get(
headers={
'X-Header': date,
'Content-Type': 'text/html',
'Host': 'localhost',
'Connection': 'close',
}
)['headers']
assert (
headers['X-Set-Date'] == 'Thu, 01 Jan 1970 00:00:01 GMT'
), 'set date header'
assert headers['X-Get-Date'] == date, 'get date header'
def test_java_application_multipart(self, temp_dir):
self.load('multipart')
reldst = '/uploads'
fulldst = temp_dir + reldst
os.mkdir(fulldst)
public_dir(fulldst)
fields = {
'file': {
'filename': 'sample.txt',
'type': 'text/plain',
'data': io.StringIO('Data from sample file'),
},
'destination': fulldst,
'upload': 'Upload',
}
encoded, content_type = self.multipart_encode(fields)
preamble = 'Preamble. Should be ignored.'
epilogue = 'Epilogue. Should be ignored.'
body = "%s\r\n%s\r\n%s" % (preamble, encoded.decode(), epilogue)
resp = self.post(
headers={
'Content-Type': content_type,
'Host': 'localhost',
'Connection': 'close',
},
body=body,
)
assert resp['status'] == 200, 'multipart status'
assert re.search(
r'sample\.txt created', resp['body']
), 'multipart body'
assert (
self.search_in_log(
r'^Data from sample file$', name=reldst + '/sample.txt'
)
is not None
), 'file created'
def test_java_application_threads(self):
    """Concurrent requests must each be served by a distinct worker thread."""
    self.load('threads')
    assert 'success' in self.conf(
        '4', 'applications/threads/threads'
    ), 'configure 4 threads'
    # Open four connections without reading the responses, so all four
    # delayed requests are in flight at the same time.
    socks = []
    for i in range(4):
        (_, sock) = self.get(
            headers={
                'Host': 'localhost',
                'X-Delay': '2',
                'Connection': 'close',
            },
            no_recv=True,
            start=True,
        )
        socks.append(sock)
        time.sleep(0.25)  # required to avoid greedy request reading
    # Drain each connection and record which thread served it.
    threads = set()
    for sock in socks:
        resp = self.recvall(sock).decode('utf-8')
        self.log_in(resp)
        resp = self._resp_to_dict(resp)
        assert resp['status'] == 200, 'status'
        threads.add(resp['headers']['X-Thread'])
        sock.close()
    # Four sockets -> four distinct X-Thread values expected.
    assert len(socks) == len(threads), 'threads differs'
|
from collections import Counter, defaultdict
from collections.abc import Iterable
from itertools import chain, islice, product
from warnings import warn

import numpy as np

import Bio.Data.CodonTable
from Bio.Seq import Seq
# Build per-genetic-code lookup tables from Biopython's codon tables.
genetic_codes = {}      # code_id -> {codon: amino acid, stop codons as "*"}
codons_for_aa = {}      # code_id -> {amino acid: [codons]}
synonymous_codons = {}  # code_id -> {codon: [codon and its synonyms]}
for code_id, genetic_code in Bio.Data.CodonTable.unambiguous_dna_by_id.items():
    # Copy the forward table before adding stop codons: assigning into
    # genetic_code.forward_table directly would mutate Biopython's shared,
    # module-level CodonTable objects for every other user of the library.
    # Ex: {'TTT': 'F', 'TTC': 'F', 'TTA': 'L', ...}
    table = dict(genetic_code.forward_table)
    for codon in genetic_code.stop_codons:
        table[codon] = "*"
    genetic_codes[code_id] = table
    # Invert the table: amino acid -> list of codons encoding it.
    # Ex: {'F': ['TTT', 'TTC'], ...}
    _codons_for_aa = defaultdict(list)
    for codon, aa in table.items():
        _codons_for_aa[aa].append(codon)
    codons_for_aa[code_id] = dict(_codons_for_aa)
    # For each codon, the list of synonymous codons (itself included).
    # Ex: {'TTT': ['TTT', 'TTC'], ...}
    synonymous_codons[code_id] = {
        codon: codons_for_aa[code_id][genetic_codes[code_id][codon]]
        for codon in genetic_codes[code_id]
    }
def amino_acid_seq(length, frequencies):
    """Generate a random amino acid sequence with the given residue frequencies.

    Args:
        length (int): The length of the amino acid sequence to generate.
        frequencies (dict): A mapping whose first value is a dict of amino
            acid -> frequency, as produced by ``k_mer_frequencies`` with
            ``k=1`` (i.e. ``{1: {"A": 0.2, ...}}``).

    Note:
        The frequencies for the amino acids must sum to 1.

    Returns:
        str: An amino acid sequence with the given frequencies.

    Raises:
        ValueError: When the length is not positive or when the
            probabilities do not sum to 1 (the latter raised by
            ``numpy.random.choice``).

    Example:
        >>> from Bio import SeqIO
        >>> seq = SeqIO.read("beta_lactamase.fasta", "fasta").seq
        >>> frequencies = k_mer_frequencies(seq, 1)
        >>> amino_acid_seq(25, frequencies)
    """
    if length <= 0:
        raise ValueError("Length must be a positive integer")
    # frequencies is keyed by k (e.g. {1: {...}}); use the first k's table.
    aa_table = frequencies[list(frequencies)[0]]
    amino_acids, probabilities = zip(*aa_table.items())
    # Draw all residues in one vectorized call and join once, instead of
    # repeated quadratic string concatenation in a Python loop.
    return "".join(np.random.choice(amino_acids, size=length, p=probabilities))
def amino_acids_to_codons(aa_seq, codon_frequencies, genetic_code=11):
    """Generate a DNA representation of an amino acid sequence.

    Args:
        aa_seq (str): The amino acids to convert to DNA.
        codon_frequencies (dict): A dictionary of codon frequencies for each
            amino acid. For each amino acid, the sum of the frequencies of
            its codons must be 1.
        genetic_code (int, optional): The genetic code to use when converting
            to DNA. Defaults to 11, the standard genetic code.

    Returns:
        str: A DNA sequence with the given codon usage.

    Raises:
        KeyError: When an amino acid is not in the genetic code or a codon
            frequency is missing.

    Example:
        >>> from Bio import SeqIO
        >>> seq = SeqIO.read("sequence.fasta", "fasta").seq
        >>> frequencies = codon_frequencies(seq)
        >>> amino_acids_to_codons("INQTEL", frequencies)
        'ATAAATCAAACCGAACTT'
    """
    codons_dict = codons_for_aa[genetic_code]
    sequence = []
    for aa in aa_seq:
        # Raise immediately, naming the missing key, instead of silently
        # skipping the residue and reporting a generic error afterwards
        # (which also hid missing codon frequencies behind the same message).
        try:
            codons = codons_dict[aa]
            weights = [codon_frequencies[codon] for codon in codons]
        except KeyError as err:
            raise KeyError("Missing codon frequency for %r." % err.args[0]) from err
        sequence.append(np.random.choice(codons, p=weights))
    return "".join(sequence)
def codon_frequencies(seq, mode="absolute", genetic_code=11):
    """Calculate the frequency of each codon in the sequence(s).

    Absolute mode is such that the total of the dictionary's values is equal
    to one. Relative mode is such that the sum of each amino acid's codons'
    frequencies is equal to one.

    Args:
        seq (str or list): The DNA sequence(s).
        mode (str, optional): One of "absolute" or "relative". Defaults to
            "absolute".
        genetic_code (int, optional): The genetic code to use when converting
            to DNA. Defaults to 11, the standard genetic code.

    Returns:
        dict: The frequency of each codon. Every codon of the genetic code is
        present as a key, with frequency 0 when absent from ``seq``.

    Raises:
        ValueError: When a sequence length is not divisible into codons,
            i.e. when the sequence length is not divisible by three, or when
            ``mode`` is not one of "absolute"/"relative".

    Example:
        >>> from Bio import SeqIO
        >>> seq = SeqIO.read("sequence.fasta", "fasta").seq
        >>> codon_frequencies(seq)["CTT"]
        0.0967741935483871
    """
    if isinstance(seq, (list, tuple)):
        # Validate each sequence before concatenating them all.
        for single_seq in seq:
            if len(single_seq) % 3 != 0:
                raise ValueError("Sequence length must be divisible by 3.")
        seq = "".join(str(single_seq) for single_seq in seq)
    if len(seq) % 3 != 0:  # only complete codons are allowed
        raise ValueError("Sequence length must be divisible by 3.")
    seq = str(seq).upper()
    # Slice the sequence into individual codons.
    codons = [seq[i : i + 3] for i in range(0, len(seq), 3)]
    codon_count = Counter(codons)
    frequencies = {
        codon: float(count) / len(codons) for codon, count in codon_count.items()
    }
    # collections.Counter only reports codons that occur. To ensure a 64-D
    # vector, make sure every codon of the genetic code is present.
    for codon in genetic_codes[genetic_code]:
        frequencies.setdefault(codon, 0)
    if mode == "absolute":
        return frequencies
    if mode == "relative":
        relative = {}
        for codon, synonyms in synonymous_codons[genetic_code].items():
            total = sum(frequencies[syn] for syn in synonyms)
            if total:
                # Occurrences of this codon relative to all its synonyms.
                relative[codon] = frequencies[codon] / total
            else:
                # If an amino acid is never used in the reference set, then
                # all of its codons are treated as used equally.
                relative[codon] = 1 / len(synonyms)
        return relative
    raise ValueError("Mode must be either absolute or relative.")
def k_mers(seq, k):
    """Yields all *k*-mers in the input sequence with repeats.
    Args:
        seq (str): The sequence for which to generate *k*-mers.
        k (int): the length of the *k*-mers.
    Yields:
        str: the next *k*-mer
    Raises:
        ValueError: When k <= 0, len(seq) is 0, or k is greater than the length of the sequence.
    Example:
        >>> list(k_mers("GATTACA", 1))
        ['G', 'A', 'T', 'T', 'A', 'C', 'A']
        >>> list(k_mers("GATTACA", 2))
        ['GA', 'AT', 'TT', 'TA', 'AC', 'CA']
        >>> list(k_mers("GATTACA", 3))
        ['GAT', 'ATT', 'TTA', 'TAC', 'ACA']
        >>> list(k_mers("GATTACA", 4))
        ['GATT', 'ATTA', 'TTAC', 'TACA']
    """
    # error checking — cheapest/most specific checks first so an empty seq
    # reports "length may not be zero" rather than a k-vs-length mismatch
    if k <= 0:
        raise ValueError("k may not be <= zero")
    elif not seq:
        raise ValueError("seq length may not be zero")
    elif k > len(seq):
        # original message said "less then"; the condition is k > len(seq)
        raise ValueError(
            "k (%i) may not be greater than length of seq (%i)." % (k, len(seq))
        )
    # sliding window: seed with the first k characters, then shift one at a time
    it = iter(seq)
    window = tuple(islice(it, k))
    if len(window) == k:
        yield "".join(window)
    for ch in it:
        window = window[1:] + (ch,)
        yield "".join(window)
def k_mer_frequencies(
    seq, k, include_missing=True, vector=False, codons=False, genetic_code=11
):
    """Calculates relative frequencies of each *k*-mer in the sequence.
    Args:
        seq (str or list): The sequence(s) for which to generate *k*-mer frequencies.
        k (int or list): the length of the *k*-mer(s).
        include_missing (bool, optional): If True, include missing *k*-mers as having a frequency of 0. Only supports DNA *k*-mers. Defaults to True.
        vector (bool, optional): Return a 1-D Numpy array of the *k*-mer frequencies, ordered by *k*-mers alphabetically. If True, ``include_missing`` must also be True. Defaults to False.
        codons (bool, optional): Whether to include a codon usage entry in the resulting dictionary. Defaults to False.
        genetic_code (int, optional): The genetic code to use when converting to DNA. Defaults to 11, the standard genetic code.
    Returns:
        dict: A dict in which the keys are *k* values and the values are dictionaries mapping *k*-mers to floats of their frequencies. If ``vector`` is True, a 1-D ``numpy.ndarray`` of the frequencies for all *k* values (concatenated in ascending *k* order) is returned instead.
    Raises:
        ValueError: When an invalid value of k is provided or ``include_missing`` is False and ``vector`` is True.
        ValueError: When ``codons`` and ``vector`` are both True.
        ValueError: When ``k`` or ``seq`` is not provided.
    Example:
        >>> k_mer_frequencies("INQTEL", 1, include_missing=False)
        {1: {'E': 0.16666666666666666,
        'I': 0.16666666666666666,
        'L': 0.16666666666666666,
        'N': 0.16666666666666666,
        'Q': 0.16666666666666666,
        'T': 0.16666666666666666}}
        >>> k_mer_frequencies("GATGATGGC", [1, 2], include_missing=False)
        {1: {'A': 0.2222222222222222,
        'C': 0.1111111111111111,
        'G': 0.4444444444444444,
        'T': 0.2222222222222222},
        2: {'AT': 0.25, 'GA': 0.25, 'GC': 0.125, 'GG': 0.125, 'TG': 0.25}}
        >>> k_mer_frequencies(["A", "T"], 1, include_missing=False)
        {1: {'A': 0.5, 'T': 0.5}}
        >>> k_mer_frequencies("GATGATGGC", 2, include_missing=True)
        {2: {'AA': 0,
        'AC': 0,
        'AG': 0,
        'AT': 0.25,
        'CA': 0,
        'CC': 0,
        'CG': 0,
        'CT': 0,
        'GA': 0.25,
        'GC': 0.125,
        'GG': 0.125,
        'GT': 0,
        'TA': 0,
        'TC': 0,
        'TG': 0.25,
        'TT': 0}}
        >>> k_mer_frequencies("GATGATGGC", 2, include_missing=True, vector=True)
        array([0.   , 0.   , 0.   , 0.25 , 0.   , 0.   , 0.   , 0.   , 0.25 ,
        0.125, 0.125, 0.   , 0.   , 0.   , 0.25 , 0.   ])
    """
    # argument validation: the flag combinations below cannot be satisfied
    if not include_missing and vector:
        raise ValueError("May not create vector without including missing kmers.")
    elif not k:  # for when k == 0 or []
        raise ValueError("Must provide a value for k")
    elif not seq:  # for when seq == ""
        raise ValueError("Must provide seq(s)")
    elif codons and vector:
        raise ValueError("Cannot vectorize codons.")
    # ensure there is a list of k values, even if it only has one element
    if not isinstance(k, Iterable):
        k = [k]
    else:
        # sorted so a vector result is ordered by ascending k
        k = sorted(k)
    output = {}
    # ditto for sequence(s)
    if isinstance(seq, (str, bytes, Seq)):
        seq = [seq]
    for _k in k:
        # check the value of k
        if _k < 1:
            raise ValueError("Invalid value of k. May not be less than 1.")
        # get all the k-mers for the seqs (pooled across all input sequences)
        _seqs = []
        for _seq in [list(k_mers(_seq.upper(), _k)) for _seq in seq]:
            _seqs.extend(_seq)
        # determine their frequencies
        count = Counter(_seqs)
        total_k_mer_count = sum(count.values())
        frequencies = {
            k_mer: value / total_k_mer_count for k_mer, value in count.items()
        }
        if include_missing:
            # merge a zero-filled DNA k-mer table under the observed values so
            # every possible k-mer appears (observed values win the merge)
            defaults = {"".join(x): 0 for x in product("ATGC", repeat=_k)}
            frequencies = {**defaults, **frequencies}
        if vector:
            # alphabetical order makes the vector layout deterministic
            frequencies = sorted(list(frequencies.items()), key=lambda x: x[0])
            frequencies = np.fromiter(
                (x[1] for x in frequencies), float, count=len(frequencies)
            )
        output[_k] = frequencies
    if vector:
        # concatenate the per-k vectors into one 1-D array
        return np.array(list(chain.from_iterable([output[_k] for _k in k])))
    # syntactic sugar to make capturing codon usage easier
    if codons:
        output["codons"] = codon_frequencies(seq, genetic_code=genetic_code)
    return output
|
import numpy as np
import pandas as pd
def plot_turbine(HR_coord, power):
    """Draw each turbine as a rotor line segment colored by normalized power.

    Args:
        HR_coord: 2-D array with one row per turbine; columns 0/1 are x/y and
            column 2 is used as the segment orientation angle.
            NOTE(review): by the time this is called below, column 2 holds the
            z (hub height) value, not the yaw in column 3 — confirm intent.
        power: per-turbine power values; normalized by the maximum for color.
    """
    # NOTE(review): `plt` is used here but never imported in this file —
    # requires `import matplotlib.pyplot as plt` before calling.
    for i in range(HR_coord.shape[0]):
        color_value = power[i, :] / np.max(power)
        # endpoints of a 3-unit-long segment perpendicular to the angle
        point1 = [HR_coord[i, 0] - 1.5 * np.sin(HR_coord[i, 2]), HR_coord[i, 1] + 1.5 * np.cos(HR_coord[i, 2])]
        point2 = [HR_coord[i, 0] + 1.5 * np.sin(HR_coord[i, 2]), HR_coord[i, 1] - 1.5 * np.cos(HR_coord[i, 2])]
        # plt.plot(HR_coord[i,0],HR_coord[i,1],'ko')
        plt.plot([point1[0], point2[0]], [point1[1], point2[1]], color=((color_value, 0, 1)), linewidth=4)
# Build an 8-row x 10-column turbine grid (Horns Rev-style layout); each row
# is offset along a 7-degree skew. Coordinates start in rotor-diameter units.
x_coord = np.linspace(0, 63, 10)
y_coord = np.zeros(10)
HR_coord = np.zeros([2, 10, 8])
for i in range(8):
    HR_coord[0, :, i] = x_coord - 7 * np.sin(np.pi / 180 * 7) * i
    HR_coord[1, :, i] = y_coord + 7 * np.cos(np.pi / 180 * 7) * i
# flatten the grid into 80 (x, y) rows
HR_coord = np.reshape(HR_coord, [2, 80]).T
z_coord = np.zeros([80, 1]) + 70  # constant third coordinate (70) — presumably hub height in metres; confirm
yaw = np.zeros([80, 1])  # yaw angle (gamma), all zero
power = np.random.rand(80, 1)  # random per-turbine power; only used by plot_turbine above
HR_coord = np.append(HR_coord, z_coord, axis=1)
HR_coord = np.append(HR_coord, yaw, axis=1)
# plot_turbine(HR_coord,power)
# plt.show()
D = 80  # scale factor for x/y — presumably rotor diameter in metres; confirm
Displacement = np.tile([2560, 2560], (80, 1))
# scale x/y to metres and shift the farm origin
HR_coord[:, 0:2] = HR_coord[:, 0:2] * D + Displacement
# write the layout as a 4-column table: x, y, z, gamma
df = pd.DataFrame(HR_coord)
df.columns = ["x", "y", "z", "gamma"]
df.to_csv("./HornsRev.dat", index=None)
print(HR_coord)
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Module-wide SQLAlchemy handle used to declare models below.
db=SQLAlchemy()
class Flight(db.Model):
    """ORM model for one flight (origin, destination, duration)."""
    __tablename__="flights"
    # surrogate integer primary key
    id = db.Column(db.Integer,primary_key=True)
    origin = db.Column(db.String,nullable=False)
    destination = db.Column(db.String,nullable=False)
    # duration units are not specified in this file — confirm (minutes?)
    duration = db.Column(db.Integer,nullable=False)
    def say(self):
        # Placeholder method — no behavior implemented yet.
        pass
|
import csv
import os
import difflib
import statistics
import matplotlib.pyplot as plt
# Global matplotlib font sizes for the figure below.
SMALL_SIZE = 12
MEDIUM_SIZE = 14
LARGE_SIZE = 18
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
# plt.rc('title', titlesize=MEDIUM_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=LARGE_SIZE, titlesize=LARGE_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE)    # fontsize of the tick labels
base_dir = "/Users/ewcss/data/ssbt/20220211_benchmark"
# DFT functionals grouped by family; rows from the CSVs are bucketed into
# these groups for the per-group box plots.
methods = {"GGA": ["PBE", "PBE-D3(BJ)", "BLYP", "BLYP-D3(BJ)", "B97-D", "B97-D3", "mPW91", "mPW91-D3(BJ)", "VV10", "rVV10"],
           "meta-GGA": ["M06-L", "M06-L-D3(0)", "SCAN", "SCAN-D3(BJ)", "TPSS", "TPSS-D3(BJ)", "MN12-L", "MN12-L-D3(BJ)", "B97M-rV"],
           "hybrid GGA": ["PBE0", "PBE0-D3(BJ)", "B3LYP", "B3LYP-D3(BJ)", "CAM-B3LYP", "CAM-B3LYP-D3(0)", "mPW1PW91", "mPW1PW91-D3(BJ)", "wB97X", "wB97XD", "wB97XD3", "wB97XV"],
           "hybrid meta-GGA": ["M06-2X", "M06-2X-D3(0)", "M06-HF", "M08-SO", "M11", "MN15", "BMK", "BMK-D3(BJ)", "TPSSh", "TPSSh-D3(BJ)", "SCAN0", "mPWB1K", "mPWB1K-D3(BJ)", "wB97M-V"]}
# {group: {functional: mean abs error}} maps; the pcm_* dicts are only filled
# by the commented-out IEF-PCM section below.
vac_mae = {x: dict() for x in methods}
vac_rel = {x: dict() for x in methods}
pcm_mae = {x: dict() for x in methods}
pcm_rel = {x: dict() for x in methods}
# Absolute errors (vacuum): last column of each row is the functional's average.
with open(os.path.join(base_dir, "abserrs_vacuum.csv")) as file:
    reader = csv.reader(file)
    for i, row in enumerate(reader):
        if i == 0:
            # skip header row
            continue
        elif row[0].lower() == "average" or "3c" in row[0].lower():
            # skip summary rows and composite "3c" methods
            continue
        funct = row[0]
        # excluded functionals — reason not recorded here; confirm
        if funct == "M06-HF" or funct == "B3LYP":
            continue
        avg = float(row[-1])
        for group, functs in methods.items():
            if funct in functs:
                vac_mae[group][funct] = avg
# Relative errors (vacuum), same layout as above.
with open(os.path.join(base_dir, "abserrs_rel_vacuum.csv")) as file:
    reader = csv.reader(file)
    for i, row in enumerate(reader):
        if i == 0:
            continue
        elif row[0].lower() == "average" or "3c" in row[0].lower():
            continue
        funct = row[0]
        avg = float(row[-1])
        if funct == "M06-HF" or funct == "B3LYP":
            continue
        for group, functs in methods.items():
            if funct in functs:
                vac_rel[group][funct] = avg
# with open(os.path.join(base_dir, "abserrs_IEF-PCM.csv")) as file:
#     reader = csv.reader(file)
#     for i, row in enumerate(reader):
#         if i == 0:
#             continue
#         elif row[0].lower() == "average" or "3c" in row[0].lower():
#             continue
#         funct = row[0]
#         avg = float(row[-1])
#
#         # if funct == "M06-HF":
#         #     continue
#
#         for group, functs in methods.items():
#             if funct in functs:
#                 pcm_mae[group][funct] = avg
#
# with open(os.path.join(base_dir, "abserrs_rel_IEF-PCM.csv")) as file:
#     reader = csv.reader(file)
#     for i, row in enumerate(reader):
#         if i == 0:
#             continue
#         elif row[0].lower() == "average" or "3c" in row[0].lower():
#             continue
#         funct = row[0]
#         avg = float(row[-1])
#
#         # if funct == "M06-HF":
#         #     continue
#
#         for group, functs in methods.items():
#             if funct in functs:
#                 pcm_rel[group][funct] = avg
# Two stacked box plots: MAE on top, mean relative abs error below.
fig, axs = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
for i, dset in enumerate([vac_mae, vac_rel]):
    ax = axs[i]
    if i == 0:
        ax.set_ylabel("MAE (eV)")
    else:
        ax.set_ylabel("MRAE (unitless)")
    xs = ["GGA", "meta-GGA", "hybrid GGA", "hybrid meta-GGA"]
    avgs = list()
    lowlims = list()
    uplims = list()
    data = list()
    for group in xs:
        data.append(sorted(list(dset[group].values())))
        avg = statistics.mean(dset[group].values())
        avgs.append(avg)
        # sort functionals by error to report min/max per group
        group_sort = sorted(dset[group].items(), key=lambda x: x[1])
        print("\t min: {} ({}) max: {} ({}) avg: {}".format(group_sort[0][0], group_sort[0][1],
                                                            group_sort[-1][0], group_sort[-1][1],
                                                            avg))
        lowlims.append(abs(avg - group_sort[0][1]))
        uplims.append(abs(avg - group_sort[-1][1]))
    # ax.bar(x=xs, height=avgs, yerr=[lowlims, uplims], color=["#ff595e", "#ffca3a", "#8ac926", "#1982c4"])
    # ax.bar(x=range(1, len(xs) + 1), height=avgs, tick_label=xs, color=["#ff595e", "#ffca3a", "#8ac926", "#1982c4"], align="center")
    # ax.set_xticklabels(xs, rotation=30, ha="right")
    # ax2 = ax.twinx()
    bp = ax.boxplot(data, labels=xs, patch_artist=True)
    # color each group's box and draw medians in black
    for patch, color in zip(bp['boxes'], ["#ff595e", "#ffca3a", "#8ac926", "#1982c4"]):
        patch.set_facecolor(color)
    for median in bp['medians']:
        median.set(color='black')
    # ax.set_xticks(rotation=30, ha="right")
    # ax.set_xticklabels(xs, rotation=30, ha="right")
plt.tight_layout()
# fig.savefig("average_performance_sp_box.png", dpi=150)
#
# plt.show()
import re
import sys
sys.path.append("../../../..")
from text import *
#import importlib
#import os
#import pdb
#----------------------------------------------------------------------------------------------------
# Build a Text object from an ELAN transcript (.eaf) and its per-phrase audio
# directory, configured by grammatical-terms and tier-guide files.
text = Text("../../../../testData/harryMosesDaylight/daylight_1_4.eaf",
            "../../../../testData/harryMosesDaylight/audioPhrases",
            grammaticalTermsFile="../../../../testData/harryMosesDaylight/grammaticalTerms.txt",
            tierGuideFile="../../../../testData/harryMosesDaylight/tierGuide.yaml")
|
# proxy module
from kiva.trait_defs.kiva_font_trait import *
|
import pandas as pd
def read_table(path):
    '''
    Read a csv file and return a DataFrame object
    Params:
        path(str): file's path
    '''
    return pd.read_csv(path, encoding='utf-8')
|
# Written by Kamran Bigdely
# Example for Compose Methods: Extract Method.
import math
def print_stat():
    '''Prints Statistics'''
    n_student = 5
    # Read the five grades interactively from the user.
    grade_list = [int(input('Enter a number: ')) for _ in range(n_student)]
    mean = calculate_mean(grade_list)
    sd = calculate_std(grade_list, mean)
    # print out the mean and standard deviation in a nice format.
    print('****** Grade Statistics ******')
    print("The grades's mean is:", mean)
    print('The population standard deviation of grades is: ', round(sd, 3))
    print('****** END ******')
    return grade_list
def calculate_mean(grade_list):
    '''Return the arithmetic mean of the numbers in grade_list.

    Args:
        grade_list: non-empty list of numeric grades.
    Returns:
        Sum of the grades divided by their count.
    '''
    # Built-in sum() replaces the manual accumulation loop.
    return sum(grade_list) / len(grade_list)
def calculate_std(grade_list, mean):
    '''Return the population standard deviation of grade_list around mean.

    Args:
        grade_list: non-empty list of numeric grades.
        mean: precomputed mean of the grades.
    Returns:
        Square root of the average squared deviation from mean.
    '''
    # Generator expression replaces the manual accumulation loop.
    sum_of_sqrs = sum((grade - mean) ** 2 for grade in grade_list)
    return math.sqrt(sum_of_sqrs / len(grade_list))
|
#!c:\users\whq672437089\envs\engr597\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
import os
path = "score.txt"
res = 0
cnt = 0
with open(path) as f:
string = f.readlines()
for line in string:
res += float(line.split()[2])
cnt += 1
print(f"合計: {res}")
print(f"平均: {res / cnt}")
|
# Generated by Django 3.0.5 on 2020-05-01 08:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Dictionary table (word + meaning)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Dictionary',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('word', models.CharField(max_length=50)),
                ('meaning', models.TextField(verbose_name='Definition')),
            ],
        ),
    ]
|
from random import choice
import requests
'''
this simple funciton in py language return a list with very random names
'''
def main() -> list:
    """Fetch random first names from the gerador-nomes API and combine them.

    Makes 10 API calls of 10 names each to build a pool, then draws random
    pairs to form 200 "name surname" strings.

    Returns:
        list: 200 strings of the form "<name> <surname>".
    """
    names = []
    # 10 requests x 10 names each -> pool of up to 100 names
    for _ in range(10):
        api = requests.get('http://gerador-nomes.herokuapp.com/nomes/10')
        names.extend(api.json())
    # draw two random pool entries per combination (repeats possible)
    return [choice(names) + ' ' + choice(names) for _ in range(200)]
names = main()
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#import usefull stuff
import csv
#defining the analysis function
def ymaze():
    """Prompt for one animal's Y-maze arm-entry string ('a'/'b'/'c') and
    append its spontaneous-alternation statistics to ymazeanalysis.csv."""
    with open("ymazeanalysis.csv", mode="a", newline="") as f:
        writer = csv.writer(f, delimiter=",")
        print("enter the animal code, then the abc string")
        animal = input("Enter animal code: ")
        # renamed from `str`, which shadowed the builtin
        sequence = input("Enter the abc string, no spaces, from y-maze: ")
        # BUG FIX: the original `if "a" and "b" and "c" in str` only tested
        # for "c"; require all three arms to actually appear.
        if sequence.isalpha() and all(arm in sequence for arm in "abc"):
            print("you have inserted an alphabetic string, no spaces, as asked \n")
            count_a = sequence.count('a')
            count_b = sequence.count('b')
            count_c = sequence.count('c')
            totnumentr = count_a + count_b + count_c
            # percentage of entries into each arm (computed but not written out)
            percA = (count_a / totnumentr) * 100
            percB = (count_b / totnumentr) * 100
            percC = (count_c / totnumentr) * 100
            # an alternation is any permutation of 'abc' appearing as a triplet
            alt = sum(sequence.count(p) for p in ('abc', 'bca', 'bac', 'acb', 'cab', 'cba'))
            # totnumentr - 2 = number of overlapping triplets in the sequence
            altentr = 100 * (alt / (totnumentr - 2))
            writer.writerow([animal, sequence, totnumentr, alt, altentr])
            print("you have entered {} = {} \n".format(animal, sequence))
#create the csv file and write headers (overwrites any previous run's data)
with open("ymazeanalysis.csv", mode="w", newline="") as f:
    writer = csv.writer(f, delimiter=",")
    writer.writerow(["Animal Code", "abc String from Y maze test", "Total number of entries", "Number of correct alternation", "Spontaneous Alternation %"])
#main loop: keep adding animals until the user types "quit"
active = True
while active:
    user = input("type n to add a new animal/string or type quit to exit the program: \n").lower()
    if user == "n":
        ymaze()
    elif user == "quit":
        active = False
    else:
        print ("Enter either the word quit or n: \n")
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
import mock
from openstackclient.network import common
from openstackclient.tests import utils
def _add_common_argument(parser):
parser.add_argument(
'common',
metavar='<common>',
help='Common argument',
)
return parser
def _add_network_argument(parser):
parser.add_argument(
'network',
metavar='<network>',
help='Network argument',
)
return parser
def _add_compute_argument(parser):
parser.add_argument(
'compute',
metavar='<compute>',
help='Compute argument',
)
return parser
class FakeNetworkAndComputeCommand(common.NetworkAndComputeCommand):
    """Minimal concrete NetworkAndComputeCommand used to exercise the base
    class: adds one argument per parser hook and delegates take_action_*
    to the mocked client."""
    def update_parser_common(self, parser):
        return _add_common_argument(parser)
    def update_parser_network(self, parser):
        return _add_network_argument(parser)
    def update_parser_compute(self, parser):
        return _add_compute_argument(parser)
    def take_action_network(self, client, parsed_args):
        return client.network_action(parsed_args)
    def take_action_compute(self, client, parsed_args):
        return client.compute_action(parsed_args)
class FakeNetworkAndComputeLister(common.NetworkAndComputeLister):
    """Minimal concrete NetworkAndComputeLister; mirrors
    FakeNetworkAndComputeCommand for the Lister base class."""
    def update_parser_common(self, parser):
        return _add_common_argument(parser)
    def update_parser_network(self, parser):
        return _add_network_argument(parser)
    def update_parser_compute(self, parser):
        return _add_compute_argument(parser)
    def take_action_network(self, client, parsed_args):
        return client.network_action(parsed_args)
    def take_action_compute(self, client, parsed_args):
        return client.compute_action(parsed_args)
class FakeNetworkAndComputeShowOne(common.NetworkAndComputeShowOne):
    """Minimal concrete NetworkAndComputeShowOne; mirrors
    FakeNetworkAndComputeCommand for the ShowOne base class."""
    def update_parser_common(self, parser):
        return _add_common_argument(parser)
    def update_parser_network(self, parser):
        return _add_network_argument(parser)
    def update_parser_compute(self, parser):
        return _add_compute_argument(parser)
    def take_action_network(self, client, parsed_args):
        return client.network_action(parsed_args)
    def take_action_compute(self, client, parsed_args):
        return client.compute_action(parsed_args)
class TestNetworkAndCompute(utils.TestCommand):
    """Verify that NetworkAndComputeCommand routes take_action to the network
    client by default and to the compute client when the network endpoint is
    disabled. Subclasses swap in the Lister/ShowOne variants via self.cmd."""
    def setUp(self):
        super(TestNetworkAndCompute, self).setUp()
        self.namespace = argparse.Namespace()
        # Create network client mocks.
        self.app.client_manager.network = mock.Mock()
        self.network = self.app.client_manager.network
        self.network.network_action = mock.Mock(
            return_value='take_action_network')
        # Create compute client mocks.
        self.app.client_manager.compute = mock.Mock()
        self.compute = self.app.client_manager.compute
        self.compute.compute_action = mock.Mock(
            return_value='take_action_compute')
        # Subclasses can override the command object to test.
        self.cmd = FakeNetworkAndComputeCommand(self.app, self.namespace)
    def test_take_action_network(self):
        # Network endpoint enabled (default) -> the network client is used.
        arglist = [
            'common',
            'network'
        ]
        verifylist = [
            ('common', 'common'),
            ('network', 'network')
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.network.network_action.assert_called_with(parsed_args)
        self.assertEqual('take_action_network', result)
    def test_take_action_compute(self):
        # Network endpoint disabled -> the compute client is used instead.
        arglist = [
            'common',
            'compute'
        ]
        verifylist = [
            ('common', 'common'),
            ('compute', 'compute')
        ]
        self.app.client_manager.network_endpoint_enabled = False
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        result = self.cmd.take_action(parsed_args)
        self.compute.compute_action.assert_called_with(parsed_args)
        self.assertEqual('take_action_compute', result)
class TestNetworkAndComputeCommand(TestNetworkAndCompute):
    """Run the shared tests against the Command variant."""
    def setUp(self):
        super(TestNetworkAndComputeCommand, self).setUp()
        self.cmd = FakeNetworkAndComputeCommand(self.app, self.namespace)
class TestNetworkAndComputeLister(TestNetworkAndCompute):
    """Run the shared tests against the Lister variant."""
    def setUp(self):
        super(TestNetworkAndComputeLister, self).setUp()
        self.cmd = FakeNetworkAndComputeLister(self.app, self.namespace)
class TestNetworkAndComputeShowOne(TestNetworkAndCompute):
    """Run the shared tests against the ShowOne variant."""
    def setUp(self):
        super(TestNetworkAndComputeShowOne, self).setUp()
        self.cmd = FakeNetworkAndComputeShowOne(self.app, self.namespace)
|
# -*- coding: utf-8 -*-
# This module contains some defaults for the logging system.
import logging
import sys
# Default wiring: everything logs to stdout at DEBUG with a timestamped format.
LOG_FORMATTER = logging.Formatter(
    "%(asctime)s :: %(name)s :: %(levelname)-7s :: %(message)s",
    datefmt='%a, %d %b %Y %H:%M:%S')
CONSOLE_HANDLER = logging.StreamHandler(sys.stdout)
CONSOLE_HANDLER.setLevel(logging.DEBUG)
CONSOLE_HANDLER.setFormatter(LOG_FORMATTER)
ROOT_LOGGER = logging.getLogger('')
ROOT_LOGGER.setLevel(logging.DEBUG)  # Default logging level for library work.
ROOT_LOGGER.addHandler(CONSOLE_HANDLER)
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'FATAL', 'CRITICAL']
# Re-export each logging level constant (DEBUG, INFO, ...) as a module attribute.
for level in LOG_LEVELS:
    vars()[level] = getattr(logging, level)
# NOTE(review): `global` at module level is a no-op statement; get_logger is
# simply a module attribute that silence() below rebinds.
global get_logger
get_logger = logging.getLogger
def set_level(level):
    """Set the root logger threshold to the named *level* (e.g. 'INFO')."""
    numeric_level = getattr(logging, level)
    ROOT_LOGGER.setLevel(numeric_level)
def silence():
    """Disable all logging: make get_logger() hand out NullLogger instances
    and raise the root threshold above every real level."""
    global get_logger
    get_logger = lambda name: NullLogger()
    # float('inf') is greater than any numeric level, so nothing is emitted.
    ROOT_LOGGER.setLevel(float('inf'))
class NullLogger(object):
    """Stand-in logger whose level-named methods (debug, info, ...) do nothing."""
    def __getattr__(self, attr):
        # Attributes matching a LOG_LEVELS name become no-op callables.
        if attr.upper() in LOG_LEVELS:
            return lambda *args, **kwargs: None
        # NOTE(review): any other attribute implicitly returns None rather
        # than raising AttributeError — confirm callers don't rely on this.
|
import tkinter as tk
from tkinter import messagebox
# Small demo window: two entry fields and a button that shows their difference.
window = tk.Tk()
window.title("Tela de exemplo")
# callbacks
def subtracao():
    """Read both entry fields, subtract them and show the result in a dialog."""
    # NOTE(review): local `subtracao` shadows this function's own name;
    # harmless here since the function is not called recursively.
    subtracao = float(ent1.get()) - float(ent2.get())
    messagebox.showinfo('Resultado', "A subtracao deu: {}".format(subtracao))
# widgets
lbl1 = tk.Label(window, text = "Numero 1: ")
lbl2 = tk.Label(window, text = "Numero 2: ")
ent1 = tk.Entry(window)
ent2 = tk.Entry(window)
btn = tk.Button(window, text = "diminuir", command = subtracao)
# lay the widgets out on a 2-column grid
lbl1.grid(column = 0, row = 0)
lbl2.grid(column = 0, row = 1)
ent1.grid(column = 1, row = 0)
ent2.grid(column = 1, row = 1)
btn.grid(column = 0, row = 2)
window.mainloop()
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
from numpy.testing import assert_array_equal
import threading
import sys
if sys.version_info[0] >= 3:
import queue
else:
import Queue as queue
def fft1(x):
    """Reference O(n^2) DFT used to validate np.fft results."""
    n = len(x)
    # Full matrix of phase factors exp(-2j*pi*j*k/n); row j, column k.
    indices = np.arange(n)
    dft_matrix = np.exp(-2j * np.pi * np.outer(indices, indices) / float(n))
    return dft_matrix @ x
class TestFFTShift(TestCase):
    """Argument-validation checks for np.fft."""
    def test_fft_n(self):
        # n=0 is an invalid transform length and must raise ValueError.
        self.assertRaises(ValueError, np.fft.fft, [1, 2, 3], 0)
class TestFFT1D(TestCase):
    """Compare np.fft.fft against the O(n^2) reference implementation fft1."""
    def test_basic(self):
        rand = np.random.random
        x = rand(30) + 1j*rand(30)
        assert_array_almost_equal(fft1(x), np.fft.fft(x))
class TestFFTThreadSafe(TestCase):
    """Check that concurrent calls to the np.fft entry points all return the
    same result as a single-threaded call (thread-safety smoke test)."""
    threads = 16               # number of concurrent workers per test
    input_shape = (800, 200)   # large enough to make races plausible
    def _test_mtsame(self, func, *args):
        def worker(args, q):
            q.put(func(*args))
        q = queue.Queue()
        expected = func(*args)
        # Spin off a bunch of threads to call the same function simultaneously
        t = [threading.Thread(target=worker, args=(args, q))
             for i in range(self.threads)]
        [x.start() for x in t]
        # Make sure all threads returned the correct value
        for i in range(self.threads):
            assert_array_equal(q.get(timeout=5), expected,
                'Function returned wrong value in multithreaded context')
        [x.join() for x in t]
    def test_fft(self):
        a = np.ones(self.input_shape) * 1+0j
        self._test_mtsame(np.fft.fft, a)
    def test_ifft(self):
        a = np.ones(self.input_shape) * 1+0j
        self._test_mtsame(np.fft.ifft, a)
    def test_rfft(self):
        a = np.ones(self.input_shape)
        self._test_mtsame(np.fft.rfft, a)
    def test_irfft(self):
        a = np.ones(self.input_shape) * 1+0j
        self._test_mtsame(np.fft.irfft, a)
if __name__ == "__main__":
run_module_suite()
|
import pytest
from unittest.mock import mock_open
from yml_reader import YmlReader
@pytest.fixture
def unit_under_test(mocker):
    # Fresh YmlReader pointed at a dummy path for each test.
    # NOTE(review): the `mocker` parameter is unused here — confirm it can go.
    return YmlReader("test_file")
def test_read_file_exists(unit_under_test, mocker):
    """read() returns the parsed YAML when the file exists."""
    mocker.patch('os.path.isfile', return_value=True)
    mocker.patch('builtins.open', mock_open())
    mocker.patch('yaml.safe_load', return_value="yaml_content")
    yaml_content = unit_under_test.read()
    assert yaml_content == "yaml_content"
def test_read_file_does_not_exists(unit_under_test, mocker):
    """read() raises FileNotFoundError when the file is missing."""
    mocker.patch('os.path.isfile', return_value=False)
    with pytest.raises(FileNotFoundError, match='file: test_file not found!'):
        unit_under_test.read()
|
def arithmetic_arranger(problems, print_ans = False):
    """Arrange vertical-arithmetic problems side by side.

    Args:
        problems: list of strings like "32 + 698"; at most five problems,
            operands of at most four digits, operator '+' or '-'.
        print_ans: if True, append a fourth line with the computed answers.

    Returns:
        The arranged problems as a single newline-joined string, or an
        "Error: ..." message describing the first validation failure.
    """
    if len(problems) > 5:
        return "Error: Too many problems."
    # Build each display row as a list of columns, joined at the end
    # (avoids repeated string concatenation and the manual separator counter).
    tops, bottoms, dashes, answers = [], [], [], []
    for problem in problems:
        chars = problem.split()
        # Narrowed from a bare except: only conversion failures (ValueError)
        # or a malformed problem with too few tokens (IndexError) mean bad digits.
        try:
            num1 = int(chars[0])
            operator = chars[1]
            num2 = int(chars[2])
        except (ValueError, IndexError):
            return "Error: Numbers must only contain digits."
        if len(chars[0]) > 4 or len(chars[2]) > 4:
            return "Error: Numbers cannot be more than four digits."
        if operator not in ["+", "-"]:
            return "Error: Operator must be '+' or '-'."
        # Column width: two wider than the longer operand (room for operator + space).
        width = len(str(max(num1, num2))) + 2
        tops.append(str(num1).rjust(width))
        bottoms.append(operator + str(num2).rjust(width - 1))
        dashes.append("-" * width)
        if print_ans:
            answer = num1 + num2 if operator == "+" else num1 - num2
            answers.append(str(answer).rjust(width))
    # Problems are separated by four spaces.
    lines = ["    ".join(tops), "    ".join(bottoms), "    ".join(dashes)]
    if print_ans:
        lines.append("    ".join(answers))
    return "\n".join(lines)
import os
os.path.getmtime("aspiration.pdf")
#time stamp, unix jan first 1970 |
#!/usr/bin/env python3
import hashlib
import os
import datetime
import argparse
import time
def size_of_file(full_path_file):
    """Return the size of *full_path_file* in bytes."""
    # os.path.getsize already returns an int; the explicit cast was redundant.
    return os.path.getsize(full_path_file)
def hash_of_file(full_path_file):
    """Return the MD5 hex digest of *full_path_file*, read in 512 MB chunks."""
    chunk_size = 512 * 1024 * 1024  # 512 MB
    digest = hashlib.md5()
    with open(full_path_file, "rb") as fh:
        # iter(callable, sentinel) yields chunks until read() returns b"".
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
def calculate_hashes(directory, output_file):
    """Walk *directory* and append one tab-separated line per entry to
    *output_file*: relative path, size in bytes, MD5 hex digest.

    Directory entries are written with size 0 and the MD5 of the empty
    string so they are distinguishable in the output.
    """
    if not directory.endswith("/"):
        directory += "/"
    count = 0
    print("Starting: " + directory + " at " + str(datetime.datetime.now()))
    # `with` guarantees the output file is closed even if hashing raises
    # (the original left the handle open on any exception).
    with open(output_file, "a") as output:
        for root, dirs, files in os.walk(directory):
            for sub_dir in dirs:  # renamed from `dir`, which shadowed the builtin
                full_path_dir = os.path.join(root, sub_dir)
                relative_dir = full_path_dir[len(directory):]
                # d41d8... is the MD5 of zero bytes, marking a directory entry
                output.write(relative_dir + "\t0\td41d8cd98f00b204e9800998ecf8427e\n")
            for file in files:
                full_path_file = os.path.join(root, file)
                relative_file = full_path_file[len(directory):]
                output.write(relative_file + "\t" + str(size_of_file(full_path_file)) + "\t" + hash_of_file(full_path_file) + "\n")
                count += 1
                # Progress line every 10000 files. The original incremented
                # once per walked directory and compared with ==, so it could
                # fire at most once, and never per file.
                if count % 10000 == 0:
                    print("Processing " + directory + "Number of files done: " + str(count))
def main():
    """CLI entry point: parse the base directory and output file, run the
    hash listing, and report the elapsed time."""
    parser = argparse.ArgumentParser(description="This script writes OUTPUT_FILE with list of file names, sizes (bytes) and MD5 checksums from BASE_DIRECTORY. MD5 is used for object storage (e.g. Amazon S3) compatibility")
    parser.add_argument("base_directory", help="Root directory of the list of files")
    parser.add_argument("output_file", help="Output file: each line contains the name of the file, file size (bytes) and MD5 hash")
    args = parser.parse_args()
    start_time = time.time()
    calculate_hashes(args.base_directory, args.output_file)
    end_time = time.time()
    # BUG FIX: the original passed the template and the value as two separate
    # print arguments, so "{0:.2f}" was printed literally and never formatted.
    print("Total time in hours: {0:.2f}".format((end_time - start_time) / 3600))
    print("Output file:", args.output_file)
if __name__ == "__main__":
    main()
|
# Generated by Django 2.2.10 on 2021-04-21 08:11
from django.db import migrations, models
import django.db.models.deletion
import muni_portal.core.models
class Migration(migrations.Migration):
    """Widens ServiceRequest.status to a fixed choice set and adds the
    ServiceRequestAttachment model for files attached to a service request."""
    dependencies = [
        ("core", "0037_auto_20210326_0554"),
    ]
    operations = [
        migrations.AlterField(
            model_name="servicerequest",
            name="status",
            field=models.CharField(
                choices=[
                    ("queued", "Queued"),
                    ("created", "Created"),
                    ("assigned", "Assigned"),
                    ("completed", "Completed"),
                ],
                default="queued",
                max_length=254,
            ),
        ),
        migrations.CreateModel(
            name="ServiceRequestAttachment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("date_created", models.DateTimeField(auto_now_add=True)),
                (
                    "file",
                    models.FileField(
                        upload_to=muni_portal.core.models.service_request_attachment_file_path
                    ),
                ),
                # tracks whether the file has been synced to the external
                # "collaborator" system
                ("exists_on_collaborator", models.BooleanField(default=False)),
                (
                    "service_request",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="images",
                        to="core.ServiceRequest",
                    ),
                ),
            ],
        ),
    ]
|
# Generated by Django 3.0.5 on 2020-07-24 20:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Raises the user quota defaults: 10737418240 = 10 GiB max file size,
    26843545600 = 25 GiB total space per project."""
    dependencies = [
        ('quotas', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userquota',
            name='max_file_size',
            field=models.BigIntegerField(default=10737418240),
        ),
        migrations.AlterField(
            model_name='userquota',
            name='total_space_per_project',
            field=models.BigIntegerField(default=26843545600),
        ),
    ]
|
#author:rujia
#website:www.rujia.uk
#version:1.0
from flask import session,request
from . import app
from core import blind_demo
from core import until
@app.route("/setParamsUser", methods=['GET'])
def setParamsUser():
try:
uid = until.get_mac_address()
secp = session.get(uid+'secp')
if secp == 'secp256k1':
params = blind_demo.choose_parameters_secp256k1()
elif secp == 'secp192k1':
params = blind_demo.choose_parameters_secp192k1()
t1, t2, t3, t4, t5 = blind_demo.get_random_ZR(params.group),blind_demo.get_random_ZR(params.group),blind_demo.get_random_ZR(params.group),blind_demo.get_random_ZR(params.group),blind_demo.get_random_ZR(params.group)
until.putBytesToSession(uid+'t1_bytes',t1, params.group)
until.putBytesToSession(uid+'t2_bytes',t2, params.group)
until.putBytesToSession(uid+'t3_bytes',t3, params.group)
until.putBytesToSession(uid+'t4_bytes',t4, params.group)
until.putBytesToSession(uid+'t5_bytes',t5, params.group)
rjson = str(t1) + '#' + str(t2) + '#' + str(t3) + '#' + str(t4) + '#' + str(t5)
return rjson
except Exception as e:
print(e)
return "0"
@app.route("/userExecuteOne", methods=['POST'])
def userExecuteOne():
try:
user = getUserObj()
uid = until.get_mac_address()
orig_z = until.getObjFromSession(uid+'z_bytes',user.parameters.group)
orig_gamma = until.getObjFromSession(uid+'gamma_bytes',user.parameters.group)
#z = until.point2Obj(39972701138670676199833069686228758314468515773606341811291326318149542933708, user.parameters.group)
#gamma = until.unmber2Obj(67950487539194343191118481106849796541317734897587234319291961516724185583159, user.parameters.group)
zu = user.protocol_one(orig_z, orig_gamma)
until.putBytesToSession(uid+'zu_bytes', zu, user.parameters.group)
rjson = str(user.UserKeypair.gamma) + '#' + str(user.UserKeypair.xi)+ '#' + str(orig_z) + '#' + str(zu)
return rjson
except Exception as e1:
print(e1)
return "0"
@app.route("/userExecuteThree", methods=['POST'])
def userExecuteThree():
try:
uid = until.get_mac_address()
m = str(request.form['m'])
m = bytes(m,'utf-8')
session[uid+'m'] = m
user = getUserObj()
orig_z1 = until.getObjFromSession(uid+'z1_bytes',user.parameters.group)
orig_a = until.getObjFromSession(uid+'a_bytes',user.parameters.group)
orig_b1 = until.getObjFromSession(uid+'b1_bytes',user.parameters.group)
orig_b2 = until.getObjFromSession(uid+'b2_bytes',user.parameters.group)
orig_t1 = until.getObjFromSession(uid+'t1_bytes',user.parameters.group)
orig_t2 = until.getObjFromSession(uid+'t2_bytes',user.parameters.group)
orig_t3 = until.getObjFromSession(uid+'t3_bytes',user.parameters.group)
orig_t4 = until.getObjFromSession(uid+'t4_bytes',user.parameters.group)
orig_t5 = until.getObjFromSession(uid+'t5_bytes',user.parameters.group)
orig_y = until.getObjFromSession(uid+'y_bytes',user.parameters.group)
zeta1, zeta2, alpha, beta1, beta2, epsilon, e = user.protocol_three(orig_z1, orig_a, orig_b1, orig_b2, m, orig_y,orig_t1, orig_t2, orig_t3, orig_t4, orig_t5)
until.putBytesToSession(uid+'zeta1_bytes',zeta1, user.parameters.group)
until.putBytesToSession(uid+'zeta2_bytes',zeta2, user.parameters.group)
until.putBytesToSession(uid+'alpha_bytes',alpha, user.parameters.group)
until.putBytesToSession(uid+'beta1_bytes',beta1, user.parameters.group)
until.putBytesToSession(uid+'beta2_bytes',beta2, user.parameters.group)
until.putBytesToSession(uid+'epsilon_bytes',epsilon, user.parameters.group)
until.putBytesToSession(uid+'e_bytes',e, user.parameters.group)
rjson = str(zeta1) + '#' + str(zeta2) + '#' + str(alpha)+ '#' + str(beta1)+ '#' + str(beta2)+ '#' + str(epsilon)+ '#' + str(e)
return rjson
except Exception as exception:
print(exception)
return "0"
@app.route("/userExecuteFive", methods=['GET'])
def userExecuteFive():
    """Run the user's fifth protocol step.

    Restores the intermediate group elements cached by the earlier steps
    from the Flask session, calls ``user.protocol_five`` and caches its
    outputs.

    Returns:
        A '#'-separated string of the step-5 outputs, or the sentinel
        string "0" on any failure.
    """
    try:
        user = getUserObj()
        # Session keys are namespaced by this machine's MAC address.
        uid = until.get_mac_address()
        # Reload the group elements produced by the previous protocol steps.
        orig_r = until.getObjFromSession(uid+'r_bytes',user.parameters.group)
        orig_c = until.getObjFromSession(uid+'c_bytes',user.parameters.group)
        orig_s1 = until.getObjFromSession(uid+'s1_bytes',user.parameters.group)
        orig_s2 = until.getObjFromSession(uid+'s2_bytes',user.parameters.group)
        orig_d = until.getObjFromSession(uid+'d_bytes',user.parameters.group)
        orig_t1 = until.getObjFromSession(uid+'t1_bytes',user.parameters.group)
        orig_t2 = until.getObjFromSession(uid+'t2_bytes',user.parameters.group)
        orig_t3 = until.getObjFromSession(uid+'t3_bytes',user.parameters.group)
        orig_t4 = until.getObjFromSession(uid+'t4_bytes',user.parameters.group)
        orig_t5 = until.getObjFromSession(uid+'t5_bytes',user.parameters.group)
        rho, omega, sigma1, sigma2, delta = user.protocol_five(orig_r, orig_c, orig_s1, orig_s2, orig_d, orig_t1, orig_t2, orig_t3, orig_t4, orig_t5)
        # Cache the step-5 outputs so later steps can reload them.
        until.putBytesToSession(uid+'rho_bytes',rho, user.parameters.group)
        until.putBytesToSession(uid+'omega_bytes',omega, user.parameters.group)
        until.putBytesToSession(uid+'sigma1_bytes',sigma1, user.parameters.group)
        until.putBytesToSession(uid+'sigma2_bytes',sigma2, user.parameters.group)
        until.putBytesToSession(uid+'delta_bytes',delta, user.parameters.group)
        rjson = str(rho) + '#' + str(omega) + '#' + str(sigma1) + '#' + str(sigma2) + '#' + str(delta)
        return rjson
    except Exception as e1:
        # Best-effort API: log and signal failure with the sentinel "0".
        print(e1)
        return "0"
def getUserObj():
    """Rebuild a ``blind_demo.User`` from values cached in the session.

    Reads the curve choice and the five public group elements that were
    previously stored under MAC-address-namespaced session keys.

    Returns:
        The reconstructed ``blind_demo.User``, or ``None`` when the curve
        choice is unknown/missing, any required session value is absent,
        or reconstruction fails for any other reason (original contract).
    """
    try:
        uid = until.get_mac_address()
        secp = session.get(uid + 'secp')
        # Select the curve parameters recorded for this client.
        if secp == 'secp256k1':
            params = blind_demo.choose_parameters_secp256k1()
        elif secp == 'secp192k1':
            params = blind_demo.choose_parameters_secp192k1()
        else:
            # Bug fix: previously an unknown/missing curve fell through and
            # crashed on the undefined name 'params' (masked by the broad
            # except). Fail explicitly instead.
            return None
        # All five public values must be present to reconstruct the user;
        # previously a missing key raised NameError on an unbound local.
        required = ('g_bytes', 'h_bytes', 'z_bytes', 'gamma_bytes', 'xi_bytes')
        if any(session.get(uid + key) is None for key in required):
            return None
        orig_g = until.getObjFromSession(uid + 'g_bytes', params.group)
        orig_h = until.getObjFromSession(uid + 'h_bytes', params.group)
        orig_z = until.getObjFromSession(uid + 'z_bytes', params.group)
        orig_gamma = until.getObjFromSession(uid + 'gamma_bytes', params.group)
        orig_xi = until.getObjFromSession(uid + 'xi_bytes', params.group)
        return blind_demo.User(orig_g, orig_h, orig_z, orig_gamma, orig_xi, params)
    except Exception:
        # Preserve the original contract: any failure yields None.
        return None
|
#!/usr/bin/env python3
# ================================================================================
# -- File: day_05/05_code.py
# -- Project: advent-of-code-2020
# -- Project URL: https://adventofcode.com/2020/
# -- Create Date: 2020-12-05 18:41
# -- Author: moosploit
# -- Company: https://github.com/moosploit
# -- License: MIT License | http://www.opensource.org/licenses/MIT
# ================================================================================
import math
seats = {}


def find_seat(seat):
    """Decode a boarding pass into ``(seat_id, row, column)``.

    The 10-character code is binary space partitioning: the first seven
    characters choose the row (F = lower half = 0, B = upper half = 1)
    and the last three choose the column (L = 0, R = 1).  The manual
    interval bisection of the original implementation is exactly binary
    decoding, so decode directly; this also removes the original's
    failure mode of silently returning -1 for row/column when the
    bisection never converged.

    Args:
        seat: 10-character string of 'F'/'B' followed by 'L'/'R'.

    Returns:
        Tuple ``(seat_id, row, column)`` with ``seat_id = row * 8 + column``.
    """
    row = int(seat[:7].translate(str.maketrans("FB", "01")), 2)
    column = int(seat[7:].translate(str.maketrans("LR", "01")), 2)
    # Seat IDs are defined by the puzzle as row * 8 + column.
    seat_id = row * 8 + column
    return seat_id, row, column
# Build a map of every seen seat ID -> [row, column].
with open("05_data.txt") as file:
    boardingpasses = file.readlines()

for boardingpass in boardingpasses:
    seat_id, seat_row, seat_col = find_seat(boardingpass.strip())
    seats[seat_id] = [seat_row, seat_col]

# === Part Two ===
# My seat is the ID missing from the list while both neighbouring IDs
# (sid - 1 and sid + 1) are present.
my_seat = None  # bug fix: avoid NameError if no candidate is ever found
for row in range(0, 128):  # bug fix: rows are 0..127 inclusive (was range(0, 127))
    for col in range(0, 8):  # bug fix: columns are 0..7 inclusive (was range(0, 7))
        sid = row * 8 + col
        if (sid not in seats) and ((sid - 1) in seats) and ((sid + 1) in seats):
            my_seat = sid

print(f"\nPart One | Highest Seat ID: {max(seats)}")
print(f"\nPart Two | My Seat ID: {my_seat}")
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from riak import RiakClient as OrigRiakClient
from solar.dblayer.model import clear_cache
class RiakClient(OrigRiakClient):
    """Riak client that clears the DB-layer model cache around sessions."""

    def session_start(self):
        """Drop any cached model state before a new session starts."""
        clear_cache()

    def session_end(self, result=True):
        """Drop cached model state when a session finishes.

        ``result`` is accepted for interface compatibility and ignored.
        """
        # ignore result
        clear_cache()

    def delete_all(self, cls):
        """Delete every key in *cls*'s bucket, retrying up to 10 passes.

        Returns as soon as a listing of the bucket comes back empty.
        """
        # riak dislikes deletes without dvv
        for _ in range(10):
            rst = cls.bucket.get_index('$bucket',
                                       startkey='_',
                                       max_results=100000).results
            if not rst:
                return
            for key in rst:
                cls.bucket.delete(key)
            # Bug fix: the original attached ``else: return`` to the inner
            # ``for`` (which never breaks), so the method always returned
            # after a single pass and this back-off sleep was unreachable.
            time.sleep(0.5)
|
import vast.io_mesh as io
import numpy as np
import h5py
import vast.volume_tools as vt
import vast.surface_tools as st
from functools import partial
import concurrent.futures
import os
import vast.math_helpers as mh
import time
class SurfaceVolumeMapper(object):
    def __init__(self, white_surf=None, gray_surf=None, resolution=None, mask=None, dimensions=None,
                 origin=None, filename=None, save_in_absence=False ):
        """Class for mapping surface data to voxels.

        Always assumes axis order is xyz.

        Args:
            white_surf/gray_surf: surface files to map to the volume.
            resolution: voxel size in mm; either a float for uniform voxels
                or 3 floats for non-uniform.
            mask: used to create a bounding box which determines the block.
                If dimensions and origin are specified, those are used instead.
            dimensions: size of the volume block in voxel steps.
            origin: origin of the volume (mm), i.e. location of one corner
                in real-world coordinates.
            filename: path to an hdf5 file containing precomputed coordinates.
                If it exists it is loaded and all other computation is skipped.
            save_in_absence: if True and ``filename`` did not exist, the
                computed coordinates are written there at the end.
        """
        #initialise block coordinates dictionary
        self.volume_surf_coordinates={'voxel_coordinates':[],
                                      'triangles':[],
                                      'depths':[],
                                      'triangle_coordinates':[]}
        # Fast path: reuse precomputed coordinates and skip all geometry work.
        if filename is not None:
            if os.path.isfile(filename):
                print('loading precomputed coordinates from {}'.format(filename))
                self.load_precomputed_coordinates(filename)
                print('loaded header info:')
                print('resolution: {}'.format(self.resolution))
                print('dimensions: {}'.format(self.dimensions))
                print('origin: {}'.format(self.origin))
                print('We hope this file matches your expectations')
                return
            else:
                print('precomputed coordinates file not found, recomputing...')
        #check resolution is 1D or 3D
        print(resolution)
        if resolution is not None:
            if isinstance(resolution, float):
                self.resolution = np.array([resolution,resolution,resolution])
            elif len(resolution)==3:
                self.resolution = resolution
            else:
                # NOTE(review): bare expression — the exception is neither
                # instantiated nor raised, so invalid resolutions fall
                # through silently with self.resolution unset. Confirm intent.
                NotImplementedError
        print('loading surface meshes')
        if white_surf is not None:
            self.white_surface = io.load_mesh_geometry(white_surf)
            self.triangles_to_include = self.white_surface['faces']
        if gray_surf is not None:
            self.gray_surface = io.load_mesh_geometry(gray_surf)
        #check if mask. Calculate dimensions and origins from mask, unless these are specified.
        print('masking triangles')
        if mask is not None:
            self.mask=mask
            self.triangles_to_include = self.surface_mask_triangles(self.mask)
            # If dimensions/origin were not given as arrays/lists, derive them
            # from the bounding box of the masked gray+white vertices.
            if np.logical_and(type(dimensions) is not np.ndarray,type(dimensions )is not list) or np.logical_and(type(origin) is not np.ndarray,
                                                                                                                 type(origin) is not list):
                block_box = SurfaceVolumeMapper.bounding_box(np.vstack((self.gray_surface['coords'][mask],
                                                                        self.white_surface['coords'][mask])))
                self.dimensions = np.ceil((block_box[1]-block_box[0])/self.resolution).astype(int)
                self.origin = block_box[0]
            else:
                self.dimensions = np.array(dimensions)
                self.origin = np.array(origin)
        # if no mask, use block to filter triangles down
        else:
            self.dimensions = np.array(dimensions)
            self.origin = np.array(origin)
        print(self.dimensions, self.resolution, self.origin)
        # Far corner of the block in world (mm) coordinates.
        self.max_dimension = self.origin + self.dimensions * self.resolution
        print('Number of triangles in surface mask: {}'.format(len(self.triangles_to_include)))
        #return
        self.triangles_to_include = self.volume_mask_triangles(self.triangles_to_include)
        print('Number of triangles after masking to volume block: {}'.format(len(self.triangles_to_include)))
        #main function
        print('calculating coordinates')
        t1=time.time()
        # self.calculate_volume_surf_coordinates()
        t2=time.time()
        # Reset before the parallel computation fills it.
        self.volume_surf_coordinates={'voxel_coordinates':[],
                                      'triangles':[],
                                      'depths':[],
                                      'triangle_coordinates':[]}
        # print('non-parallel tool: ',t2-t1)
        t2=time.time()
        self.calculate_volume_surf_coordinates_parallel()
        t3=time.time()
        print('parallel tool: ',t3-t2)
        #save file if filename was give, file did not exist and save_in_absence is True
        if filename is not None and save_in_absence:
            print('saving out coordinates to {}'.format(filename))
            self.save_coordinates(filename)

    #functions
    def save_coordinates(self, filename):
        """Save computed coordinates as an hdf5 file.

        The block geometry (resolution/origin/dimensions) is stored as file
        attributes so ``load_precomputed_coordinates`` can report it.
        """
        f=h5py.File(filename, 'w')
        f.attrs['resolution'] = self.resolution
        f.attrs['origin'] = self.origin
        f.attrs['dimensions'] = self.dimensions
        coords_files=['voxel_coordinates','triangles', 'depths','triangle_coordinates']
        for coords_file in coords_files:
            dset = f.require_dataset( coords_file ,
                                      shape = self.volume_surf_coordinates[coords_file].shape ,
                                      dtype = self.volume_surf_coordinates[coords_file].dtype,
                                      compression = "gzip", compression_opts = 9)
            dset[:] = self.volume_surf_coordinates[coords_file]
        f.close()

    def load_precomputed_coordinates(self, filename):
        """Load coordinates (and block geometry attributes) from an hdf5 file."""
        f=h5py.File(filename, 'r')
        self.resolution = f.attrs['resolution']
        self.origin = f.attrs['origin']
        self.dimensions = f.attrs['dimensions']
        coords_files=['voxel_coordinates','triangles', 'depths','triangle_coordinates']
        for coords_file in coords_files:
            self.volume_surf_coordinates[coords_file] = f[coords_file][:]
        f.close()

    def map_vector_to_block(self, vector_file, interpolation='linear'):
        """Map per-vertex values from ``vector_file`` into the voxel block.

        Interpolation between vertices is either nearest neighbour or
        trilinear (weighted by barycentric coordinates).
        """
        block = np.zeros(self.dimensions)
        tri_coords = self.volume_surf_coordinates['triangle_coordinates']
        triangles=self.volume_surf_coordinates['triangles']
        vc=self.volume_surf_coordinates['voxel_coordinates']
        if interpolation == 'linear':
            # Barycentric-weighted sum of the three vertex values per voxel.
            block[vc[:,0],vc[:,1],vc[:,2]] = np.einsum('ij,ij->i', tri_coords, vector_file[triangles])
        elif 'nearest' in interpolation:
            #nearest is the maximum of the 3 coordinates
            # NOTE(review): if two barycentric weights tie for the maximum,
            # this boolean mask selects more than one vertex for that voxel,
            # which would misalign the assignment — confirm ties cannot occur.
            nearest_index=triangles[tri_coords.max(axis=1,keepdims=1) == tri_coords]
            block[vc[:,0],vc[:,1],vc[:,2]] = vector_file[nearest_index]
        return block

    def map_profiles_to_block(self, profiles, interpolation='linear'):
        """Map per-vertex depth profiles into the voxel block.

        ``profiles`` is indexed as [vertex, depth sample]; each voxel's
        cortical depth selects the nearest depth sample.  Interpolation
        between vertices is nearest neighbour or trilinear (barycentric).
        """
        block = np.zeros(self.dimensions)
        tri_coords = self.volume_surf_coordinates['triangle_coordinates']
        triangles=self.volume_surf_coordinates['triangles']
        vc=self.volume_surf_coordinates['voxel_coordinates']
        # Convert fractional depths (0..1) to profile sample indices.
        depths=np.round((profiles.shape[1]-1)*self.volume_surf_coordinates['depths']).astype(int)
        print('reading depths')
        triangle_values=np.array([profiles[triangles[:,0],depths[:]],profiles[triangles[:,1],depths[:]],profiles[triangles[:,2],depths[:]]]).T
        print('writing to block')
        if interpolation == 'linear':
            block[vc[:,0],vc[:,1],vc[:,2]] = np.einsum('ij,ij->i', tri_coords, triangle_values)
        elif 'nearest' in interpolation:
            #nearest is the maximum of the 3 coordinates
            # NOTE(review): same tie caveat as in map_vector_to_block.
            nearest_index=triangles[tri_coords.max(axis=1,keepdims=1) == tri_coords]
            block[vc[:,0],vc[:,1],vc[:,2]] = profiles[nearest_index,depths]
        return block

    def save_block(self, filename,block, dtype="ubyte"):
        """Save ``block`` as a .mnc volume via volume_tools."""
        vt.save_mnc_block(filename, block,
                          origin=self.origin, resolution=self.resolution,
                          dtype=dtype)
        return

    def surface_mask_triangles(self, mask):
        """Return the subset of ``self.triangles_to_include`` kept by ``mask``.

        NOTE(review): the original docstring said "all vertices in mask",
        but ``np.any`` keeps a triangle if ANY of its vertices is in the
        mask — confirm which behaviour is intended.
        """
        return self.triangles_to_include[np.any(mask[self.triangles_to_include],axis=1)]

    def volume_mask_triangles(self, triangles_to_include):
        """Return triangles whose prisms can intersect the volume block.

        A vertex is excluded only when both its gray and white positions
        are outside the block on the same side along some axis.
        """
        vertex_indices = np.unique(triangles_to_include)
        #check if vertices inside block.+1 if above max, -1 if below origin
        g_include=(self.gray_surface['coords'][vertex_indices] > self.max_dimension).astype(int) - (self.gray_surface['coords'][vertex_indices] < self.origin).astype(int)
        w_include=(self.white_surface['coords'][vertex_indices] > self.max_dimension).astype(int) - (self.white_surface['coords'][vertex_indices] < self.origin).astype(int)
        #exclude if both are either 1 or -1, so if multiplied +1.
        exclude=np.any((g_include*w_include)==1,axis=1)
        #include if either grey or white is inside
        surface_mask_indices = vertex_indices[np.logical_not(exclude)]
        surface_mask=np.zeros(len(self.gray_surface['coords'])).astype(bool)
        surface_mask[surface_mask_indices] = True
        # NOTE(review): the next line's result is discarded (no-op), and
        # surface_mask_triangles operates on self.triangles_to_include
        # rather than the ``triangles_to_include`` parameter — confirm.
        np.all(surface_mask[self.triangles_to_include],axis=1)
        #mask triangles
        return self.surface_mask_triangles(surface_mask)

    def calculate_volume_surf_coordinates(self):
        """Calculate cortical depths and barycentric coordinates for voxels
        and triangles in the volume and store them in the data dictionary
        (serial reference implementation)."""
        print('{} triangles included'.format(len(self.triangles_to_include)))
        for k,triangle in enumerate(self.triangles_to_include):
            if k % 10000 ==0:
                print('{}% done'.format(100*k/len(self.triangles_to_include)))
            # Each triangle's gray/white pair defines a prism; find the
            # voxels inside it and their depth + barycentric coordinates.
            prism=self.generate_prism(self.gray_surface['coords'],self.white_surface['coords'],triangle)
            bbox = SurfaceVolumeMapper.prism_bounding_box(prism)
            world_coords, voxel_coords= SurfaceVolumeMapper.voxel_world_coords_in_box(bbox,self.origin, self.resolution, self.dimensions)
            wc, vc, depths, tri_coords=SurfaceVolumeMapper.get_depth_and_barycentric_coordinates_for_prism(world_coords,voxel_coords,prism)
            if len(vc)>0:
                self.volume_surf_coordinates['voxel_coordinates'].extend(vc.tolist())
                self.volume_surf_coordinates['depths'].extend(depths.tolist())
                self.volume_surf_coordinates['triangles'].extend(np.tile(triangle,(len(depths),1)).tolist())
                self.volume_surf_coordinates['triangle_coordinates'].extend(tri_coords.tolist())
        lv=len(self.volume_surf_coordinates['voxel_coordinates'])
        ld=len(self.volume_surf_coordinates['depths'])
        for key in self.volume_surf_coordinates.keys():
            self.volume_surf_coordinates[key] = np.array(self.volume_surf_coordinates[key])
        assert lv==ld,'lengths dont match depths={}voxel_coords{}'.format(ld,lv)
        return

    def calculate_volume_surf_coordinates_parallel(self):
        """Calculate depths and barycentric coordinates for voxels and
        triangles in the volume, split over triangle subsets.

        NOTE(review): num_process is 1 and the pool call is commented out,
        so this currently runs serially over the subsets.
        """
        num_process=1
        volume_surf_coordinates={'voxel_coordinates':[],
                                 'triangles':[],
                                 'depths':[],
                                 'triangle_coordinates':[]}
        subsets = np.array_split(np.arange(len(self.triangles_to_include)),num_process)
        func = partial(SurfaceVolumeMapper.calculate_volume_surf_coordinates_one_prism,
                       self.gray_surface['coords'],self.white_surface['coords'],
                       self.triangles_to_include,
                       self.origin, self.resolution, self.dimensions, subsets)
        t1=time.time()
        #Threading doesn't work here but process pool does.
        # with concurrent.futures.ProcessPoolExecutor(num_process) as pool:
        #     store = list(pool.map(func,range(len(subsets))))
        store=[]
        for k in range(len(subsets)):
            store.append(SurfaceVolumeMapper.calculate_volume_surf_coordinates_one_prism(
                self.gray_surface['coords'],self.white_surface['coords'],
                self.triangles_to_include,
                self.origin, self.resolution, self.dimensions, subsets,k))
        # Merge each worker's partial dictionaries, then convert to arrays.
        for pool_output in store:
            for key in self.volume_surf_coordinates.keys():
                self.volume_surf_coordinates[key].extend(pool_output[key])
        for key in self.volume_surf_coordinates.keys():
            self.volume_surf_coordinates[key] = np.array(self.volume_surf_coordinates[key])
        t2=time.time()
        print('function time: ',t2-t1)
        #for key in volume_surf_coordinates.keys():
        #    volume_surf_coordinates[key] = [x for x in volume_surf_coordinates[key] if x]
        #    self.volume_surf_coordinates2[key]=np.array([item for sublist in volume_surf_coordinates[key] for item in sublist])
        #t3=time.time()
        #print('sorting time: ',t3-t2)
        return

    @staticmethod
    def calculate_volume_surf_coordinates_one_prism(
            gray_surface_coords,white_surface_coords,
            triangles,
            origin, resolution, dimensions, subset_triangles,k):
        """Calculate voxel/depth/barycentric coordinates for the ``k``-th
        subset of triangles and return them as a dictionary of lists."""
        store_surf_coordinates={'voxel_coordinates':[],
                                'triangles':[],
                                'depths':[],
                                'triangle_coordinates':[]}
        percentage_divider=np.round(len(subset_triangles[k])/10).astype(int)
        for counter,tri_index in enumerate(subset_triangles[k]):
            if counter % percentage_divider ==0:
                print('Process {} is {}% done'.format(k,np.round(100*counter/len(subset_triangles[k]))))
            prism = SurfaceVolumeMapper.generate_prism(gray_surface_coords, white_surface_coords, triangles[tri_index])
            bbox = SurfaceVolumeMapper.prism_bounding_box(prism)
            world_coords, voxel_coords= SurfaceVolumeMapper.voxel_world_coords_in_box(bbox,origin, resolution, dimensions)
            wc, vc, depths, tri_coords=SurfaceVolumeMapper.get_depth_and_barycentric_coordinates_for_prism(world_coords,voxel_coords,prism)
            #if some coordinates are returned, then store these
            if len(vc)>0:
                store_surf_coordinates['voxel_coordinates'].extend(vc.tolist())
                store_surf_coordinates['depths'].extend(depths.tolist())
                store_surf_coordinates['triangles'].extend(np.tile(triangles[tri_index],(len(depths),1)).tolist())
                store_surf_coordinates['triangle_coordinates'].extend(tri_coords.tolist())
        return store_surf_coordinates

    @staticmethod
    def generate_prism(gray_surface_coords,white_surface_coords,triangle):
        """Return coordinates for a prism as a dictionary of two triangles
        (the gray-surface triangle and the matching white-surface triangle).
        Ordering is g1,g2,g3 - w1,w2,w3."""
        prism_coordinates={'g_triangle':gray_surface_coords[triangle],'w_triangle':white_surface_coords[triangle]}
        return prism_coordinates

    @staticmethod
    def bounding_box(coords):
        """Calculate the axis-aligned bounding box (mins, maxs) for coords."""
        mins=np.min(coords,axis=0)
        maxs=np.max(coords,axis=0)
        return mins, maxs

    @staticmethod
    def prism_bounding_box(prism):
        """Return the two defining corners of a box enclosing the prism,
        i.e. the minimum and maximum values in the 3 dimensions."""
        return SurfaceVolumeMapper.bounding_box(np.vstack((prism['g_triangle'],prism['w_triangle'])))

    @staticmethod
    def voxel_world_coords_in_box(box, origin_offset, voxel_resolution, dimensions):
        """Calculate which voxels of the block lie inside a world-space box.

        Returns (world_coordinates, voxel_indices), or (None, None) when the
        box does not overlap the block.  Assumes axis orderings of box,
        origin_offset, voxel_resolution and dimensions are all the same
        (usually xyz).

        NOTE(review): callers pass the None result straight into
        get_depth_and_barycentric_coordinates_for_prism, which would raise
        before its own None check — confirm the None path can occur.
        """
        #calculate box corners in voxel indices. Ensure voxel coordinates are non-negative and do not
        #exceed volume limits
        indices_min = np.min((np.max((np.floor((box[0] - origin_offset)/voxel_resolution),[0,0,0]),axis=0),dimensions),axis=0).astype(int)
        indices_max = np.min((np.max((np.ceil((box[1]- origin_offset)/voxel_resolution), [0,0,0]), axis=0),dimensions), axis=0).astype(int)
        if (indices_min == indices_max).all():
            #box not in volume block.
            return None, None
        #get a grid of coordinates
        voxel_coordinates=np.mgrid[indices_min[0]:indices_max[0],
                                   indices_min[1]:indices_max[1],
                                   indices_min[2]:indices_max[2]].T
        voxel_coordinates = np.reshape(voxel_coordinates,(voxel_coordinates.size//3,3))
        #convert to world coordinates
        world_coordinates=origin_offset+voxel_coordinates*voxel_resolution
        #mask out those not in block to speed up calculations on slices
        return world_coordinates, voxel_coordinates.astype(int)

    @staticmethod
    def get_exact_depth_multiple_coordinates(voxel_coords,prism,decimals=5, printing=False):
        """Return the exact cortical depth of each point in ``voxel_coords``.

        Depth is the root in [0, 1] of a cubic in the prism's interpolation
        parameter; NaN marks points with no valid root.  Due to imprecision
        in estimating roots of the cubic it is advisable to round to the
        desired accuracy: for 3mm cortex, decimals=5 gives an accuracy of
        30 nanometers.
        """
        #solve for depth
        connecting_vectors = prism['w_triangle']-prism['g_triangle']
        connecting_inplane_vectors = np.array([connecting_vectors[2]-connecting_vectors[0],
                                               connecting_vectors[1]-connecting_vectors[0]])
        #k2 term of cp
        cross_product_connecting_vectors = np.cross(connecting_inplane_vectors[0],connecting_inplane_vectors[1])
        gray_inplane_vectors = np.array([prism['g_triangle'][2]-prism['g_triangle'][0],
                                         prism['g_triangle'][1]-prism['g_triangle'][0]])
        #const term of cp
        cross_product_gray_inplane_vectors = np.cross(gray_inplane_vectors[0],gray_inplane_vectors[1])
        #k term of cp
        cross_prod_gray_connecting1 = np.cross(gray_inplane_vectors[1], connecting_inplane_vectors[0])
        cross_prod_gray_connecting2 = np.cross(gray_inplane_vectors[0], connecting_inplane_vectors[1])
        cross_prod_gray_connecting_sum = -cross_prod_gray_connecting1+cross_prod_gray_connecting2
        g3 = prism['g_triangle'][2]
        v3 = connecting_vectors[2]
        g3_voxel_coords = g3-voxel_coords
        #precalculate fixed parts
        k3 = np.dot(cross_product_connecting_vectors,v3)
        k2_fixed=np.dot(v3,cross_prod_gray_connecting_sum)
        k2 = k2_fixed+ np.dot(cross_product_connecting_vectors,g3_voxel_coords.T)
        k1_fixed=np.dot(v3, cross_product_gray_inplane_vectors)
        k1 = k1_fixed + np.dot(cross_prod_gray_connecting_sum,g3_voxel_coords.T)
        k0 = np.dot(cross_product_gray_inplane_vectors,g3_voxel_coords.T)
        ## TODO adapt real cubic solve so that the outputs work and match solve.
        # all_depths_c = mh.real_cubic_solve(k3, k2,k1,k0)
        # all_depths_c[np.logical_or(all_depths_c<0,all_depths_c>1)]=float('NaN')
        all_depths=np.zeros(len(voxel_coords))
        for k, voxel_coord in enumerate(voxel_coords):
            if printing:
                print('inside',k)
            #TODO replace with matrix roots function
            #depths = np.roots([k3,k2[k],k1[k],k0[k]])
            depths = mh.solve(k3, k2[k], k1[k], k0[k])
            # Keep only real roots inside [0, 1] (valid cortical depths).
            are_real = np.isreal(depths)
            depths = np.round(np.real(depths[are_real]),decimals=decimals)
            depths = depths[np.logical_and(depths>=0,depths<=1.0)]
            if len(depths)==0:
                all_depths[k]=float('NaN')
            else:
                # NOTE(review): if multiple valid roots exist, the first is
                # taken arbitrarily — confirm this cannot pick a wrong sheet.
                all_depths[k]=depths[0]
        #print(np.vstack((all_depths_c,all_depths)))
        return all_depths

    @staticmethod
    def barycentric_coordinates(p,tri):
        """Return the barycentric coordinates of point ``p`` in triangle
        ``tri`` (3x3 vertex array).  Use the outputs for linear interpolation."""
        a = (np.square(tri[0,0]-tri[2,0]) + np.square(tri[0,1]-tri[2,1]) + np.square(tri[0,2]-tri[2,2]))
        b = (tri[1,0]-tri[2,0])*(tri[0,0]-tri[2,0]) + (tri[1,1]-tri[2,1])*(tri[0,1]-tri[2,1]) + (tri[1,2]-tri[2,2])*(tri[0,2]-tri[2,2])
        c = b
        d = (np.square(tri[1,0]-tri[2,0]) + np.square(tri[1,1]-tri[2,1]) + np.square(tri[1,2]-tri[2,2]))
        f = (p[0] - tri[2,0])*(tri[0,0]-tri[2,0]) + (p[1]-tri[2,1])*(tri[0,1]-tri[2,1]) + (p[2]-tri[2,2])*(tri[0,2]-tri[2,2])
        g = (p[0] - tri[2,0])*(tri[1,0]-tri[2,0]) + (p[1]-tri[2,1])*(tri[1,1]-tri[2,1]) + (p[2]-tri[2,2])*(tri[1,2]-tri[2,2])
        chi = (d*f - b*g)/(a*d - b*c)
        eta = (-c*f + a*g)/(a*d - b*c)
        lambda1 = chi
        lambda2 = eta
        lambda3 = 1 - chi - eta
        return lambda1, lambda2, lambda3

    # NOTE(review): unlike its scalar twin above, this is not decorated with
    # @staticmethod; it still works when called on the class (plain functions
    # are not bound in that case on Python 3), but the decorator is likely
    # missing — confirm.
    def barycentric_coordinates_matrix(p,tri):
        """Vectorised barycentric coordinates: ``p`` is (N,3) points and
        ``tri`` is (N,3,3) triangles; returns an (N,3) array of weights.
        Use the outputs for linear interpolation."""
        a = (np.square(tri[:,0,0]-tri[:,2,0]) + np.square(tri[:,0,1]-tri[:,2,1]) + np.square(tri[:,0,2]-tri[:,2,2]))
        b = (tri[:,1,0]-tri[:,2,0])*(tri[:,0,0]-tri[:,2,0]) + (tri[:,1,1]-tri[:,2,1])*(tri[:,0,1]-tri[:,2,1]) + (tri[:,1,2]-tri[:,2,2])*(tri[:,0,2]-tri[:,2,2])
        c = b
        d = (np.square(tri[:,1,0]-tri[:,2,0]) + np.square(tri[:,1,1]-tri[:,2,1]) + np.square(tri[:,1,2]-tri[:,2,2]))
        f = (p[:,0] - tri[:,2,0])*(tri[:,0,0]-tri[:,2,0]) + (p[:,1]-tri[:,2,1])*(tri[:,0,1]-tri[:,2,1]) + (p[:,2]-tri[:,2,2])*(tri[:,0,2]-tri[:,2,2])
        g = (p[:,0] - tri[:,2,0])*(tri[:,1,0]-tri[:,2,0]) + (p[:,1]-tri[:,2,1])*(tri[:,1,1]-tri[:,2,1]) + (p[:,2]-tri[:,2,2])*(tri[:,1,2]-tri[:,2,2])
        chi = (d*f - b*g)/(a*d - b*c)
        eta = (-c*f + a*g)/(a*d - b*c)
        lambda1 = chi
        lambda2 = eta
        lambda3 = 1 - chi - eta
        return np.vstack((lambda1, lambda2, lambda3)).T

    @staticmethod
    def get_depth_and_barycentric_coordinates_for_prism(world_coords,voxel_coords,prism, printing = False):
        """Calculate the precise depth and barycentric coordinates within a
        prism for all world coordinates.

        depth - fractional depth from gray to white surface
        barycentric - fractional distance from each vertex in triangle
        Points with no valid depth or with barycentric coordinates outside
        the triangle are filtered out of all returned arrays.
        """
        if printing:
            print(world_coords,prism, printing)
            print(len(world_coords))
        depths = SurfaceVolumeMapper.get_exact_depth_multiple_coordinates(world_coords,prism, printing=printing)
        #filter out coordinates not in the right depth
        if depths is None:
            return None, None, None, None
        world_coords = world_coords[~np.isnan(depths)]
        voxel_coords = voxel_coords[~np.isnan(depths)]
        depths=depths[~np.isnan(depths)]
        #TODO if no depths.
        #calculate barycentric coordinates for remaining voxels
        vector=prism['w_triangle']-prism['g_triangle']
        barycentric_coords = np.zeros((len(depths),3))
        #for k, (world_coord, depth) in enumerate(zip(world_coords,depths)):
        #    barycentric_coords[k] = SurfaceVolumeMapper.barycentric_coordinates(world_coord, depth*vector +prism['g_triangle'])
        # Evaluate barycentric weights in the triangle interpolated to each
        # point's depth between the gray and white triangles.
        barycentric_coords = SurfaceVolumeMapper.barycentric_coordinates_matrix(world_coords, vector*np.tile(depths,(3,3,1)).T +np.tile(prism['g_triangle'],(len(depths),1,1)))
        #filter out coordinates outside of triangle
        exclude=np.logical_or(np.any(barycentric_coords<0,axis=1),np.any(barycentric_coords>1,axis=1))
        world_coords = world_coords[~exclude]
        voxel_coords = voxel_coords[~exclude]
        depths=depths[~exclude]
        barycentric_coords=barycentric_coords[~exclude]
        return world_coords, voxel_coords, depths, barycentric_coords
|
#!/usr/bin/python3
import binascii
import errno
import math
import os
import subprocess
import sys
import parted
from hemeraplatformsdk.imagebuilders.devices.BaseDevice import BaseDevice
from hemeraplatformsdk.imagebuilders.devices.BaseDevice import ExtractedFileTooBigException
from hemeraplatformsdk.imagebuilders.devices.BaseDevice import WrongPartitionTypeException
from hemeraplatformsdk.imagebuilders.devices.PartedHelper import PartedHelper
DD_SECTORS=8192
class RawDevice(BaseDevice):
    def __init__(self, device_dictionary, image_builder):
        """Create a raw-image device description.

        The backing .raw filename is derived, in order of preference, from
        the configured install device, the first partition's device, or
        just the image name.
        """
        super().__init__(device_dictionary, image_builder)
        assert self.data["type"].startswith("raw")
        try:
            # Prefer naming after the install device, with the trailing
            # partition suffix stripped (e.g. .../mmcblk0p1 -> mmcblk0).
            self.filename = os.path.join(self.builder.build_dir,
                                         "{}_{}.raw".format(self.builder.image_name, self.data["install_device"]
                                                            .split("/")[-1].rsplit("p", 1)[0]))
        except KeyError:
            try:
                # No install device configured: fall back to the first
                # partition's device name.
                self.filename = os.path.join(self.builder.build_dir,
                                             "{}_{}.raw".format(self.builder.image_name,
                                                                self.data["partitions"][0]["device"]
                                                                .split("/")[-1].rsplit("p", 1)[0]))
            except KeyError:
                # Neither configured: name the file after the image alone.
                self.filename = os.path.join(self.builder.build_dir,
                                             "{}.raw".format(self.builder.image_name))
        # Allocate and setup
        self.parted_helper = PartedHelper(self.filename)
        self.partition_start = {}  # mountpoint -> start sector (filled during partition creation)
        self.mounted_partitions = []
    def can_be_mounted(self):
        """This device type supports mounting."""
        return True
def get_base_mountpoint(self):
# We have a set of partitions: let's return the most basic one.
return sorted([p for p in self.data["partitions"] if "mountpoint" in p],
key=lambda x: x["mountpoint"][:-1].count('/'))[0]["mountpoint"]
    def can_be_packaged(self):
        """This device type does not support packaging."""
        return False
    def has_fstab_entries(self):
        """This device type contributes entries to fstab."""
        return True
def needs_file_extraction(self):
return "dd" in self.data or [p for p in self.data["partitions"] if "flash" in p]
    def extract_file(self, base_path):
        """Copy configured payloads from the built rootfs into the raw image.

        Two mechanisms are handled:
          * an optional "dd" entry, applied to the whole image via the
            external ``dd`` tool (with optional size check and offsets);
          * per-partition "flash" files, written directly at each matching
            partition's start sector.
        Source files are removed afterwards unless "keep_in_image" is set.
        """
        # We need to access the device externally
        self.parted_helper.device.beginExternalAccess()
        try:
            # "dd" section; a missing "dd" key raises KeyError here and is
            # swallowed by the outer handler below (section is optional).
            dd_call = ["dd", "if=" + os.path.join(base_path, self.data["dd"]["file"][1:]), "of=" + self.filename]
            print("-- Running dd on built image...")
            try:
                # Optional size guard: refuse files above "max_file_size".
                file_size = os.path.getsize(os.path.join(base_path, self.data["dd"]["file"][1:]))
                if file_size > self.data["dd"]["max_file_size"]:
                    raise ExtractedFileTooBigException("Extracted file {} is of size {}, which is bigger than {}. "
                                                       "Aborting.".format(self.data["dd"]["file"],
                                                                          file_size, self.data["dd"]["max_file_size"]))
            except KeyError:
                pass
            try:
                dd_call.append("skip=" + str(self.data["dd"]["input_offset"]))
            except KeyError:
                pass
            try:
                dd_call.append("seek=" + str(self.data["dd"]["output_offset"]))
            except KeyError:
                pass
            dd_call.append("conv=notrunc")
            subprocess.check_call(dd_call)
            try:
                keep_in_image = self.data["dd"]["keep_in_image"]
            except KeyError:
                keep_in_image = False
            if not keep_in_image:
                os.remove(os.path.join(base_path, self.data["dd"]["file"][1:]))
        except KeyError:
            pass
        for p in [p for p in self.data["partitions"] if "flash" in p]:
            # Get the filename
            # A leading ':' marks a path relative to the extracted rootfs.
            if p["flash"].startswith(':'):
                filename = os.path.join(base_path, p["flash"][1:])
            else:
                filename = p["flash"]
            # Get the partition
            for partition in self.parted_helper.disk.partitions:
                if partition.name != p["name"]:
                    continue
                print("--- Flashing {} onto {} starting from sector {}".format(filename, partition.name,
                                                                               partition.geometry.start))
                size = os.path.getsize(filename)
                start_sector = partition.geometry.start
                num_sectors = partition.geometry.end - partition.geometry.start
                # let's check as much as possible before doing anything harmful...
                assert num_sectors > 0, \
                    "num_partition_sectors is 0, don't know what to do"
                assert size <= num_sectors * self.parted_helper.device.sectorSize, \
                    "File size is %d, too big for partition of size %d" \
                    % (size, num_sectors * self.parted_helper.device.sectorSize)
                assert start_sector >= 0, \
                    "Start sector is %d" % start_sector
                with open(filename, "rb") as f, open(self.filename, "rb+") as o:
                    o.seek(partition.geometry.start * self.parted_helper.device.sectorSize)
                    buf = f.read(DD_SECTORS * self.parted_helper.device.sectorSize)
                    while len(buf) > 0:
                        if len(buf) % self.parted_helper.device.sectorSize != 0:
                            # we might need to write the last few bytes
                            # and align to multiple of SECTOR_SIZE
                            # NOTE(review): read_sectors is computed but unused.
                            read_sectors = int(math.ceil(len(buf) / self.parted_helper.device.sectorSize))
                            # Each b'00' pair appended to the hex encoding
                            # decodes to one zero byte, so this zero-pads the
                            # final chunk up to a sector boundary.
                            pad = b'00' * (self.parted_helper.device.sectorSize -
                                           (len(buf) % self.parted_helper.device.sectorSize))
                            buf = binascii.unhexlify(binascii.hexlify(buf) + pad)
                        o.write(buf)
                        buf = f.read(DD_SECTORS * self.parted_helper.device.sectorSize)
                # Matching partition found and flashed; stop searching.
                break
            try:
                keep_in_image = p["keep_in_image"]
            except KeyError:
                keep_in_image = False
            if not keep_in_image:
                os.remove(filename)
        # End access to Device
        self.parted_helper.device.endExternalAccess()
def get_installer_actions(self):
# Just dd the raw file brutally.
try:
return [{
'type': 'dd',
'source': os.path.join('/installer', self.filename.split('/')[-1]),
'target': self.data["install_device"],
'run_on_full_flash': True,
'run_on_partial_flash': True
}]
except KeyError:
# This device has nothing to do.
return []
    def create_device(self):
        """Create the raw disk with an "msdos" (MBR) partition table."""
        self.create_disk("msdos")
    def create_disk(self, type):
        """Create the backing disk and then create its partitions.

        The disk size comes from the "size" key when present; otherwise it
        is the sum of all partition sizes plus 8 MB of padding.  ``type``
        is the parted disk-label type (e.g. "msdos").
        """
        # Let's start by computing overall size.
        try:
            disk_size = self.data["size"]
        except KeyError:
            # Add 8 MB of padding.
            disk_size = 8
            for p in self.data["partitions"]:
                disk_size += p["size"]
        self.parted_helper.create_disk(disk_size, type)
        self.create_partitions()
def create_partitions(self):
for p in self.data["partitions"]:
try:
geometry = parted.Geometry(self.parted_helper.device, start=p["start_sector"], end=p["end_sector"])
exact_geom = True
except KeyError:
try:
start = p["start_sector"]
except KeyError:
start = self.parted_helper.get_free_regions(self.parted_helper.device.optimumAlignment)[-1].start + \
self.parted_helper.device.minimumAlignment.offset + \
self.parted_helper.device.minimumAlignment.grainSize
end = start + parted.sizeToSectors(int(p["size"]), 'MiB', self.parted_helper.device.sectorSize)
geometry = parted.Geometry(self.parted_helper.device, start=start, end=end)
exact_geom = False
try:
fs = parted.FileSystem(type=p["filesystem"], geometry=geometry)
except KeyError:
fs = None
# Don't care
pass
try:
if p["partition_type"] == "extended":
part_type = parted.PARTITION_EXTENDED
elif p["partition_type"] == "logical":
part_type = parted.PARTITION_LOGICAL
elif p["partition_type"] == "primary":
part_type = parted.PARTITION_NORMAL
else:
raise WrongPartitionTypeException("{} is not a valid partition type."
"Valid types are: extended, logical, primary.")
except KeyError:
part_type = parted.PARTITION_NORMAL
partition = self.parted_helper.add_partition(geometry=geometry, name=p["name"] if "name" in p else None,
part_type=part_type, fs=fs, exact_geom=exact_geom)
try:
for flag in p["flags"]:
if flag == "msftdata":
partition.setFlag(16)
elif flag == "boot":
partition.setFlag(parted.PARTITION_BOOT)
except KeyError:
# Don't care
pass
self.parted_helper.disk.commit()
# Add data
try:
self.partition_start[p["mountpoint"]] = partition.geometry.start
except KeyError:
pass
# Format the filesystem, if any
try:
# Do this print so we can trigger the exception!
print("--- Now formatting as {}".format(p["filesystem"]))
mkfs_call = ["mkfs." + self.data["filesystem"]]
try:
if self.data["filesystem"].startswith("ext"):
mkfs_call += ["-L", self.data["label"]]
elif self.data["filesystem"] == "vfat":
mkfs_call += ["-n", self.data["label"]]
except KeyError:
pass
mkfs_call += ["/dev/loop7"]
with open(os.devnull, "w") as f:
subprocess.check_call(
["losetup", "-o", str(partition.geometry.start * self.parted_helper.device.sectorSize),
"--sizelimit", str((partition.geometry.end - partition.geometry.start) *
self.parted_helper.device.sectorSize),
"/dev/loop7", self.filename])
subprocess.check_call([mkfs_call], stdout=f, stderr=f)
subprocess.check_call(["losetup", "-d", "/dev/loop7"])
except KeyError:
# Don't care
pass
def mount_device(self, base_path):
# We want to sort mountpoints based on the occurrences of the number of / (except for the first one, of course).
# / must always be mounted first. Then we do a rundown of each single tree level for each mountpoint, so that
# we make sure that no device is obfuscated.
for p in sorted([p for p in self.data["partitions"] if "mountpoint" in p],
key=lambda x: x["mountpoint"][:-1].count('/')):
# Ignore errors when making dirs
try:
os.makedirs(os.path.join(base_path, p["mountpoint"][1:]))
except IOError as exc:
if exc.errno == errno.EEXIST:
pass
else:
print("--- Warning: creation of directory {} failed: {}."
.format(os.path.join(base_path, p["mountpoint"][1:]), exc.strerror), file=sys.stderr)
print("--- Mounting {}".format(p["mountpoint"]))
subprocess.check_call(
["mount", "-o", "loop,offset=" + str(self.partition_start[p["mountpoint"]] *
self.parted_helper.device.sectorSize),
self.filename, os.path.join(base_path, p["mountpoint"][1:])])
self.mounted_partitions.append(os.path.join(base_path, p["mountpoint"][1:]))
def unmount_device(self):
for p in reversed(self.mounted_partitions):
print("--- Unmounting {}".format(p))
subprocess.check_call(["umount", p])
    def get_device_files(self):
        """Return the image files backing this device (a single raw image)."""
        return [self.filename]
def get_fstab_entries(self):
entries = []
for p in self.data["partitions"]:
check_fs = 0
if p["mountpoint"].startswith("/var"):
check_fs = 1
try:
entries.append('LABEL="{}" {} {} {} 0 {}'.format(p["label"], p["mountpoint"], p["filesystem"],
self.builder.get_partition_mount_options(p),
check_fs))
except KeyError:
entries.append("{} {} {} {} 0 {}".format(p["device"], p["mountpoint"], p["filesystem"],
self.builder.get_partition_mount_options(p),
check_fs))
return entries
    def get_partitions(self):
        """Return the raw partition description dicts for this device."""
        return self.data["partitions"]
|
# Copyright (c) 2021 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import io
import logging
import pytest
from inators import log as inators_log
@pytest.mark.parametrize('level, value', [
    ('TRACE', inators_log.TRACE),
    ('DEBUG', logging.DEBUG),
    ('INFO', logging.INFO),
    ('WARNING', logging.WARNING),
    ('ERROR', logging.ERROR),
    ('CRITICAL', logging.CRITICAL),
    ('DISABLE', inators_log.DISABLE),
])
def test_level_names(level, value):
    # The string keys of inators_log.levels must map to the stdlib numeric
    # levels plus the two inators-specific extras (TRACE and DISABLE).
    assert inators_log.levels[level] == value
@pytest.mark.parametrize('logger_level, msg_level, enabled', [
    (inators_log.TRACE, inators_log.TRACE, True),
    (inators_log.TRACE, logging.DEBUG, True),
    (logging.DEBUG, inators_log.TRACE, False),
    (inators_log.DISABLE, inators_log.TRACE, False),
    (inators_log.DISABLE, logging.CRITICAL, False),
])
def test_level_values(logger_level, msg_level, enabled):
    # A per-level logger name keeps the parametrized cases independent of
    # each other (logging caches loggers by name).
    logger = logging.getLogger(f'{__name__}.logger{logger_level}')
    logger.setLevel(logger_level)
    assert logger.isEnabledFor(msg_level) == enabled
@pytest.mark.parametrize('logger_level, msg, output', [
    (inators_log.TRACE, u'foo', u'foo\n'),
    (inators_log.DEBUG, u'bar', u''),
    (inators_log.DISABLE, u'baz', u''),
])
def test_trace(logger_level, msg, output):
    # Capture everything the logger emits into an in-memory stream, then
    # check that trace() output appears exactly when the level allows it.
    sink = io.StringIO()
    logger = inators_log.getLogger(f'{__name__}.trace')
    logger.addHandler(logging.StreamHandler(stream=sink))
    logger.setLevel(logger_level)
    logger.trace(msg)
    assert sink.getvalue() == output
|
from mayan.apps.dependencies.classes import PythonDependency
# Runtime dependencies registered with Mayan's dependency framework.  The
# version strings are exact ("==") pins so deployments stay reproducible.
PythonDependency(
    module=__name__, name='gevent', version_string='==20.4.0'
)
PythonDependency(
    module=__name__, name='gunicorn', version_string='==20.0.4'
)
PythonDependency(
    module=__name__, name='whitenoise', version_string='==5.0.1'
)
|
from django.shortcuts import render_to_response, redirect
from django.contrib import messages
from django.template import RequestContext
from django.contrib.sites.models import Site
from django.shortcuts import render_to_response, redirect
from django.contrib.auth.models import User
from registration.models import RegistrationProfile
from urllib import urlopen
from StringIO import StringIO
from django.template.loader import render_to_string
from django.forms.models import model_to_dict
from home.constants import HTML_SOCIAL_AUTH_FORM, URL_SOCIAL_AUTH_GET_DETAILS, URL_LOGIN, URL_SOCIAL_AUTH_COMPLETE
from socializing.models import UserProfile, UserProfileForm
from social.pipeline.partial import partial
from pprint import pprint
from django.core.files import File
from django.core.files.uploadedfile import InMemoryUploadedFile
from requests import request, HTTPError
from django.core.files.base import ContentFile
from constants import *
from utilities.utils import *
import settings, ipdb
#Used by social auth pipeline to get a username value when authenticate a social user for the first time
@partial
def redirect_to_form(strategy, backend, uid, response, details, user=None, is_new=False, *args, **kwargs):
    """Social-auth pipeline step: collect profile details from first-time users.

    For a new social login (``is_new``), GET renders the registration form
    pre-filled from the provider's ``details``; POST validates the submission
    and copies the fields into ``details`` for the later pipeline steps.
    Existing users are greeted (or told their account is inactive) and
    redirected home.  ``@partial`` lets social-auth pause/resume the pipeline
    around this step.
    """
    request = strategy.request
    if is_new == True:
        if backend.name == "twitter":
            # Drop the "_normal" suffix to get the full-size avatar.
            pic_url = response.get('profile_image_url').replace("_normal", "")
        elif backend.name == "facebook":
            pic_url = "http://graph.facebook.com/%s/picture?type=large" % response["id"]
        # NOTE(review): pic_url is only bound for twitter/facebook; any other
        # backend reaches the branches below with pic_url undefined (NameError).
        # Confirm these are the only enabled backends.
        if request.method == 'GET': #If this is the first time user is using social auth...
            # Usernames may not contain spaces.
            details["username"] = details["username"].strip().replace(" ", "")
            form = UserProfileForm(initial=details)
            # Social users authenticate via the provider — no local password.
            form.fields.pop("password1")
            form.fields.pop("password2")
            return render_to_response(HTML_SOCIAL_AUTH_FORM, {
                'form': form,
                "pic_url": pic_url,
                "RECAPTCHA_CLIENT_SECRET": settings.RECAPTCHA_CLIENT_SECRET,
                "consent_form_minor_text":CONSENT_FORM_MINOR_TEXT,
                "consent_form_adult_text":CONSENT_FORM_ADULT_TEXT
            }, RequestContext(request))
        if request.method == 'POST': #If the first-time user submits the form...
            form = UserProfileForm(request.POST, request.FILES)
            success, message = UserProfile.check_registration(post_obj=request.POST, userprofileform=form, social=True)
            if success == False:
                # Validation failed: re-render the form with the error message.
                messages.error(request, message)
                form.fields.pop("password1")
                form.fields.pop("password2")
                return render_to_response(HTML_SOCIAL_AUTH_FORM, {
                    'form': form,
                    'pic_url':pic_url,
                    "RECAPTCHA_CLIENT_SECRET": settings.RECAPTCHA_CLIENT_SECRET,
                    "consent_form_minor_text":CONSENT_FORM_MINOR_TEXT,
                    "consent_form_adult_text":CONSENT_FORM_ADULT_TEXT
                }, RequestContext(request))
            # Copy the validated form fields into `details` so the next
            # pipeline step (setup_user_details) can persist them.
            details["username"] = username = request.POST.get("username")
            details["email"] = email = request.POST.get("email")
            details["first_name"] = first_name = request.POST.get("first_name")
            details["last_name"] = last_name = request.POST.get("last_name")
            details["minor"] = is_minor(request.POST.get("dob"))
            details["dob"] = request.POST.get("dob")
            details["guardian_email"] = request.POST.get("guardian_email")
            if bool(request.FILES): #Need to check if file was uploaded.
                details["pic_url"] = request.FILES.get("img_path")
            else:
                # No upload: fall back to the provider's avatar URL.
                details["pic_url"] = pic_url
            print_info_msg("New Details: %s" % details)
    elif user.is_active == True:
        messages.success(request, "Welcome, %s!" % user.username)
    else:
        messages.error(request, "Sorry, you haven't fully activated your account yet!")
    return redirect(URL_HOME)
@partial
def setup_user_details(strategy, backend, uid, response, details, user=None, is_new=False, *args, **kwargs):
    """Social-auth pipeline step: persist a new user's details and profile.

    Runs after redirect_to_form has populated ``details``.  Minors are kept
    inactive until both they and their guardian activate via emailed keys;
    everyone gets their name/email saved, their profile image set (either an
    uploaded file or a URL fetched from the provider), and a welcome message.
    """
    request = strategy.request
    if is_new == True:
        if details["minor"] == True:
            user.is_active = False #User is not supposed to be activated because of minor status.
            registration_profile = RegistrationProfile.objects.create_profile(user)
            profile = user.userprofile
            profile.is_minor = True
            profile.guardian_email = details["guardian_email"]
            # Guardian gets a separate activation key derived from the username.
            profile.guardian_activation_key = create_sha1_hash(details["username"])
            #Send an email to the minor.
            registration_profile.send_activation_email(Site.objects.get_current())
            #Send an email to the guardian with activation key.
            email_subject = render_to_string(TEXTFILE_EMAIL_GUARDIAN_SUBJECT, {})
            email_body = render_to_string(TEXTFILE_EMAIL_GUARDIAN_BODY, {
                "participant_email": details["email"],
                "guardian_activation_key": profile.guardian_activation_key,
                "consent_form_guardian_text": CONSENT_FORM_GUARDIAN_TEXT,
                "site": Site.objects.get_current()
            })
            send_email(email_subject, email_body, None, [profile.guardian_email])
            print_info_msg ("(SOCIAL AUTH): RegistrationProfile now created for inactive user %s" % user)
        user.first_name = details["first_name"]
        user.last_name = details["last_name"]
        user.username = details["username"]
        user.email = details["email"]
        user.save()
        # pic_url is either an uploaded file object or a URL string set by
        # redirect_to_form; only the latter needs fetching.
        if type(details["pic_url"]) == InMemoryUploadedFile:
            img_path = details["pic_url"]
        else:
            img_path = StringIO(urlopen(details["pic_url"]).read())
        profile = user.userprofile
        profile.set_images(img_path, save=True, rotation=request.POST.get("img_rotation"))
        profile.set_date_of_birth(details["dob"])
        profile.social_profile = True
        profile.save()
        if profile.is_minor == True:
            messages.success(request, "Thanks for registering for EPM! Look for an account activation link sent to both you and your parent/guardian email address.")
        else:
            messages.success(request, "Welcome to EmergencyPetMatcher, %s!" % details["username"])
|
"""Data model and functions for Tapis apps
"""
from tapis_cli.commands.taccapis.v2 import SERVICE_VERSION
from tapis_cli.commands.taccapis import TapisModel
from tapis_cli.display import Verbosity
from tapis_cli.search import argtype, argmod
__all__ = ['Metadata', 'API_NAME', 'SERVICE_VERSION']
API_NAME = 'meta'
class Metadata(TapisModel):
    """Model of a Tapis metadata record
    """
    # How record identifiers are resolved by the CLI machinery.
    service_id_type = 'Unique'
    # Each tuple drives one CLI search/display argument; the columns are
    # documented in the header comment below.
    SEARCH_ARGS = [
        # JSON_field, type, verbosity, mods_allowed, default_mod, choices, override_option, searchable
        ("uuid", argtype.STRING, Verbosity.BRIEF, argmod.STRING_DEFAULTS,
         argmod.DEFAULT, None, None, True),
        ("schemaId", argtype.STRING, Verbosity.RECORD_VERBOSE,
         argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, True),
        ("internalUsername", argtype.STRING, Verbosity.RECORD_VERBOSE,
         argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False),
        ("owner", argtype.STRING, Verbosity.BRIEF, argmod.STRING_DEFAULTS,
         argmod.DEFAULT, None, None, True),
        ("associationIds", argtype.ARRAY, Verbosity.RECORD,
         argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False),
        ("name", argtype.STRING, Verbosity.BRIEF, argmod.STRING_DEFAULTS,
         argmod.DEFAULT, None, None, True),
        ("value", argtype.STRING, Verbosity.RECORD, argmod.STRING_DEFAULTS,
         argmod.DEFAULT, None, None, False),
        ("created", argtype.DATETIME, Verbosity.RECORD, argmod.DATE_DEFAULTS,
         argmod.DATE_DEFAULT_MOD, None, None, False),
        ("lastUpdated", argtype.DATETIME, Verbosity.RECORD,
         argmod.DATE_DEFAULTS, argmod.DATE_DEFAULT_MOD, None, None, False),
        # "_links" is exposed to the CLI under the option name 'links'.
        ("_links", argtype.ARRAY, Verbosity.LISTING, argmod.STRING_DEFAULTS,
         argmod.DEFAULT, None, 'links', False)
    ]
|
# WARNING: Please don't edit this file. It was generated by Python/WinRT
from pyrt import _import_ns
import typing
import enum
__ns__ = _import_ns("Windows.Media")
try:
import pyrt.windows.applicationmodel.appservice
except:
pass
try:
import pyrt.windows.foundation
except:
pass
try:
import pyrt.windows.foundation.collections
except:
pass
try:
import pyrt.windows.graphics.directx
except:
pass
try:
import pyrt.windows.graphics.directx.direct3d11
except:
pass
try:
import pyrt.windows.graphics.imaging
except:
pass
try:
import pyrt.windows.storage
except:
pass
try:
import pyrt.windows.storage.streams
except:
pass
class AudioBufferAccessMode(enum.IntEnum):
    # Generated projection of the Windows.Media.AudioBufferAccessMode enum.
    Read = 0
    ReadWrite = 1
    Write = 2
class AudioProcessing(enum.IntEnum):
    # Generated projection of the Windows.Media.AudioProcessing enum.
    Default = 0
    Raw = 1
class MediaPlaybackAutoRepeatMode(enum.IntEnum):
    # Generated projection of Windows.Media.MediaPlaybackAutoRepeatMode.
    # NONE is upper-cased because `None` is a reserved word in Python.
    NONE = 0
    Track = 1
    List = 2
class MediaPlaybackStatus(enum.IntEnum):
    # Generated projection of the Windows.Media.MediaPlaybackStatus enum.
    Closed = 0
    Changing = 1
    Stopped = 2
    Playing = 3
    Paused = 4
class MediaPlaybackType(enum.IntEnum):
    # Generated projection of the Windows.Media.MediaPlaybackType enum.
    Unknown = 0
    Music = 1
    Video = 2
    Image = 3
class MediaTimelineControllerState(enum.IntEnum):
    # Generated projection of Windows.Media.MediaTimelineControllerState.
    Paused = 0
    Running = 1
    Stalled = 2
    Error = 3
class SoundLevel(enum.IntEnum):
    # Generated projection of the Windows.Media.SoundLevel enum.
    Muted = 0
    Low = 1
    Full = 2
class SystemMediaTransportControlsButton(enum.IntEnum):
    # Generated projection of Windows.Media.SystemMediaTransportControlsButton.
    Play = 0
    Pause = 1
    Stop = 2
    Record = 3
    FastForward = 4
    Rewind = 5
    Next = 6
    Previous = 7
    ChannelUp = 8
    ChannelDown = 9
class SystemMediaTransportControlsProperty(enum.IntEnum):
    # Generated projection of Windows.Media.SystemMediaTransportControlsProperty.
    SoundLevel = 0
# Re-export the Windows.Media runtime classes, structs and interfaces from the
# underlying WinRT namespace module so they are importable from this module.
AudioBuffer = __ns__.AudioBuffer
AudioFrame = __ns__.AudioFrame
AutoRepeatModeChangeRequestedEventArgs = __ns__.AutoRepeatModeChangeRequestedEventArgs
ImageDisplayProperties = __ns__.ImageDisplayProperties
MediaExtensionManager = __ns__.MediaExtensionManager
MediaMarkerTypes = __ns__.MediaMarkerTypes
MediaProcessingTriggerDetails = __ns__.MediaProcessingTriggerDetails
MediaTimelineController = __ns__.MediaTimelineController
MediaTimelineControllerFailedEventArgs = __ns__.MediaTimelineControllerFailedEventArgs
MusicDisplayProperties = __ns__.MusicDisplayProperties
PlaybackPositionChangeRequestedEventArgs = __ns__.PlaybackPositionChangeRequestedEventArgs
PlaybackRateChangeRequestedEventArgs = __ns__.PlaybackRateChangeRequestedEventArgs
ShuffleEnabledChangeRequestedEventArgs = __ns__.ShuffleEnabledChangeRequestedEventArgs
SystemMediaTransportControls = __ns__.SystemMediaTransportControls
SystemMediaTransportControlsButtonPressedEventArgs = __ns__.SystemMediaTransportControlsButtonPressedEventArgs
SystemMediaTransportControlsDisplayUpdater = __ns__.SystemMediaTransportControlsDisplayUpdater
SystemMediaTransportControlsPropertyChangedEventArgs = __ns__.SystemMediaTransportControlsPropertyChangedEventArgs
SystemMediaTransportControlsTimelineProperties = __ns__.SystemMediaTransportControlsTimelineProperties
VideoDisplayProperties = __ns__.VideoDisplayProperties
VideoEffects = __ns__.VideoEffects
VideoFrame = __ns__.VideoFrame
IMediaExtension = __ns__.IMediaExtension
IMediaFrame = __ns__.IMediaFrame
IMediaMarker = __ns__.IMediaMarker
IMediaMarkers = __ns__.IMediaMarkers
MediaTimeRange = __ns__.MediaTimeRange
|
from PIL import Image, ImageDraw, ImageFont # type: ignore
from view import View
class Label(View):
    """A single-line text view rendered with a PIL font.

    Width defaults to the width of the reference glyph "X" times the text
    length (assumes a roughly fixed-width font — TODO confirm); height is
    taken from the same measurement.
    """
    def __init__(self, text: str, width: int = None, color: int = 0xffffff, font: ImageFont = None) -> None:
        self.text = text
        # Fill color passed straight to ImageDraw.text (default white).
        self.color = color
        self.font = font or ImageFont.load_default()
        # Measure a reference glyph to derive character width and line height.
        # NOTE(review): ImageFont.getsize was removed in Pillow 10 — confirm
        # the Pillow version in use still provides it.
        char_width, self._height = self.font.getsize("X")
        self._width = width or char_width * len(text)
    @property
    def width(self) -> int:
        """Pixel width of the label."""
        return self._width
    @property
    def height(self) -> int:
        """Pixel height of the label."""
        return self._height
    def paint(self, image: Image) -> None:
        """Draw the label's text at the view origin (0, 0) onto *image*."""
        draw = ImageDraw.Draw(image)
        draw.text((0, 0), self.text, fill=self.color, font=self.font)
|
from sqlalchemy.orm import relationship
from zeeguu_core.model.feed import db, RSSFeed
from zeeguu_core.model.user import User
import sqlalchemy
class RSSFeedRegistration(db.Model):
    """Association between a User and an RSSFeed they are subscribed to."""
    __table_args__ = {'mysql_collate': 'utf8_bin'}
    __tablename__ = 'rss_feed_registration'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey(User.id))
    user = relationship(User)
    rss_feed_id = db.Column(db.Integer, db.ForeignKey(RSSFeed.id))
    rss_feed = relationship(RSSFeed)
    def __init__(self, user, feed):
        self.user = user
        self.rss_feed = feed
    def __str__(self):
        return f'RSS Feed Registration ({self.user.name}, {self.rss_feed})'
    def __repr__(self):
        return f'RSS Feed Registration ({self.user.name}, {self.rss_feed})'
    @classmethod
    def find_or_create(cls, session, user, feed):
        """Return the existing registration for (user, feed) or create,
        persist and return a new one."""
        try:
            return (cls.query.filter(cls.user == user)
                    .filter(cls.rss_feed == feed)
                    .one())
        except sqlalchemy.orm.exc.NoResultFound:
            new = cls(user, feed)
            session.add(new)
            session.commit()
            return new
    @classmethod
    def feeds_for_user(cls, user):
        """
        would have been nicer to define a method on the User class get feeds,
        but that would pollute the user model, and it's not nice.
        :param user:
        :return:
        """
        return cls.query.filter(cls.user == user).all()
    @classmethod
    def non_subscribed_feeds(cls, user: 'User', language_code: 'str') -> '[RSSFeed]':
        """Return feeds for the given language the user has not registered."""
        already_registered = [each.rss_feed for each in cls.feeds_for_user(user)]
        all_available_for_language = RSSFeed.find_for_language_id(language_code)
        return [feed
                for feed in all_available_for_language
                if not (feed in already_registered)]
    @classmethod
    def with_id(cls, i):
        """Return the registration with the given primary key (must exist)."""
        return (cls.query.filter(cls.id == i)).one()
    @classmethod
    def with_feed_id(cls, i, user):
        """Return this user's registration for feed id *i* (must exist)."""
        return (cls.query.filter(cls.rss_feed_id == i)) \
            .filter(cls.user_id == user.id).one()
|
import asyncio
import pandas as pd # type: ignore
import pyEX # type: ignore
from collections import deque
from datetime import datetime, timedelta
from tqdm import tqdm # type: ignore
from aat.exchange import Exchange
from aat.config import InstrumentType, EventType, Side, TradingType
from aat.core import ExchangeType, Instrument, Event, Trade, Order
# Mapping from IEX security-type codes (the `type` field of /ref-data/symbols)
# to aat InstrumentType values; used when building Instrument objects below.
_iex_instrument_types = {
    'ad': InstrumentType.EQUITY,  # ad - ADR
    'gdr': InstrumentType.EQUITY,  # gdr - GDR
    're': InstrumentType.OTHER,  # re - REIT
    'ce': InstrumentType.MUTUALFUND,  # ce - Closed end fund
    'si': InstrumentType.EQUITY,  # si - Secondary Issue
    'lp': InstrumentType.OTHER,  # lp - Limited Partnerships
    'cs': InstrumentType.EQUITY,  # cs - Common Stock
    'et': InstrumentType.EQUITY,  # et - ETF
    'wt': InstrumentType.OTHER,  # wt - Warrant
    'rt': InstrumentType.OTHER,  # rt – Right
    'oef': InstrumentType.MUTUALFUND,  # oef - Open Ended Fund
    'cef': InstrumentType.MUTUALFUND,  # cef - Closed Ended Fund
    'ps': InstrumentType.EQUITY,  # ps - Preferred Stock
    'ut': InstrumentType.OTHER,  # ut - Unit
    'struct': InstrumentType.OTHER,  # struct - Structured Product
}
class IEX(Exchange):
    '''Investor's Exchange'''
    def __init__(self, trading_type, verbose, api_key, is_sandbox, timeframe='1y', start_date=None, end_date=None):
        '''Configure the IEX adapter.

        timeframe is a pyEX chart range: 'live' streams trades via SSE,
        '1d' replays intraday bars between start_date and end_date
        ('YYYYMMDD' strings, defaulting to today), anything else fetches one
        daily-bar history per subscribed instrument.
        '''
        super().__init__(ExchangeType('iex'))
        self._trading_type = trading_type
        self._verbose = verbose
        self._api_key = api_key
        self._is_sandbox = is_sandbox
        if trading_type == TradingType.LIVE:
            # Live trading against the sandbox endpoint is disallowed.
            assert not is_sandbox
        self._timeframe = timeframe
        if timeframe == 'live':
            # A backtest needs a fixed historical window, not a stream.
            assert trading_type != TradingType.BACKTEST
        if timeframe == '1d':
            # intraday testing
            # TODO if today is weekend/holiday, pick last day with data
            self._start_date = datetime.strptime(start_date, '%Y%m%d') if start_date else datetime.today()
            self._end_date = datetime.strptime(end_date, '%Y%m%d') if end_date else datetime.today()
        self._subscriptions = []
        # "Order" management: orders are simulated locally (see newOrder).
        self._queued_orders = deque()
        self._order_id = 1
    # *************** #
    # General methods #
    # *************** #
    async def connect(self):
        '''connect to exchange. should be asynchronous.
        For OrderEntry-only, can just return None
        '''
        self._client = pyEX.Client(self._api_key, 'sandbox' if self._is_sandbox else 'stable')
    # ******************* #
    # Market Data Methods #
    # ******************* #
    async def instruments(self):
        '''get list of available instruments'''
        instruments = []
        symbols = self._client.symbols()
        for record in symbols:
            # Skip disabled, untyped and temporary listings.
            if not record['isEnabled'] or not record['type'] or record['type'] == 'temp':
                continue
            symbol = record['symbol']
            brokerExchange = record['exchange']
            type = _iex_instrument_types[record['type']]
            currency = Instrument(type=InstrumentType.CURRENCY, name=record['currency'])
            try:
                inst = Instrument(name=symbol, type=type, exchange=self.exchange(), brokerExchange=brokerExchange, currency=currency)
            except AssertionError:
                # Happens sometimes on sandbox
                continue
            instruments.append(inst)
        return instruments
    async def subscribe(self, instrument):
        '''register an instrument so tick() includes its data'''
        self._subscriptions.append(instrument)
    async def tick(self):
        '''return data from exchange'''
        if self._timeframe == 'live':
            # Live: drain an SSE-fed queue forever.
            data = deque()
            def _callback(record):
                data.append(record)
            # NOTE(review): assumes tradesSSE registers the callback and
            # returns; if it blocked, the loop below would never run —
            # confirm against the pyEX version in use.
            self._client.tradesSSE(symbols=",".join([i.name for i in self._subscriptions]),
                                   on_data=_callback)
            while True:
                while data:
                    record = data.popleft()
                    volume = record['volume']
                    price = record['price']
                    instrument = Instrument(record['symbol'], InstrumentType.EQUITY)
                    # Synthesize a taker order so the Trade has an initiator.
                    o = Order(volume=volume, price=price, side=Side.BUY, instrument=instrument, exchange=self.exchange())
                    t = Trade(volume=volume, price=price, taker_order=o, maker_orders=[])
                    yield Event(type=EventType.TRADE, target=t)
                # Yield control back to the event loop between polls.
                await asyncio.sleep(0)
        else:
            # Historical replay: build one wide close/volume DataFrame, then
            # stream it row by row as synthetic trades.
            dfs = []
            if self._timeframe != '1d':
                for i in tqdm(self._subscriptions, desc="Fetching data..."):
                    df = self._client.chartDF(i.name, timeframe=self._timeframe)
                    df = df[['close', 'volume']]
                    df.columns = ['close:{}'.format(i.name), 'volume:{}'.format(i.name)]
                    dfs.append(df)
                data = pd.concat(dfs, axis=1)
                data.sort_index(inplace=True)
                data = data.groupby(data.index).last()
                data.drop_duplicates(inplace=True)
                data.fillna(method='ffill', inplace=True)
            else:
                # Intraday: fetch minute bars day by day over the window.
                for i in tqdm(self._subscriptions, desc="Fetching data..."):
                    date = self._start_date
                    subdfs = []
                    while date <= self._end_date:
                        df = self._client.chartDF(i.name, timeframe='1d', date=date.strftime('%Y%m%d'))
                        if not df.empty:
                            df = df[['average', 'volume']]
                            df.columns = ['close:{}'.format(i.name), 'volume:{}'.format(i.name)]
                            subdfs.append(df)
                        date += timedelta(days=1)
                    dfs.append(pd.concat(subdfs))
                data = pd.concat(dfs, axis=1)
                # Index is (date, 'HH:MM') pairs; fold the minute label into
                # the timestamp.
                data.index = [x + timedelta(hours=int(y.split(':')[0]), minutes=int(y.split(':')[1])) for x, y in data.index]
                data = data.groupby(data.index).last()
                data.drop_duplicates(inplace=True)
                data.fillna(method='ffill', inplace=True)
            for index in data.index:
                for i in self._subscriptions:
                    volume = data.loc[index]['volume:{}'.format(i.name)]
                    price = data.loc[index]['close:{}'.format(i.name)]
                    if volume == 0:
                        # No trading this bar: emit nothing for the symbol.
                        continue
                    o = Order(volume=volume, price=price, side=Side.BUY, instrument=i, exchange=self.exchange())
                    o.timestamp = index.to_pydatetime()
                    t = Trade(volume=volume, price=price, taker_order=o, maker_orders=[])
                    yield Event(type=EventType.TRADE, target=t)
                    await asyncio.sleep(0)
                # Fill any orders queued by newOrder() at this bar's time.
                while self._queued_orders:
                    order = self._queued_orders.popleft()
                    order.timestamp = index
                    t = Trade(volume=order.volume, price=order.price, taker_order=order, maker_orders=[])
                    t.my_order = order
                    yield Event(type=EventType.TRADE, target=t)
                    await asyncio.sleep(0)
    # ******************* #
    # Order Entry Methods #
    # ******************* #
    async def newOrder(self, order: Order):
        '''submit a new order to the exchange. should set the given order's `id` field to exchange-assigned id
        For MarketData-only, can just return None
        '''
        if self._trading_type == TradingType.LIVE:
            raise NotImplementedError("Live OE not available for IEX")
        # Simulated fill: assign a local id and let tick() emit the trade.
        order.id = self._order_id
        self._order_id += 1
        self._queued_orders.append(order)
        return order
    # Not implemented, data-only
# Make this adapter discoverable by name through the Exchange registry.
Exchange.registerExchange('iex', IEX)
|
import warnings
import math
import struct
import pyctrl.block as block
import pyctrl.bbb.mpu9150 as mpu9150
import numpy
class Raw(block.Block):
    """Pass-through block exposing raw MPU-9150 IMU readings."""
    def read(self):
        # Only sample the IMU while enabled; otherwise return the most
        # recently cached output unchanged.
        if self.enabled:
            self.output = mpu9150.read()
        return self.output
class Inclinometer(block.Block):
    """Block turning MPU-9150 readings into a continuous turn count.

    output is a tuple (angle in turns including whole revolutions,
    angular rate).  The rate is gx / 360 — presumably gx is in deg/s so
    this is turns/s; TODO confirm the IMU's gyro units.
    """
    def __init__(self, *vars, **kwargs):
        # turns initialization
        self.turns = 0          # whole revolutions accumulated so far
        self.theta = 0          # previous fractional angle, in turns
        self.threshold = 0.25   # min jump treated as a ±0.5 wrap, not noise
        # call super
        super().__init__(*vars, **kwargs)
    def reset(self):
        """Forget accumulated whole revolutions."""
        self.turns = 0
    def read(self):
        #print('> read')
        if self.enabled:
            # read IMU
            ax, ay, az, gx, gy, gz = mpu9150.read()
            # compensate for turns:
            # theta is the fractional angle in (-0.5, 0.5]; a large jump in
            # sign between samples means we crossed the ±0.5 wrap point
            # (small sign changes near zero stay under the threshold).
            theta = -math.atan2(az, ay) / (2 * math.pi)
            if (theta < 0 and self.theta > 0):
                if (self.theta - theta > self.threshold):
                    self.turns += 1
            elif (theta > 0 and self.theta < 0):
                if (theta - self.theta > self.threshold):
                    self.turns -= 1
            self.theta = theta
            self.output = (self.turns + theta, gx / 360)
        #print('< read')
        return self.output
if __name__ == "__main__":
    # Manual smoke test: print the inclinometer reading 100 times at ~10 Hz.
    import time, math
    from time import perf_counter
    # NOTE(review): T, K and perf_counter are unused below — leftovers from
    # an earlier timing loop?
    T = 0.04
    K = 1000
    print("\n> Testing Inclinometer")
    giro = Inclinometer()
    print("\n> ")
    giro.set_enabled(enabled = True)
    N = 100
    for k in range(N):
        # read inclinometer
        (theta, thetadot) = giro.read()
        time.sleep(.1)
        print('\r> theta = {:+05.3f} theta dot = {:+05.3f} 1/s'.format(theta, thetadot), end='')
|
#!/usr/bin/env python
__author__ = 'Will Kamp'
__copyright__ = 'Copyright 2013, Matrix Mariner Inc.'
__license__ = 'BSD'
__email__ = 'will@mxmariner.com'
__status__ = 'Development' # 'Prototype', 'Development', or 'Production'
'''This is a database of sorts for nautical chart regions, their providing hydro-graphic offices
and additional information such as a listing of files or description.
'''
import os.path
from . import config
from .noaaxml import NoaaXmlReader
from . import lookups
from . import wl_filter_list_generator
from . import file_name_sanitizer
from .region_constants import *
from .search import MapPathSearch
class _RegionInfo:
    """Description and chart file type for a single chart region."""
    def __init__(self, desc, map_type):
        # Human-readable region description.
        self.description = desc
        # Chart file extension: 'kap' (BSB) or 'tif' (GeoTIFF).
        self.map_type = map_type
class _RegionDatabase:
def __init__(self):
self.db = {}
self.rdb = {}
self.provider_dirs = {}
def add_provider(self, provider, map_dir):
if provider not in self.db:
self.db[provider] = {}
self.provider_dirs[provider] = map_dir
def add_region(self, provider, region, desc, map_type):
if provider in self.db:
self.db[provider][region] = _RegionInfo(desc, map_type)
self.rdb[region] = provider
def provider_for_region(self, region):
region = region.upper()
if region in self.rdb:
return self.rdb[region]
return None
def get_description(self, provider, region):
if provider in self.db and region in self.db[provider]:
return self.db[provider][region].description
def get_map_type(self, provider, region):
if provider in self.db and region in self.db[provider]:
return self.db[provider][region].map_type
def get_directory_for_provider(self, provider):
if provider in self.provider_dirs:
return self.provider_dirs[provider]
def provider_has_region(self, provider, region):
return provider in self.db and region in self.db[provider]
def is_valid_region(self, region):
return region in self.rdb.keys()
# Chart format types (file extensions used when searching chart directories)
map_type_bsb = 'kap'
map_type_geotiff = 'tif'
# Providers
provider_noaa = 'noaa'
provider_faa = 'faa'
provider_brazil = 'brazil'
provider_linz = 'linz'
provider_ukho = 'ukho'
provider_wavey_lines = 'wavey-lines'
# Build the database: one add_provider per hydrographic office, then the
# regions it serves (REGION_* constants come from region_constants).
_db = _RegionDatabase()
# US - NOAA
_db.add_provider(provider_noaa, config.noaa_bsb_dir)
_db.add_region(provider_noaa, REGION_02, 'Block Island RI to the Canadian Border', map_type_bsb)
_db.add_region(provider_noaa, REGION_03, 'New York to Nantucket and Cape May NJ', map_type_bsb)
_db.add_region(provider_noaa, REGION_04, 'Chesapeake and Delaware Bays', map_type_bsb)
_db.add_region(provider_noaa, REGION_06, 'Norfolk VA to Florida including the ICW', map_type_bsb)
_db.add_region(provider_noaa, REGION_07, 'Florida East Coast and the Keys', map_type_bsb)
_db.add_region(provider_noaa, REGION_08, 'Florida West Coast and the Keys', map_type_bsb)
_db.add_region(provider_noaa, REGION_10, 'Puerto Rico and the U.S. Virgin Islands', map_type_bsb)
_db.add_region(provider_noaa, REGION_12, 'Southern California, Point Arena to the Mexican Border', map_type_bsb)
_db.add_region(provider_noaa, REGION_13, 'Lake Michigan', map_type_bsb)
_db.add_region(provider_noaa, REGION_14, 'San Francisco to Cape Flattery', map_type_bsb)
_db.add_region(provider_noaa, REGION_15, 'Pacific Northwest, Puget Sound to the Canadian Border', map_type_bsb)
_db.add_region(provider_noaa, REGION_17, 'Mobile AL to the Mexican Border', map_type_bsb)
_db.add_region(provider_noaa, REGION_22, 'Lake Superior and Lake Huron (U.S. Waters)', map_type_bsb)
_db.add_region(provider_noaa, REGION_24, 'Lake Erie (U.S. Waters)', map_type_bsb)
_db.add_region(provider_noaa, REGION_26, 'Lake Ontario (U.S. Waters)', map_type_bsb)
_db.add_region(provider_noaa, REGION_30, 'Southeast Alaska', map_type_bsb)
_db.add_region(provider_noaa, REGION_32, 'South Central Alaska, Yakutat to Kodiak', map_type_bsb)
_db.add_region(provider_noaa, REGION_34, 'Alaska, The Aleutians and Bristol Bay', map_type_bsb)
_db.add_region(provider_noaa, REGION_36, 'Alaska, Norton Sound to Beaufort Sea', map_type_bsb)
_db.add_region(provider_noaa, REGION_40, 'Hawaiian Islands and U.S. Territories', map_type_bsb)
# BRAZIL NAVY
_db.add_provider(provider_brazil, config.brazil_bsb_dir)
_db.add_region(provider_brazil, REGION_BR, 'Brazil: Guyana to Uruguay', map_type_bsb)
# New Zealand - LINZ
_db.add_provider(provider_linz, config.linz_bsb_dir)
_db.add_region(provider_linz, REGION_NZ, 'New Zealand and South Pacific: Samoa to Ross Sea', map_type_bsb)
# United Kingdom - UKHO
_db.add_provider(provider_ukho, config.ukho_geotiff_dir)
_db.add_region(provider_ukho, REGION_UK1, 'United Kingdom North East Coast to Shetland Islands', map_type_geotiff)
_db.add_region(provider_ukho, REGION_UK2, 'United Kingdom South East Coast and Channel Islands', map_type_geotiff)
_db.add_region(provider_ukho, REGION_UK3, 'United Kingdom North West Coast and Ireland West Coast', map_type_geotiff)
_db.add_region(provider_ukho, REGION_UK4, 'United Kingdom South West Coast and Ireland East Coast - Irish Sea',
               map_type_geotiff)
# Wavey Lines
_db.add_provider(provider_wavey_lines, config.wavey_line_geotiff_dir)
_db.add_region(provider_wavey_lines, REGION_WL1, 'Caribbean West Florida and Bahamas to Long Island', map_type_geotiff)
_db.add_region(provider_wavey_lines, REGION_WL2, 'Caribbean East Turks And Caicos Islands Crooked Island to Dominican Republic', map_type_geotiff)
# FAA
_db.add_provider(provider_faa, config.faa_geotiff_dir)
_db.add_region(provider_faa, REGION_FAA_PLANNING, 'FAA VFR Planning Charts', map_type_geotiff)
_db.add_region(provider_faa, REGION_FAA_SECTIONAL, 'FAA VFR Sectional Charts', map_type_geotiff)
_db.add_region(provider_faa, REGION_FAA_TERMINAL, 'FAA VFR Terminal Charts', map_type_geotiff)
_db.add_region(provider_faa, REGION_FAA_HELICOPTER, 'FAA VFR Helicopter charts', map_type_geotiff)
_db.add_region(provider_faa, REGION_FAA_CARIBBEAN, 'FAA VFR Caribbean Charts', map_type_geotiff)
def description_for_region(region):
    """returns the description for region defined in regions.py"""
    # provider_for_region upper-cases internally, so normalize once here
    # and reuse the result for both lookups.
    region = region.upper()
    return _db.get_description(_db.provider_for_region(region), region)
def map_type_for_region(region):
    """Return the region's chart file extension, e.g. ``tif`` or ``kap``."""
    region_key = region.upper()
    return _db.get_map_type(_db.provider_for_region(region), region_key)
def map_list_for_region(region):
    """returns a list of absolute paths to chart files for queried region

    NOTE(review): when the provider does not list this region the function
    falls through and implicitly returns None -- confirm callers expect that.
    """
    provider = _db.provider_for_region(region)
    region = region.upper()
    if _db.provider_has_region(provider, region):
        if provider == provider_noaa:
            # NOAA: file names come from the region's XML chart catalog
            reader = NoaaXmlReader(region)
            mps = MapPathSearch(config.noaa_bsb_dir, [map_type_for_region(region)], reader.get_map_files())
            return mps.file_paths
        elif provider == provider_linz:
            # LINZ: every chart of the region's map type in the LINZ directory
            mps = MapPathSearch(config.linz_bsb_dir, [map_type_for_region(region)])
            return mps.file_paths
        elif provider == provider_brazil:
            # Brazil: mixes GeoTIFF and BSB charts in one directory
            mps = MapPathSearch(config.brazil_bsb_dir, [map_type_geotiff, map_type_bsb])
            return mps.file_paths
        elif provider == provider_wavey_lines:
            # Wavey Lines: sanitize file names first, then select by region
            file_name_sanitizer.sanitize(config.wavey_line_geotiff_dir)
            return wl_filter_list_generator.get_file_list_region_dictionary()[region]
        elif provider == provider_ukho:
            # UKHO: a per-region .txt manifest lists chart names; each chart
            # may exist as a .png or fall back to a .tif
            region_txt = os.path.join(config.ukho_meta_dir, region.upper() + '.txt')
            paths = []
            with open(region_txt, 'r') as manifest:
                for ea in manifest.readlines():
                    p = os.path.join(config.ukho_png_dir, ea.strip() + '.png')
                    if os.path.isfile(p):
                        paths.append(p)
                    else:
                        p = os.path.join(config.ukho_geotiff_dir, ea.strip() + '.tif')
                        if os.path.isfile(p):
                            paths.append(p)
                        else:
                            # manifest entry without a matching chart file is fatal
                            raise Exception('path not found for chart: ' + p)
            return paths
        elif provider == provider_faa:
            # FAA: each region's charts live in their own subdirectory
            map_dir = os.path.join(config.faa_geotiff_dir, region.upper())
            mps = MapPathSearch(map_dir, [map_type_for_region(region)])
            return mps.file_paths
        else:
            raise Exception('unknown region')
def lookup_for_region(region):
    """Return the lookup instance used to build the queried region's catalog.

    See lookups.py for the available lookup implementations.
    """
    special_lookups = {
        provider_ukho: lookups.UKHOLookup,
        provider_wavey_lines: lookups.WaveylinesLookup,
        provider_brazil: lookups.BsbGdalMixLookup,
        provider_faa: lookups.FAALookup,
    }
    provider = _db.provider_for_region(region)
    lookup_cls = special_lookups.get(provider, lookups.BsbLookup)
    return lookup_cls()
def provider_for_region(region):
    """Return the provider identifier (e.g. ``noaa``) for the queried region."""
    return _db.provider_for_region(region)
def directory_for_provider(provider):
    """Return the directory holding chart files for the given provider."""
    return _db.get_directory_for_provider(provider.lower())
def is_valid_region(region):
    """Return True when *region* is known to the chart database, else False."""
    normalized = region.upper()
    return _db.is_valid_region(normalized)
def find_custom_region_path(region):
    """Search config.map_dir for a directory named after an unknown region.

    Returns the path of the first matching directory, or None when absent.
    """
    for parent, subdirs, _files in os.walk(config.map_dir):
        if region in subdirs:
            return os.path.join(parent, region)
    return None
"""Just a quick routine for resampling committor analysis results with a new settings object and without assuming the
existence of a restart.pkl file in the working directory."""
import os
import re
import glob
import shutil
from atesa import factory
from atesa import main
def resample_committor_analysis(settings):
    """
    Resample committor analysis results with new settings without using restart.pkl.

    Go to working directory, find every trajectory file, check its commitment (based on current settings), and based on
    its name, combine those results with other trajectories from the same initial coordinates to produce a new
    committor_analysis.out file.

    Parameters
    ----------
    settings : argparse.Namespace
        Settings namespace object

    Returns
    -------
    None

    """
    original_dir = os.getcwd()
    os.chdir(settings.working_directory)    # move to working directory containing committor analysis trajectories

    jobtype = factory.jobtype_factory('committor_analysis')     # set jobtype
    trajs = glob.glob('*.nc')   # assume all .nc trajectories in the working directory are targets

    # First, make new thread objects based on names of trajectories in trajs. We can take advantage of the fact that
    # all the trajectories are named as: [threadname]_[int]_[another_int].nc
    pattern = re.compile(r'_[0-9]+_[0-9]+\.nc')   # raw string so '\.' stays a literal-dot regex escape
    allthreads = []
    for traj in trajs:
        try:
            threadname = traj.replace(pattern.findall(traj)[-1], '')
        except IndexError:  # pattern.findall found no match in this file name
            raise RuntimeError('Tried to get thread name from trajectory file: ' + traj + ', but it was not formatted '
                               'as expected. Are you sure this is an ATESA committor analysis directory?')

        # Create new thread for this trajectory if necessary
        if threadname not in [thread.name for thread in allthreads]:
            try:
                assert os.path.exists(threadname)   # initial coordinate file for this thread needs to exist
            except AssertionError:
                raise RuntimeError('Found trajectories appearing to belong to a thread based on initial coordinate file'
                                   ' ' + threadname + ' but no such file was found in the working directory. Either '
                                   'this isn\'t an ATESA committor analysis directory or it\'s corrupted or modified.')

            thread = main.Thread()

            # Initialize thread (modified from main.init_threads)
            og_prmtop = settings.topology
            if '/' in settings.topology:
                settings.topology = settings.topology[settings.topology.rindex('/') + 1:]
            try:
                shutil.copy(og_prmtop, settings.working_directory + '/' + settings.topology)
            except shutil.SameFileError:
                pass
            thread.topology = settings.topology
            thread.current_type = []
            jobtype.update_history(thread, settings, **{'initialize': True, 'inpcrd': threadname})   # initialize thread.history
            thread.name = threadname
            allthreads.append(thread)

        # Append this trajectory to the list of moves in this thread
        thread = allthreads[[threadname == thread.name for thread in allthreads].index(True)]
        thread.history.prod_trajs.append(traj)
        thread.current_type.append('prod')

    # Now we can use the extant committor analysis method for getting results for each thread
    for thread in allthreads:
        jobtype.update_results(thread, allthreads, settings)

    os.chdir(original_dir)  # return to directory from which this was called
|
# Copyright (c) 2013, Jeff Terrace
# All rights reserved.
"""Module for connecting to and controlling the Logitech Harmony Link"""
|
# This Python module is part of the PyRate software package.
#
# Copyright 2021 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python module implements an algorithm to search for the location
of the interferometric reference pixel
"""
import os
from os.path import join
from typing import Tuple
from itertools import product
import numpy as np
from numpy import isnan, std, mean, sum as nsum
from joblib import Parallel, delayed
import pyrate.constants as C
from pyrate.core import ifgconstants as ifc
from pyrate.core import mpiops
from pyrate.core.shared import Ifg, nan_and_mm_convert
from pyrate.core.shared import joblib_log_level
from pyrate.core.logger import pyratelogger as log
from pyrate.core import prepifg_helper
from pyrate.configuration import Configuration, ConfigException
MAIN_PROCESS = 0
def update_refpix_metadata(ifg_paths, refx, refy, transform, params):
    """
    Function that adds metadata about the chosen reference pixel to each interferogram.

    Writes the ref pixel's pixel and lon/lat coordinates plus the mean and
    standard deviation of the phase window centred on it into each ifg.
    """
    pyrate_refpix_lon, pyrate_refpix_lat = mpiops.run_once(convert_pixel_value_to_geographic_coordinate, refx, refy, transform)
    # each MPI process handles its own share of the interferograms
    process_ifgs_paths = mpiops.array_split(ifg_paths)
    for ifg_file in process_ifgs_paths:
        log.debug("Updating metadata for: "+ifg_file)
        ifg = Ifg(ifg_file)
        log.debug("Open dataset")
        ifg.open(readonly=True)
        nan_and_mm_convert(ifg, params)
        # NOTE(review): uses the literal key "refchipsize" while the rest of the
        # module reads params via C.* constants -- confirm they name the same entry
        half_patch_size = params["refchipsize"] // 2
        x, y = refx, refy
        log.debug("Extract reference pixel windows")
        data = ifg.phase_data[y - half_patch_size: y + half_patch_size + 1,
                              x - half_patch_size: x + half_patch_size + 1]
        log.debug("Calculate standard deviation for reference window")
        stddev_ref_area = np.nanstd(data)
        log.debug("Calculate mean for reference window")
        mean_ref_area = np.nanmean(data)
        ifg.add_metadata(**{
            ifc.PYRATE_REFPIX_X: str(refx),
            ifc.PYRATE_REFPIX_Y: str(refy),
            ifc.PYRATE_REFPIX_LAT: str(pyrate_refpix_lat),
            ifc.PYRATE_REFPIX_LON: str(pyrate_refpix_lon),
            ifc.PYRATE_MEAN_REF_AREA: str(mean_ref_area),
            ifc.PYRATE_STDDEV_REF_AREA: str(stddev_ref_area)
        })
        # persist the converted phase data together with the new metadata
        ifg.write_modified_phase()
        ifg.close()
def convert_pixel_value_to_geographic_coordinate(refx, refy, transform):
    """
    Convert a pixel coordinate into a longitude/latitude coordinate using the
    geotransform of the image.

    Args:
        refx: The pixel x coordinate.
        refy: The pixel y coordinate.
        transform: The geotransform array of the image.

    Returns:
        Tuple of lon, lat geographic coordinate.
    """
    return (
        lon_from_pixel_coordinate(refx, transform),
        lat_from_pixel_coordinate(refy, transform),
    )
def lat_from_pixel_coordinate(refy, transform):
    """Return the latitude of pixel row *refy* given a GDAL geotransform."""
    origin_y = transform[3]
    cell_height = -transform[5]  # transform[5] is negative for north-up images
    return origin_y - refy * cell_height
def lon_from_pixel_coordinate(refx, transform):
    """Return the longitude of pixel column *refx* given a GDAL geotransform."""
    origin_x = transform[0]
    cell_width = transform[1]
    return origin_x + refx * cell_width
def convert_geographic_coordinate_to_pixel_value(lon, lat, transform):
    """
    Convert a longitude/latitude coordinate into pixel indices using the
    geotransform of the image.

    Args:
        lon: Pixel longitude.
        lat: Pixel latitude.
        transform: The geotransform array of the image.

    Returns:
        Tuple of refx, refy pixel coordinates.
    """
    origin_x, cell_width = transform[0], transform[1]
    origin_y, cell_height = transform[3], -transform[5]
    col = round((lon - origin_x) / cell_width)
    row = round((origin_y - lat) / cell_height)
    return int(col), int(row)
# TODO: move error checking to config step (for fail fast)
# TODO: this function is not used. Plan removal
def ref_pixel(ifgs, params):
    """
    Determines the most appropriate reference pixel coordinate by conducting
    a grid search and calculating the mean standard deviation with patches
    around candidate pixels from the given interferograms.

    If the config file REFX or REFY values are empty or negative, the search
    for the reference pixel is performed. If the REFX|Y values are within the
    bounds of the raster, a search is not performed. REFX|Y values outside
    the upper bounds cause an exception.

    :param list ifgs: List of interferogram objects
    :param dict params: Dictionary of configuration parameters

    :return: tuple of best REFX and REFY coordinates
    :rtype: tuple
    """
    half_patch_size, thresh, grid = ref_pixel_setup(ifgs, params)
    parallel = params[C.PARALLEL]
    if parallel:
        phase_data = [i.phase_data for i in ifgs]
        mean_sds = Parallel(n_jobs=params[C.PROCESSES],
                            verbose=joblib_log_level(C.LOG_LEVEL))(
            delayed(_ref_pixel_multi)(g, half_patch_size, phase_data,
                                      thresh, params) for g in grid)
        refxy = find_min_mean(mean_sds, grid)
    else:
        phase_data = [i.phase_data for i in ifgs]
        mean_sds = []
        for g in grid:
            mean_sds.append(_ref_pixel_multi(g, half_patch_size, phase_data, thresh, params))
        refxy = find_min_mean(mean_sds, grid)
    # NOTE(review): find_min_mean returns the caught exception instance on an
    # all-NaN input rather than raising; ref_pixel_calc_wrapper tests for
    # ValueError while this checks RefPixelError -- confirm which is intended.
    if isinstance(refxy, RefPixelError):
        raise RefPixelError('Refpixel calculation not possible!')
    refy, refx = refxy  # grid candidates are stored row-first, i.e. (y, x)
    if refy and refx:
        return refy, refx
    raise RefPixelError("Could not find a reference pixel")
def find_min_mean(mean_sds, grid):
    """
    Determine the ref pixel block with minimum mean value

    :param list mean_sds: List of mean standard deviations from each
        reference pixel grid
    :param list grid: List of ref pixel coordinates tuples

    :return: Tuple of (refy, refx) with minimum mean, or the caught
        ValueError instance when every candidate mean is NaN (callers test
        for this with ``isinstance(..., ValueError)``)
    :rtype: tuple or ValueError
    """
    log.debug('Ranking ref pixel candidates based on mean values')
    try:
        refp_index = np.nanargmin(mean_sds)
        return grid[refp_index]
    except ValueError as v:
        # np.nanargmin raises ValueError on an all-NaN input; the previous
        # `except RefPixelError` could never fire, so the error escaped
        # instead of being returned for the caller's isinstance check.
        log.error(v)
        return v
def ref_pixel_setup(ifgs_or_paths, params):
    """
    Sets up the grid for reference pixel computation and saves numpy files
    to disk for later use during ref pixel computation.

    :param list ifgs_or_paths: List of interferogram filenames or Ifg objects
    :param dict params: Dictionary of configuration parameters

    :return: half_patch_size: size of patch
    :rtype: float
    :return: thresh
    :rtype: float
    :return: list(product(ysteps, xsteps))
    :rtype: list
    """
    log.debug('Setting up ref pixel computation')
    refnx, refny, chipsize, min_frac = params[C.REFNX], \
                                       params[C.REFNY], \
                                       params[C.REF_CHIP_SIZE], \
                                       params[C.REF_MIN_FRAC]
    # NOTE(review): message says "2+" but the guard only rejects an empty list
    if len(ifgs_or_paths) < 1:
        msg = 'Reference pixel search requires 2+ interferograms'
        raise RefPixelError(msg)
    # only the first ifg is needed for the shape/bounds sanity checks
    if isinstance(ifgs_or_paths[0], str):
        head = Ifg(ifgs_or_paths[0])
        head.open(readonly=True)
    else:
        head = ifgs_or_paths[0]
    # sanity check inputs
    _validate_chipsize(chipsize, head)
    _validate_minimum_fraction(min_frac)
    _validate_search_win(refnx, refny, chipsize, head)
    # pre-calculate useful amounts
    half_patch_size = chipsize // 2
    chipsize = half_patch_size * 2 + 1  # force chipsize to an odd number
    thresh = min_frac * chipsize * chipsize  # min count of valid cells per window
    # do window searches across dataset, central pixel of stack with smallest
    # mean is the reference pixel
    rows, cols = head.shape
    ysteps = _step(rows, refny, half_patch_size)
    xsteps = _step(cols, refnx, half_patch_size)
    log.debug('Ref pixel setup finished')
    return half_patch_size, thresh, list(product(ysteps, xsteps))
def save_ref_pixel_blocks(grid, half_patch_size, ifg_paths, params):
    """
    Save reference pixel grid blocks to numpy array files on disk

    :param list grid: List of tuples (y, x) corresponding to ref pixel grids
    :param int half_patch_size: patch size in pixels
    :param list ifg_paths: list of interferogram paths
    :param dict params: Dictionary of configuration parameters

    :return: None, file saved to disk
    """
    log.debug('Saving ref pixel blocks')
    outdir = params[C.TMPDIR]
    for pth in ifg_paths:
        ifg = Ifg(pth)
        ifg.open(readonly=True)
        # normalise the phase data before windowing (NaN conversion, mm units)
        ifg.nodata_value = params[C.NO_DATA_VALUE]
        ifg.convert_to_nans()
        ifg.convert_to_mm()
        for y, x in grid:
            data = ifg.phase_data[y - half_patch_size:y + half_patch_size + 1,
                                  x - half_patch_size:x + half_patch_size + 1]

            # file name encodes the ifg basename and window centre so that
            # _ref_pixel_multi can later load the matching block
            data_file = join(outdir, 'ref_phase_data_{b}_{y}_{x}.npy'.format(
                b=os.path.basename(pth).split('.')[0], y=y, x=x))
            np.save(file=data_file, arr=data)
        ifg.close()
    log.debug('Saved ref pixel blocks')
def _ref_pixel_mpi(process_grid, half_patch_size, ifgs, thresh, params):
    """
    Convenience function for MPI-enabled ref pixel calculation
    """
    log.debug('Ref pixel calculation started')
    return [_ref_pixel_multi(candidate, half_patch_size, ifgs, thresh, params)
            for candidate in process_grid]
def _ref_pixel_multi(g, half_patch_size, phase_data_or_ifg_paths,
                     thresh, params):
    """
    Convenience function for ref pixel optimisation
    """
    # pylint: disable=invalid-name
    y, x = g
    if isinstance(phase_data_or_ifg_paths[0], str):
        # Paths were supplied: load the pre-saved window for each ifg from
        # disk, so only one block per ifg is in memory at any time.
        tmp_dir = params[C.TMPDIR]
        data = []
        for path in phase_data_or_ifg_paths:
            base = os.path.basename(path).split('.')[0]
            block_file = os.path.join(
                tmp_dir,
                'ref_phase_data_{b}_{y}_{x}.npy'.format(b=base, y=y, x=x))
            data.append(np.load(file=block_file))
    else:
        # Phase data arrays were supplied: slice the window out of each one.
        data = [arr[y - half_patch_size:y + half_patch_size + 1,
                    x - half_patch_size:x + half_patch_size + 1]
                for arr in phase_data_or_ifg_paths]

    # Reject the candidate when any ifg window has too many NaN cells.
    if all(nsum(~isnan(d)) > thresh for d in data):
        return mean([std(d[~isnan(d)]) for d in data])
    return np.nan
def _step(dim, ref, radius):
"""
Helper: returns range object of axis indices for a search window.
:param int dim: Total length of the grid dimension
:param int ref: The desired number of steps
:param int radius: The number of cells from the centre of the chip eg.
(chipsize / 2)
:return: range object of axis indices
:rtype: range
"""
# if ref == 1:
# # centre a single search step
# return xrange(dim // 2, dim, dim) # fake step to ensure single xrange value
# if ref == 2: # handle 2 search windows, method below doesn't cover the case
# return [radius, dim-radius-1]
# max_dim = dim - (2*radius) # max possible number for refn(x|y)
# step = max_dim // (ref-1)
step_size = dim // ref
return range(radius, dim-radius, step_size)
def _validate_chipsize(chipsize, head):
    """
    Sanity check min chipsize
    """
    if chipsize is None:
        raise ConfigException('Chipsize is None')
    # chipsize must be an odd number in [3, raster width]
    too_small = chipsize < 3
    too_wide = chipsize > head.ncols
    is_even = chipsize % 2 == 0
    if too_small or too_wide or is_even:
        raise RefPixelError("Chipsize setting must be >=3 and at least <= grid width")
    log.debug('Chipsize validation successful')
def _validate_minimum_fraction(min_frac):
    """
    Sanity check min fraction
    """
    if min_frac is None:
        raise ConfigException('Minimum fraction is None')
    out_of_range = min_frac < 0.0 or min_frac > 1.0
    if out_of_range:
        raise RefPixelError("Minimum fraction setting must be >= 0.0 and <= 1.0 ")
def _validate_search_win(refnx, refny, chipsize, head):
    """
    Sanity check X|Y steps
    """
    if refnx is None:
        raise ConfigException('refnx is None')
    # the chip must fit at every candidate, limiting the number of steps
    max_width = head.ncols - (chipsize - 1)
    if refnx < 1 or refnx > max_width:
        raise RefPixelError("Invalid refnx setting, must be > 0 and <= %s" % max_width)

    if refny is None:
        raise ConfigException('refny is None')
    max_rows = head.nrows - (chipsize - 1)
    if refny < 1 or refny > max_rows:
        raise RefPixelError("Invalid refny setting, must be > 0 and <= %s" % max_rows)
def __validate_supplied_lat_lon(params: dict) -> None:
    """
    Function to validate that the user supplied lat/lon values sit within image bounds

    Raises RefPixelError naming the out-of-bounds axis/axes; returns silently
    when either value is the -1 "search automatically" sentinel.
    """
    lon, lat = params[C.REFX], params[C.REFY]
    # -1 means "perform the grid search"; nothing to validate in that case
    if lon == -1 or lat == -1:
        return
    xmin, ymin, xmax, ymax = prepifg_helper.get_analysis_extent(
        crop_opt=params[C.IFG_CROP_OPT],
        rasters=[prepifg_helper.dem_or_ifg(p.sampled_path) for p in params[C.INTERFEROGRAM_FILES]],
        xlooks=params[C.IFG_LKSX], ylooks=params[C.IFG_LKSY],
        user_exts=(params[C.IFG_XFIRST], params[C.IFG_YFIRST], params[
            C.IFG_XLAST], params[C.IFG_YLAST])
    )
    msg = "Supplied {} value is outside the bounds of the interferogram data"
    # accumulate which axes are out of bounds so one error names them all
    lat_lon_txt = ''
    if (lon < xmin) or (lon > xmax):
        lat_lon_txt += 'longitude'
    if (lat < ymin) or (lat > ymax):
        lat_lon_txt += ' and latitude' if lat_lon_txt else 'latitude'
    if lat_lon_txt:
        raise RefPixelError(msg.format(lat_lon_txt))
class RefPixelError(Exception):
    """
    Generic exception for reference pixel errors.

    Raised when reference pixel parameters fail validation or when no valid
    reference pixel can be determined.
    """
def ref_pixel_calc_wrapper(params: dict) -> Tuple[int, int]:
    """
    Wrapper for reference pixel calculation

    Order of precedence: reuse a previously cached ref pixel file when one
    exists; otherwise run the grid search when REFX/REFY are the -1 sentinel;
    otherwise convert the user supplied lon/lat into pixel coordinates.
    Returns the (refx, refy) pixel coordinates and records them in params.
    """
    __validate_supplied_lat_lon(params)
    ifg_paths = [ifg_path.tmp_sampled_path for ifg_path in params[C.INTERFEROGRAM_FILES]]
    lon = params[C.REFX]
    lat = params[C.REFY]

    ifg = Ifg(ifg_paths[0])
    ifg.open(readonly=True)
    # assume all interferograms have same projection and will share the same transform
    transform = ifg.dataset.GetGeoTransform()

    ref_pixel_file = Configuration.ref_pixel_path(params)

    def __reuse_ref_pixel_file_if_exists():
        # Runs on a single process (via run_once); (None, None) signals that
        # there is no cached result to reuse.
        if ref_pixel_file.exists():
            refx, refy = np.load(ref_pixel_file)
            log.info('Reusing pre-calculated ref-pixel values: ({}, {}) from file {}'.format(
                refx, refy, ref_pixel_file.as_posix()))
            log.warning("Reusing ref-pixel values from previous run!!!")
            params[C.REFX_FOUND], params[C.REFY_FOUND] = int(refx), int(refy)
            return int(refx), int(refy)
        else:
            return None, None

    # read and return
    refx, refy = mpiops.run_once(__reuse_ref_pixel_file_if_exists)

    if (refx is not None) and (refy is not None):
        update_refpix_metadata(ifg_paths, int(refx), int(refy), transform, params)
        return refx, refy

    if lon == -1 or lat == -1:
        log.info('Searching for best reference pixel location')

        half_patch_size, thresh, grid = ref_pixel_setup(ifg_paths, params)
        process_grid = mpiops.array_split(grid)
        save_ref_pixel_blocks(process_grid, half_patch_size, ifg_paths, params)
        mean_sds = _ref_pixel_mpi(process_grid, half_patch_size, ifg_paths, thresh, params)
        mean_sds = mpiops.comm.gather(mean_sds, root=0)
        if mpiops.rank == MAIN_PROCESS:
            mean_sds = np.hstack(mean_sds)

        # find_min_mean returns the ValueError instance (not a raise) when
        # every candidate mean is NaN; translated into RefPixelError below
        refpixel_returned = mpiops.run_once(find_min_mean, mean_sds, grid)

        if isinstance(refpixel_returned, ValueError):
            raise RefPixelError(
                "Reference pixel calculation returned an all nan slice!\n"
                "Cannot continue downstream computation. Please change reference pixel algorithm used before "
                "continuing.")
        refy, refx = refpixel_returned   # row first means first value is latitude
        log.info('Selected reference pixel coordinate (x, y): ({}, {})'.format(refx, refy))
        lon, lat = convert_pixel_value_to_geographic_coordinate(refx, refy, transform)
        log.info('Selected reference pixel coordinate (lon, lat): ({}, {})'.format(lon, lat))
    else:
        log.info('Using reference pixel from config file (lon, lat): ({}, {})'.format(lon, lat))
        log.warning("Ensure user supplied reference pixel values are in lon/lat")
        refx, refy = convert_geographic_coordinate_to_pixel_value(lon, lat, transform)
        log.info('Converted reference pixel coordinate (x, y): ({}, {})'.format(refx, refy))
    # cache the result so subsequent runs can skip the search
    np.save(file=ref_pixel_file, arr=[int(refx), int(refy)])

    update_refpix_metadata(ifg_paths, refx, refy, transform, params)

    log.debug("refpx, refpy: "+str(refx) + " " + str(refy))
    ifg.close()
    params[C.REFX_FOUND], params[C.REFY_FOUND] = int(refx), int(refy)
    return int(refx), int(refy)
|
"""
统计标注文件 xml 文件中图片的尺寸信息,宽高和深度
"""
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
ann_path = "E:/VOC/VOC2007/Annotations/" # xml 文件所在的路径
xml_file_list = [ann_path+i for i in os.listdir(ann_path) if i.endswith("xml")]
width_list = []
height_list= []
depth_list = []
for xml_file in xml_file_list:
xml_parse = ET.parse(xml_file).getroot()
# [i for i in xml_parse]
for size in xml_parse.iter('size'):
width = int(size.find("width").text)
width_list.append(width)
height = int(size.find("height").text)
height_list.append(height)
depth = int(size.find("depth").text)
if depth == 1:
print(xml_file)
depth_list.append(depth)
print(np.unique(depth_list))
plt.scatter(width_list, height_list)
plt.xlabel("width")
plt.ylabel("height")
plt.show() |
import logging
import sys
import arrow
import requests
GROUPME_NO_MESSAGE_STATUS_CODE = 304
# logging setup
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
stdoutput = logging.StreamHandler(sys.stdout)
stdoutput.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(stdoutput)
class GroupMeAPI():
    """Thin client for the GroupMe v3 REST API, scoped to a single group."""

    # Template only: __init__ gives every instance its own headers dict, so an
    # access token is never written into this shared class-level attribute.
    headers = {
        'X-Access-Token': None
    }

    def __init__(self, base_url, group_id, group_name, access_token):
        self.base_url = base_url
        self.group_id = group_id
        self.group_name = group_name
        # Previously this mutated the class-level dict, leaking the token
        # between all instances; build a per-instance dict instead.
        self.headers = {'X-Access-Token': access_token}

    @classmethod
    def groupme_message_has_image_or_video(cls, message):
        """Return True when the message has at least one image or video attachment."""
        attachments = message['attachments']
        for attach in attachments:
            if attach['type'] == 'image' or attach['type'] == 'video':
                return True
        return False

    @classmethod
    def get_first_image_or_video_attachment_from_groupme_message(cls, message):
        """Return the first image/video attachment; raise when none exists."""
        attachments = message['attachments']
        for attach in attachments:
            if attach['type'] == 'image' or attach['type'] == 'video':
                return attach
        raise Exception('No image or Video Attchment: {}'.format(message))

    def get_groups(self):
        """Return the list of groups visible to the authenticated user."""
        url = '{}/groups'.format(self.base_url)
        res = requests.get(url, headers=self.headers)
        res.raise_for_status()
        return res.json()['response']

    def verfify_group_exists(self):
        """Check that the configured group id/name pair exists for this token.

        NOTE: method name typo ("verfify") kept for backward compatibility.
        """
        groups = self.get_groups()
        for group in groups:
            if group['group_id'] == self.group_id and group['name'] == self.group_name:
                logger.info('found group {} with {} messages'.format(self.group_name, group['messages']['count']))
                return True
        raise Exception('working with invalid group {}'.format(groups))

    def get_all_multi_media_messages_iter(self):
        '''Get only those messages that contain a picture or video
        '''
        for message in self._get_all_messages_iter():
            if self.groupme_message_has_image_or_video(message):
                yield message

    def get_all_multi_media_messages_after_iter(self, message_id):
        '''Get only those messages that contain a picture or video
        '''
        for message in self._get_messages_after(message_id):
            if self.groupme_message_has_image_or_video(message):
                yield message

    def _get_messages(self, limit=100):
        '''Fetch up to `limit` of the most recent messages in the group.'''
        url = '{}/groups/{}/messages'.format(self.base_url, self.group_id)
        res = requests.get(url, headers=self.headers, params={'limit': limit})
        res.raise_for_status()
        return res.json()['response']['messages']

    def _get_all_messages_iter(self):
        '''Gets all messages in descending order of date created
        '''
        # Get most recent message because its ID is needed to paginate backwards in time
        most_recent_message = self._get_messages(1)[0]
        yield most_recent_message
        oldest_message_id = most_recent_message['id']
        messages = self._get_messages_before(oldest_message_id)
        while messages:
            # index 0, not 1: a page may contain only a single message
            logger.info('found {} messages, from {} to {}'.format(len(messages), arrow.get(messages[0]['created_at']), arrow.get(messages[-1]['created_at'])))
            for message in messages:
                yield message
            oldest_message_id = messages[-1]['id']
            messages = self._get_messages_before(oldest_message_id)
        logger.info('finished fetching all messages')

    def _get_all_messages_after_iter(self, og_message_id):
        '''Gets all messages created after the given message id
        '''
        oldest_message_id = og_message_id
        messages = self._get_messages_after(oldest_message_id)
        while messages:
            # index 0, not 1: a page may contain only a single message
            logger.info('found {} messages, from {} to {}'.format(len(messages), arrow.get(messages[0]['created_at']), arrow.get(messages[-1]['created_at'])))
            for message in messages:
                yield message
            oldest_message_id = messages[0]['id']
            messages = self._get_messages_after(oldest_message_id)
        logger.info('finished fetching all messages created after {}'.format(og_message_id))

    def _get_messages_before(self, groupme_message_id):
        '''Gets 100 messages that occurred immediately before the given id

        This will return an empty list if no messages exist before
        '''
        limit = 100
        url = '{}/groups/{}/messages'.format(self.base_url, self.group_id)
        res = requests.get(url, headers=self.headers, params={'limit': limit, 'before_id': groupme_message_id})
        if res.status_code == GROUPME_NO_MESSAGE_STATUS_CODE:
            # use the module logger (previously the root logger via logging.info)
            logger.info('no more messages before {}'.format(groupme_message_id))
            return []
        res.raise_for_status()
        return res.json()['response']['messages']

    def _get_messages_after(self, groupme_message_id):
        '''Gets 100 messages that occurred immediately after the given id

        This will return an empty list if no messages exist after
        '''
        limit = 100
        url = '{}/groups/{}/messages'.format(self.base_url, self.group_id)
        res = requests.get(url, headers=self.headers, params={'limit': limit, 'after_id': groupme_message_id})
        if res.status_code == GROUPME_NO_MESSAGE_STATUS_CODE:
            # use the module logger (previously the root logger via logging.info)
            logger.info('no more messages after {}'.format(groupme_message_id))
            return []
        res.raise_for_status()
        return res.json()['response']['messages']
|
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from amu_bachelor_thesis_app.models import User
@login_required
def login_redirect(request):
    """Send a freshly logged-in user to the landing page for their role."""
    if not request.user.is_authenticated:
        return HttpResponse('Unauthorized', status=401)
    target = (
        'amu_bachelor_thesis:home'
        if request.user.role == User.SUPERUSER
        else 'amu_bachelor_thesis:search_engine'
    )
    return HttpResponseRedirect(reverse(target))
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
''' IMPORTS '''
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARS '''
SERVER_NAME = demisto.params()['server']
USERNAME = demisto.params()['credentials']['identifier']
PASSWORD = demisto.params()['credentials']['password']
BASE_URL = SERVER_NAME + 'centreon/api/index.php?'
USE_SSL = False if demisto.params().get('insecure') else True
DEFAULT_HEADERS = {
'Content-Type': 'application/json'
}
''' HELPER FUNCTIONS '''
def httpRequest(method, urlSuffix, data, headers):
    """Issue an HTTP request against the Centreon API and return parsed JSON."""
    query_params = data if data is not None else {}
    url = BASE_URL + urlSuffix
    LOG('running %s request with url=%s\theaders=%s' % (method, url, headers))
    try:
        response = requests.request(
            method,
            url,
            verify=USE_SSL,
            params=query_params,
            headers=headers
        )
        response.raise_for_status()
        return response.json()
    except Exception as e:
        LOG(e)
        raise e
def httpPost(urlSuffix, data=None, files=None):
    # POST helper for the Centreon API; returns the parsed JSON response.
    # NOTE(review): `files` is accepted and logged but never forwarded to
    # requests.post -- confirm whether file upload was ever intended here.
    data = {} if data is None else data
    url = BASE_URL + urlSuffix
    LOG('running request with url=%s\tdata=%s\tfiles=%s' % (url, data, files))
    try:
        res = requests.post(url, data=data, verify=USE_SSL)
        res.raise_for_status()
        return res.json()
    except Exception as e:
        LOG(e)
        raise e
def login():
    """Authenticate against Centreon and return the session auth token."""
    credentials = {
        'username': USERNAME,
        'password': PASSWORD,
    }
    response = httpPost('action=authenticate', data=credentials)
    return response['authToken']
def transform_host_vals(key, value):
    """Map numeric host/service state codes to readable labels; pass everything else through."""
    # "down" and "unreachable" states can be added once their codes are known
    if key == 'State':
        return {'0': "Up", '4': "Pending"}.get(value, value)
    return value
def to_upper_camel_case(word):
    """Convert snake_case to UpperCamelCase; empty segments become '_'."""
    parts = []
    for segment in word.split('_'):
        parts.append(segment.capitalize() or '_')
    return ''.join(parts)
''' COMMANDS FUNCTIONS '''
def get_host_status():
    """Fetch the realtime status of all monitored hosts from Centreon."""
    args = demisto.args()
    DEFAULT_HEADERS['centreon-auth-token'] = login()
    return httpRequest('GET', 'object=centreon_realtime_hosts&action=list',
                       args, DEFAULT_HEADERS)
def get_host_status_command():
    """ corresponds to 'centreon-get-host-status' command. Brings the status of the connected hosts."""
    response = get_host_status()
    if (len(response) == 0):
        return "No Hosts found"
    # changing the keys from underscore notation to UpperCamelCase notation;
    # dict.items() works on both Python 2 and 3 (iteritems() is Python-2 only)
    camel_case_response = [dict((to_upper_camel_case(k), v) for k, v in dic.items()) for dic in response]
    # for the human readable - only including keys which have values. Also, transforming values from ints to readable text
    list_for_md = [dict((k, transform_host_vals(k, v)) for k, v in dic.items() if (v == 0 or v))
                   for dic in camel_case_response]
    entry = {
        'Type': entryTypes['note'],
        'Contents': camel_case_response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Centreon Hosts status', list_for_md),
        'EntryContext': {
            'Centreon.Host(val.Id==obj.Id)': camel_case_response
        }
    }
    return entry
def get_service_status():
    """Fetch the realtime status of all monitored services from Centreon."""
    args = demisto.args()
    DEFAULT_HEADERS['centreon-auth-token'] = login()
    return httpRequest('GET', 'object=centreon_realtime_services&action=list',
                       args, DEFAULT_HEADERS)
def get_service_status_command():
    """ corresponds to 'centreon-get-service-status' command. Brings the status of the connected services. """
    response = get_service_status()
    if (len(response) == 0):
        return "No Services found"
    # changing the keys from underscore notation to UpperCamelCase notation;
    # dict.items() works on both Python 2 and 3 (iteritems() is Python-2 only)
    camel_case_response = [dict((to_upper_camel_case(k), v) for k, v in dic.items()) for dic in response]
    # for the human readable - only including keys which have values. Also, transforming values from ints to readable text
    list_for_md = [dict((k, transform_host_vals(k, v)) for k, v in dic.items() if (v == 0 or v))
                   for dic in camel_case_response]
    entry = {
        'Type': entryTypes['note'],
        'Contents': camel_case_response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown('Centreon Services status', list_for_md),
        'EntryContext': {
            'Centreon.Service(val.ServiceId==obj.ServiceId)': camel_case_response
        }
    }
    return entry
''' EXECUTION CODE '''
LOG('command is %s' % (demisto.command(), ))
try:
    handle_proxy()
    if demisto.command() == 'test-module':
        # This is the call made when pressing the integration test button.
        if get_host_status():
            demisto.results('ok')
        else:
            demisto.results('test failed')
    elif demisto.command() == 'centreon-get-host-status':
        demisto.results(get_host_status_command())
    elif demisto.command() == 'centreon-get-service-status':
        demisto.results(get_service_status_command())
except Exception as e:
    # str(e) is safe on both Python 2 and 3; Exception.message was removed in
    # Python 3 and would raise AttributeError while handling the real error.
    LOG(str(e))
    LOG.print_log()
    raise
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : np_utils
# @Time : 2020/11/12 11:35 上午
# @Author : yuanjie
# @Email : yuanjie@xiaomi.com
# @Software : PyCharm
# @Description :
import numpy as np
# Grouping / splitting
# np.array_split(range(6), 3)
# iteration_utilities.split
# iteration_utilities.grouper([1,2,3,4], 2) | xlist
# Flattening
"""
l=[[1,2,3],[4,[5],[6,7]],[8,[9,[10]]]]*1000
from iteration_utilities import deepflatten
_ = list(deepflatten(l)) # 快十倍
_ = sum(l, [])
"""
def normalize(x):
    """L2-normalize *x*: per-row for 2-D (or higher) input, whole-vector for 1-D.

    Each squared element is clipped to at least 1e-12 before summing so an
    all-zero input never divides by zero.
    """
    squared = np.clip(x ** 2, 1e-12, None)
    if len(x.shape) > 1:
        denom = squared.sum(axis=1).reshape((-1, 1) + x.shape[2:]) ** 0.5
        return x / denom
    return x / squared.sum() ** 0.5
|
# Generated by Django 2.0 on 2017-12-25 05:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add ``Game.num_players`` (default 5)."""

    dependencies = [
        ('app', '0004_auto_20171221_0621'),
    ]

    operations = [
        migrations.AddField(
            model_name='game',
            name='num_players',
            field=models.PositiveIntegerField(default=5),
        ),
    ]
|
from django.core.cache import cache
from datetime import datetime
from museum_site.models.detail import Detail
from museum_site.models.file import File
from museum_site.constants import TERMS_DATE
from museum_site.common import (
DEBUG, EMAIL_ADDRESS, BOOT_TS, CSS_INCLUDES, UPLOAD_CAP, env_from_host,
qs_sans
)
from museum_site.core.detail_identifiers import *
def museum_global(request):
    """Context processor supplying site-wide template variables.

    Returns a dict containing debug state, host/protocol info, server time
    flags, common query-string variants, the featured world, upload info,
    and may force a re-login when the Terms of Service have been updated.
    """
    data = {}

    # Debug mode -- enabled via settings, query string, or session
    if DEBUG or request.GET.get("DEBUG") or request.session.get("DEBUG"):
        data["debug"] = True
    else:
        data["debug"] = False

    # Server info
    data["HOST"] = request.get_host()
    data["ENV"] = env_from_host(data["HOST"])
    data["PROTOCOL"] = "https" if request.is_secure() else "http"
    data["DOMAIN"] = data["PROTOCOL"] + "://" + data["HOST"]

    # Server date/time (naive UTC)
    data["datetime"] = datetime.utcnow()
    if data["datetime"].day == 27:  # Drupe Day
        data["drupe"] = True
    if data["datetime"].day == 1 and data["datetime"].month == 4:  # April 1st
        data["april"] = True

    # Common query string modifications
    data["qs_sans_page"] = qs_sans(request.GET, "page")
    data["qs_sans_view"] = qs_sans(request.GET, "view")
    data["qs_sans_both"] = qs_sans(request.GET, ["page", "view"])

    # E-mail
    data["EMAIL_ADDRESS"] = EMAIL_ADDRESS
    data["BOOT_TS"] = BOOT_TS

    # CSS Files
    data["CSS_INCLUDES"] = CSS_INCLUDES

    # Featured Worlds -- random pick unless an explicit ?fgid= override
    data["fg"] = File.objects.featured_worlds().order_by("?").first()
    if request.GET.get("fgid"):
        data["fg"] = File.objects.reach(pk=int(request.GET["fgid"]))
    if data["fg"]:
        # (removed a redundant `data["fg"] = data["fg"]` self-assignment)
        data["fg"].extra_context = {"nozoom": True}

    # Upload Cap
    data["UPLOAD_CAP"] = UPLOAD_CAP

    # Queue size
    data["UPLOAD_QUEUE_SIZE"] = cache.get("UPLOAD_QUEUE_SIZE", "-")

    # User TOS Date checks
    if request.user.is_authenticated:
        if (
            TERMS_DATE > request.user.profile.accepted_tos and
            request.method == "GET" and
            request.path != "/user/update-tos/"
        ):
            # Force a new login by dropping the auth keys from the session
            for key in [
                "_auth_user_id", "_auth_user_backend", "_auth_user_hash"
            ]:
                if request.session.get(key):
                    del request.session[key]

    return data
|
# -*- coding: utf-8
import sys
import os
import io
import stat
import errno
import operator
import functools
from . import os, strings, collections
from .itertools import filterfalse
from .operator import methodcaller, identity as _sanitize_path_sep
from zipfile import *
import zipfile as _zipfile
# Re-export the same public API as the stdlib zipfile module.
__all__ = _zipfile.__all__

# On platforms whose path separator is not "/", normalize incoming paths to
# forward slashes before looking them up inside the archive.
if os.sep != "/":
    _sanitize_path_sep = methodcaller(str.replace, os.sep, "/")
class ZipFile(_zipfile.ZipFile):
    """Extends zipfile.ZipFile with in-archive resolution of symbolic links"""

    # Upper bound on the length of any resolved path or symlink target;
    # guards against pathological link chains inside the archive.
    _max_path = 64 << 10

    def getinfo(self, name, pwd=None, *, follow_symlinks=False,
                fail_missing=True
                ):
        """Return the ZipInfo for *name*.

        When *follow_symlinks* is true, symbolic links stored inside the
        archive are resolved first (*pwd* is used to read link targets).
        A missing entry raises KeyError when *fail_missing* is true,
        otherwise None is returned.
        """
        if follow_symlinks:
            return self._resolve_path(name, pwd, fail_missing)
        # A ZipInfo argument is passed through unchanged.
        if isinstance(name, ZipInfo):
            return name
        name = os.fspath(name)
        return self._check_missing(self.NameToInfo.get(name), name, fail_missing)

    def open(self, path, mode='r', pwd=None, *, follow_symlinks=False,
             fail_missing=True, **kwargs
             ):
        """Open an archive member, optionally resolving symlinks first.

        Returns None when the member is missing and *fail_missing* is false.
        """
        path = self.getinfo(
            path, pwd, follow_symlinks=follow_symlinks, fail_missing=fail_missing)
        return path and super().open(path, mode, pwd, **kwargs)

    def read(self, path, pwd=None, *, follow_symlinks=True, fail_missing=True):
        """Read a member's bytes.

        Note that unlike open()/extract(), symlinks are followed by default.
        Returns None when the member is missing and *fail_missing* is false.
        """
        path = self.getinfo(
            path, pwd, follow_symlinks=follow_symlinks, fail_missing=fail_missing)
        return path and super().read(path, pwd)

    def extract(self, member, path=None, pwd=None, *, follow_symlinks=False,
                fail_missing=True
                ):
        """Extract a member.

        Returns True on success, False when the member was missing (and
        *fail_missing* is false).
        """
        member = self.getinfo(
            member, pwd, follow_symlinks=follow_symlinks, fail_missing=fail_missing)
        success = member is not None
        if success:
            super().extract(member, path, pwd)
        return success

    def _resolve_path(self, path, pwd, fail_missing):
        # Resolve *path* one component at a time, following in-archive
        # symlinks, and return the final ZipInfo (or None / raise KeyError
        # depending on *fail_missing*).
        if isinstance(path, ZipInfo):
            path = path.filename
        else:
            path = _sanitize_path_sep(os.fspath(path))
        assert os.sep == "/" or os.sep not in path
        is_dir = path.endswith("/")
        path = path.strip("/")
        inspected = []
        # Components still to resolve, kept reversed so pop() takes the next.
        uninspected = path.split("/")
        uninspected.reverse()
        # Symlinks seen so far, to detect loops (ELOOP).
        seen_set = collections.ExtSet()
        c_info = None
        while uninspected:
            c_info = self._resolve_path_component(
                inspected, uninspected, pwd, seen_set)
        if is_dir and inspected:
            # Restore the trailing "/" so directory entries are looked up
            # under their stored name.
            inspected.append("")
            c_info = self.NameToInfo.get("/".join(inspected))
        return self._check_missing(c_info, path, fail_missing)

    def _resolve_path_component(self, inspected, uninspected, pwd, seen_set):
        # Consume one component from *uninspected*; append resolved
        # components to *inspected*.  Returns the component's ZipInfo, or
        # None for "." / empty components and for ".." backtracking.
        c = uninspected.pop()
        #_eprintf('_resolve_path_component(): {!r}, {!r}, {!r}', inspected, c, uninspected)
        if not c or c == os.curdir:
            return None
        if c == os.pardir:
            if not inspected:
                # ".." from the archive root escapes the archive -- reject.
                uninspected.append(c)
                uninspected.reverse()
                raise self._OSError(
                    errno.ENOENT, 'Path points outside of this archive',
                    "/".join(uninspected))
            inspected.pop()
            return None
        inspected.append(c)
        c_full = "/".join(inspected)
        c_info = self.NameToInfo.get(c_full)
        # Unix mode bits live in the high 16 bits of external_attr.
        if c_info is None or not stat.S_ISLNK(c_info.external_attr >> 16):
            if self.debug >= 2:
                _eprintf('{:s}: {!r}',
                         ('Not a symlink', 'Does not exist')[c_info is None],
                         ':'.join((self.filename, c_full)))
            return c_info
        if _info_is_dir(c_info):
            raise BadZipFile(
                "{:s}:{!r} claims to be both a directory and a symbolic link."
                .format(self.filename, c_info))
        if len(c_info.filename) - len(c) + c_info.file_size > self._max_path:
            raise self._OSError(errno.ENAMETOOLONG, None, c_info.filename)
        if not seen_set.add(c_info.filename):
            # Already followed this link during this resolution: a loop.
            raise self._OSError(errno.ELOOP, None, c_info.filename)
        # Push the link target's components back onto the worklist and
        # re-resolve from the current directory.
        uninspected.extend(reversed(
            self._read_symlink(c_info, pwd).rstrip("/").split("/")))
        inspected.pop()
        return c_info

    def _read_symlink(self, info, pwd):
        # Return the target string of the symlink entry *info*, validating
        # encoding, length, and the sanity checks in _read_symlink_tests.
        if info.flag_bits & 0x800:
            # General-purpose bit 11: file name (and here, content) is UTF-8.
            encoding = "utf-8"
            errmsg = None
        else:
            encoding = "ascii"
            errmsg = (
                "Non-ASCII character in symbolic link with legacy file name encoding")
        with io.TextIOWrapper(super().open(info, "r", pwd), encoding) as fp:
            try:
                target = fp.read(self._max_path)
            except UnicodeDecodeError:
                raise self._OSError(errno.EILSEQ, errmsg, info.filename)
            assert not fp.read(1), (
                "The size of {info.filename!r} inside {self.filename!r} "
                "({info.file_size:d}) exceeds "
                "{type.__module__:s}.{type.__qualname__:s}._max_path "
                "({self._max_path:d})."
                .format(type=type(self), self=self, info=info))
        for f_test, errmsg in self._read_symlink_tests:
            if f_test is not None and f_test(target):
                raise self._OSError(errno.ENOENT, errmsg, info.filename)
        if self.debug >= 2:
            _eprintf("Found symbolic link: {!r} => {!r}",
                     ":".join((self.filename, info.filename)), target)
        return target

    # (predicate, error message) pairs applied to every symlink target.
    _read_symlink_tests = (
        (operator.not_, "Empty symbolic link"),
        (methodcaller(str.startswith, "/"),
            "Absolute symbolic link target inside an archive"),
        (methodcaller(str.__contains__, "\0"), "NUL char in symbolic link"),
    )

    def _check_missing(self, info, path, fail_missing):
        # Shared "not found" policy: raise KeyError or return None.
        if info is None and fail_missing:
            raise KeyError(
                'There is no item named {!r} in the archive {!r}'
                .format(path, self.filename))
        return info

    def _OSError(self, err, msg=None, filename=None, filename2=None):
        # Build an OSError whose filename is prefixed with the archive name.
        if filename is None:
            filename = self.filename
        else:
            filename = ':'.join((self.filename, filename))
        return OSError(err, msg or os.strerror(err), filename, None, filename2)
# zipfile.ZipInfo.is_dir() appeared in Python 3.6; on older versions fall
# back to checking for the trailing "/" directory marker.
try:
    _info_is_dir = _zipfile.ZipInfo.is_dir
except AttributeError:
    def _info_is_dir(info):
        return info.filename.endswith("/")
def _eprintf(fmt, *args):
return print(fmt.format(*args), file=sys.stderr)
def _parse_args(args):
import argparse
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
self.exit(2,
'{:s}Error: {:s}\nPlease use the options "-h" or "--help" for more '
'detailled usage info.\n'
.format(self.format_usage(), message))
ap = ArgumentParser(
description='Show symbolic link targets inside ZIP archives.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)
ap.add_argument('archive',
type=argparse.FileType('rb'),
help='Path to a ZIP archive')
ap.add_argument('paths', nargs='+',
help='Archive member paths to inspect')
ap.add_argument('-L', '--follow-symlinks', metavar='N',
type=int, default=1,
help='Follow symbolic links during archive member inspection if N != 0.')
ap.add_argument('-h', '--help', dest=argparse.SUPPRESS,
action='help', help=argparse.SUPPRESS)
apdg = ap.add_mutually_exclusive_group()
apdg.add_argument('-d', dest='debug',
action='count', default=0,
help='Increase debugging level by 1. Can be specified multiple times.')
apdg.add_argument('--debug', dest='debug',
metavar='N', type=int, default=0,
help='Set debugging level directly.')
return ap.parse_args(args)
def _main(args=None):
    """Entry point: resolve each requested path in the archive and report it."""
    options = _parse_args(args)
    with options.archive, ZipFile(options.archive) as archive:
        archive.debug = options.debug
        lookup = functools.partial(ZipFile.getinfo, archive,
            follow_symlinks=options.follow_symlinks, fail_missing=False)
        for member in options.paths:
            info = lookup(member)
            if info is None:
                _eprintf(
                    '{:s}: {!r} => No such archive entry or dangling symbolic link',
                    archive.filename, member)
            else:
                print('{:s}: {!r} => {!r}'.format(
                    archive.filename, member, info.filename))


if __name__ == '__main__':
    _main()
|
import base64
import hashlib
import hmac
import json
import re
import time
from collections import defaultdict
import numpy as np
import pandas as pd
import pytz
import requests
# import six
from six import iteritems
from catalyst.assets._assets import TradingPair
from logbook import Logger
from catalyst.exchange.exchange_bundle import ExchangeBundle
from catalyst.exchange.poloniex.poloniex_api import Poloniex_api
# from websocket import create_connection
from catalyst.exchange.exchange import Exchange
from catalyst.exchange.exchange_errors import (
ExchangeRequestError,
InvalidHistoryFrequencyError,
InvalidOrderStyle, OrderCancelError,
OrphanOrderReverseError)
from catalyst.exchange.exchange_execution import ExchangeLimitOrder, \
ExchangeStopLimitOrder, ExchangeStopOrder
from catalyst.finance.order import Order, ORDER_STATUS
from catalyst.protocol import Account
from catalyst.exchange.exchange_utils import get_exchange_symbols_filename, \
download_exchange_symbols
from catalyst.finance.transaction import Transaction
from catalyst.constants import LOG_LEVEL
log = Logger('Poloniex', level=LOG_LEVEL)
class Poloniex(Exchange):
    """Catalyst Exchange implementation backed by the Poloniex REST API."""

    def __init__(self, key, secret, base_currency, portfolio=None):
        # The Poloniex API client expects the secret as bytes.
        self.api = Poloniex_api(key=key, secret=secret.encode('UTF-8'))
        self.name = 'poloniex'
        self.assets = {}
        self.load_assets()
        self.base_currency = base_currency
        self._portfolio = portfolio
        self.minute_writer = None
        self.minute_reader = None
        # Executed transactions per order id (tracks partial fills).
        self.transactions = defaultdict(list)
        # Poloniex API limits.
        self.num_candles_limit = 2000
        self.max_requests_per_minute = 60
        self.request_cpt = dict()
        self.bundle = ExchangeBundle(self)

    def sanitize_curency_symbol(self, exchange_symbol):
        """
        Helper method used to build the universal pair.
        Include any symbol mapping here if appropriate.

        :param exchange_symbol:
        :return universal_symbol:
        """
        # NOTE(review): the method name misspells "currency"; kept as-is
        # because callers depend on it.
        return exchange_symbol.lower()

    def _create_order(self, order_status):
        """
        Create a Catalyst order object from the Exchange order dictionary
        :param order_status:
        :return: Order
        """
        # if order_status['is_cancelled']:
        #     status = ORDER_STATUS.CANCELLED
        # elif not order_status['is_live']:
        #     log.info('found executed order {}'.format(order_status))
        #     status = ORDER_STATUS.FILLED
        # else:
        # Status is always OPEN here; the commented branches above suggest
        # cancelled/filled detection was planned but never implemented.
        status = ORDER_STATUS.OPEN
        amount = float(order_status['amount'])
        # filled = float(order_status['executed_amount'])
        filled = None
        if order_status['type'] == 'sell':
            # Catalyst convention: sells carry a negative amount.
            amount = -amount
            # filled = -filled
        price = float(order_status['rate'])
        order_type = order_status['type']
        stop_price = None
        limit_price = None
        # TODO: is this comprehensive enough?
        # if order_type.endswith('limit'):
        #     limit_price = price
        # elif order_type.endswith('stop'):
        #     stop_price = price
        # executed_price = float(order_status['avg_execution_price'])
        executed_price = price
        # TODO: bitfinex does not specify comission. I could calculate it but not sure if it's worth it.
        commission = None
        # date = pd.Timestamp.utcfromtimestamp(float(order_status['timestamp']))
        # date = pytz.utc.localize(date)
        date = None
        order = Order(
            dt=date,
            asset=self.assets[order_status['symbol']],
            # No such field in Poloniex
            amount=amount,
            stop=stop_price,
            limit=limit_price,
            filled=filled,
            id=str(order_status['orderNumber']),
            commission=commission
        )
        order.status = status
        return order, executed_price

    def get_balances(self):
        """Fetch wallet balances as a dict of lower-case currency -> float."""
        log.debug('retrieving wallets balances')
        try:
            balances = self.api.returnbalances()
        except Exception as e:
            log.debug(e)
            raise ExchangeRequestError(error=e)

        if 'error' in balances:
            raise ExchangeRequestError(
                error='unable to fetch balance {}'.format(balances['error'])
            )

        std_balances = dict()
        for (key, value) in iteritems(balances):
            currency = key.lower()
            std_balances[currency] = float(value)

        return std_balances

    @property
    def account(self):
        # Poloniex exposes no account-level metrics; return an Account with
        # every field unset.
        account = Account()

        account.settled_cash = None
        account.accrued_interest = None
        account.buying_power = None
        account.equity_with_loan = None
        account.total_positions_value = None
        account.total_positions_exposure = None
        account.regt_equity = None
        account.regt_margin = None
        account.initial_margin_requirement = None
        account.maintenance_margin_requirement = None
        account.available_funds = None
        account.excess_liquidity = None
        account.cushion = None
        account.day_trades_remaining = None
        account.leverage = None
        account.net_leverage = None
        account.net_liquidation = None

        return account

    @property
    def time_skew(self):
        # TODO: research the time skew conditions
        return pd.Timedelta('0s')

    def get_account(self):
        # TODO: fetch account data and keep in cache
        return None

    def get_candles(self, data_frequency, assets, bar_count=None,
                    start_dt=None, end_dt=None):
        """
        Retrieve OHLVC candles from Poloniex

        :param data_frequency:
        :param assets:
        :param bar_count:
        :return:

        Available Frequencies
        ---------------------
        '5m', '15m', '30m', '2h', '4h', '1D'
        """
        # TODO: implement end_dt and start_dt filters
        # Map the requested frequency label to the candle period in seconds.
        if (
            data_frequency == '5m' or data_frequency == 'minute'):  # TODO: Polo does not have '1m'
            frequency = 300
        elif (data_frequency == '15m'):
            frequency = 900
        elif (data_frequency == '30m'):
            frequency = 1800
        elif (data_frequency == '2h'):
            frequency = 7200
        elif (data_frequency == '4h'):
            frequency = 14400
        elif (data_frequency == '1D' or data_frequency == 'daily'):
            frequency = 86400
        else:
            raise InvalidHistoryFrequencyError(
                frequency=data_frequency
            )

        # Making sure that assets are iterable
        asset_list = [assets] if isinstance(assets, TradingPair) else assets

        ohlc_map = dict()
        for asset in asset_list:
            end = int(time.time())
            if (bar_count is None):
                start = end - 2 * frequency
            else:
                start = end - bar_count * frequency

            try:
                response = self.api.returnchartdata(self.get_symbol(asset),
                                                    frequency, start, end)
            except Exception as e:
                raise ExchangeRequestError(error=e)

            if 'error' in response:
                raise ExchangeRequestError(
                    error='Unable to retrieve candles: {}'.format(
                        response.content)
                )

            def ohlc_from_candle(candle):
                # Poloniex timestamps are epoch seconds; make them aware UTC.
                last_traded = pd.Timestamp.utcfromtimestamp(candle['date'])
                last_traded = last_traded.replace(tzinfo=pytz.UTC)
                ohlc = dict(
                    open=np.float64(candle['open']),
                    high=np.float64(candle['high']),
                    low=np.float64(candle['low']),
                    close=np.float64(candle['close']),
                    volume=np.float64(candle['volume']),
                    price=np.float64(candle['close']),
                    last_traded=last_traded
                )
                return ohlc

            if bar_count is None:
                # Single most-recent candle per asset.
                ohlc_map[asset] = ohlc_from_candle(response[0])
            else:
                ohlc_bars = []
                for candle in response:
                    ohlc = ohlc_from_candle(candle)
                    ohlc_bars.append(ohlc)
                ohlc_map[asset] = ohlc_bars

        # Mirror the input: single value for a single asset, else a map.
        return ohlc_map[assets] \
            if isinstance(assets, TradingPair) else ohlc_map

    def create_order(self, asset, amount, is_buy, style):
        """
        Creating order on the exchange.
        :param asset:
        :param amount:
        :param is_buy:
        :param style:
        :return:
        """
        exchange_symbol = self.get_symbol(asset)
        if isinstance(style, ExchangeLimitOrder) or isinstance(style,
                                                               ExchangeStopLimitOrder):
            if isinstance(style, ExchangeStopLimitOrder):
                log.warn('{} will ignore the stop price'.format(self.name))

            price = style.get_limit_price(is_buy)
            try:
                if (is_buy):
                    response = self.api.buy(exchange_symbol, amount, price)
                else:
                    # Sells arrive with a negative amount; Poloniex expects a
                    # positive quantity.
                    response = self.api.sell(exchange_symbol, -amount, price)
            except Exception as e:
                raise ExchangeRequestError(error=e)

            date = pd.Timestamp.utcnow()
            if ('orderNumber' in response):
                order_id = str(response['orderNumber'])

                order = Order(
                    dt=date,
                    asset=asset,
                    amount=amount,
                    stop=style.get_stop_price(is_buy),
                    limit=style.get_limit_price(is_buy),
                    id=order_id
                )
                return order
            else:
                log.warn(
                    '{} order failed: {}'.format('buy' if is_buy else 'sell',
                                                 response['error']))
                return None
        else:
            # Only (stop-)limit styles are supported.
            raise InvalidOrderStyle(exchange=self.name,
                                    style=style.__class__.__name__)

    def get_open_orders(self, asset='all'):
        """Retrieve all of the current open orders.

        Parameters
        ----------
        asset : Asset
            If passed and not 'all', return only the open orders for the given
            asset instead of all open orders.

        Returns
        -------
        open_orders : dict[list[Order]] or list[Order]
            If 'all' is passed this will return a dict mapping Assets
            to a list containing all the open orders for the asset.
            If an asset is passed then this will return a list of the open
            orders for this asset.
        """
        # NOTE(review): the method returns here; everything below is
        # deliberately dead code (see the TODO string right after).
        return self.portfolio.open_orders

        """
        TODO: Why going to the exchange if we already have this info locally?
        And why creating all these Orders if we later discard them?
        """
        try:
            if (asset == 'all'):
                response = self.api.returnopenorders('all')
            else:
                response = self.api.returnopenorders(self.get_symbol(asset))
        except Exception as e:
            raise ExchangeRequestError(error=e)

        if 'error' in response:
            # NOTE(review): `order_statuses` is undefined in this (dead) path.
            raise ExchangeRequestError(
                error='Unable to retrieve open orders: {}'.format(
                    order_statuses['message'])
            )
        print(self.portfolio.open_orders)

        # TODO: Need to handle openOrders for 'all'
        orders = list()
        for order_status in response:
            order, executed_price = self._create_order(
                order_status)  # will Throw error b/c Polo doesn't track order['symbol']
            if asset is None or asset == order.sid:
                orders.append(order)

        return orders

    def get_order(self, order_id):
        """Lookup an order based on the order id returned from one of the
        order functions.

        Parameters
        ----------
        order_id : str
            The unique identifier for the order.

        Returns
        -------
        order : Order
            The order object.
        """
        try:
            order = self._portfolio.open_orders[order_id]
        except Exception as e:
            # NOTE(review): OrphanOrderError is not among this module's
            # imports (only OrphanOrderReverseError is) -- this path would
            # raise NameError; confirm before relying on it.
            raise OrphanOrderError(order_id=order_id, exchange=self.name)
        return order

        # TODO: Need to decide whether we fetch orders locally or from exchnage
        # The code below is ignored (unreachable after the return above).
        try:
            response = self.api.returnopenorders(self.get_symbol(order.sid))
        except Exception as e:
            raise ExchangeRequestError(error=e)

        for o in response:
            if (int(o['orderNumber']) == int(order_id)):
                return order
        return None

    def cancel_order(self, order_param):
        """Cancel an open order.

        Parameters
        ----------
        order_param : str or Order
            The order_id or order object to cancel.
        """
        if (isinstance(order_param, Order)):
            order = order_param
        else:
            order = self._portfolio.open_orders[order_param]

        try:
            response = self.api.cancelorder(order.id)
        except Exception as e:
            raise ExchangeRequestError(error=e)

        if 'error' in response:
            # Log and fall through: the order is removed locally even when
            # the exchange rejected the cancellation.
            log.info(
                'Unable to cancel order {order_id} on exchange {exchange} {error}.'.format(
                    order_id=order.id,
                    exchange=self.name,
                    error=response['error']
                ))
            # raise OrderCancelError(
            #     order_id=order.id,
            #     exchange=self.name,
            #     error=response['error']
            # )
        self.portfolio.remove_order(order)

    def tickers(self, assets):
        """
        Fetch ticket data for assets
        https://docs.bitfinex.com/v2/reference#rest-public-tickers

        :param assets:
        :return:
        """
        symbols = self.get_symbols(assets)
        log.debug('fetching tickers {}'.format(symbols))

        try:
            response = self.api.returnticker()
        except Exception as e:
            raise ExchangeRequestError(error=e)

        if 'error' in response:
            raise ExchangeRequestError(
                error='Unable to retrieve tickers: {}'.format(
                    response['error'])
            )

        ticks = dict()
        for index, symbol in enumerate(symbols):
            ticks[assets[index]] = dict(
                timestamp=pd.Timestamp.utcnow(),
                bid=float(response[symbol]['highestBid']),
                ask=float(response[symbol]['lowestAsk']),
                last_price=float(response[symbol]['last']),
                low=float(response[symbol]['lowestAsk']),
                # TODO: Polo does not provide low
                high=float(response[symbol]['highestBid']),
                # TODO: Polo does not provide high
                volume=float(response[symbol]['baseVolume']),
            )

        log.debug('got tickers {}'.format(ticks))
        return ticks

    def generate_symbols_json(self, filename=None, source_dates=False):
        """Write the symbols JSON file for this exchange.

        Start/end dates come from the cached symbols file unless
        *source_dates* is True, in which case each start date is queried
        from the exchange.
        """
        symbol_map = {}

        if not source_dates:
            fn, r = download_exchange_symbols(self.name)
            with open(fn) as data_file:
                cached_symbols = json.load(data_file)

        response = self.api.returnticker()
        for exchange_symbol in response:
            # Poloniex pairs are "BASE_MARKET"; the universal form is
            # "market_base".
            base, market = self.sanitize_curency_symbol(exchange_symbol).split(
                '_')
            symbol = '{market}_{base}'.format(market=market, base=base)

            if (source_dates):
                start_date = self.get_symbol_start_date(exchange_symbol)
            else:
                try:
                    start_date = cached_symbols[exchange_symbol]['start_date']
                except KeyError as e:
                    start_date = time.strftime('%Y-%m-%d')

            try:
                end_daily = cached_symbols[exchange_symbol]['end_daily']
            except KeyError as e:
                end_daily = 'N/A'

            try:
                end_minute = cached_symbols[exchange_symbol]['end_minute']
            except KeyError as e:
                end_minute = 'N/A'

            symbol_map[exchange_symbol] = dict(
                symbol=symbol,
                start_date=start_date,
                end_daily=end_daily,
                end_minute=end_minute,
            )

        if (filename is None):
            filename = get_exchange_symbols_filename(self.name)

        with open(filename, 'w') as f:
            json.dump(symbol_map, f, sort_keys=True, indent=2,
                      separators=(',', ':'))

    def get_symbol_start_date(self, symbol):
        """Return (as 'YYYY-MM-DD') the date of the first daily candle."""
        try:
            # Daily candles from 2010-01-01 (epoch seconds) onward.
            r = self.api.returnchartdata(symbol, 86400, pd.to_datetime(
                '2010-1-1').value // 10 ** 9)
        except Exception as e:
            raise ExchangeRequestError(error=e)
        return time.strftime('%Y-%m-%d', time.gmtime(int(r[0]['date'])))

    def check_open_orders(self):
        """
        Need to override this function for Poloniex:
        Loop through the list of open orders in the Portfolio object.
        Check if any transactions have been executed:
            If so, create a transaction and apply to the Portfolio.
        Check if the order is still open:
            If not, remove it from open orders

        :return:
        transactions: Transaction[]
        """
        transactions = list()
        if self.portfolio.open_orders:
            for order_id in list(self.portfolio.open_orders):
                order = self._portfolio.open_orders[order_id]
                log.debug('found open order: {}'.format(order_id))

                try:
                    order_open = self.get_order(order_id)
                except Exception as e:
                    raise ExchangeRequestError(error=e)

                if (order_open):
                    delta = pd.Timestamp.utcnow() - order.dt
                    log.info(
                        'order {order_id} still open after {delta}'.format(
                            order_id=order_id,
                            delta=delta)
                    )

                try:
                    response = self.api.returnordertrades(order_id)
                except Exception as e:
                    raise ExchangeRequestError(error=e)

                if ('error' in response):
                    if (not order_open):
                        raise OrphanOrderReverseError(order_id=order_id,
                                                      exchange=self.name)
                else:
                    for tx in response:
                        """
                        We maintain a list of dictionaries of transactions that correspond to
                        partially filled orders, indexed by order_id. Every time we query
                        executed transactions from the exchange, we check if we had that
                        transaction for that order already. If not, we process it.
                        When an order if fully filled, we flush the dict of transactions
                        associated with that order.
                        """
                        # NOTE(review): under Python 3 `filter(...)` returns a
                        # lazy object that is always truthy, so `not filter(...)`
                        # is always False and this branch never runs; the check
                        # behaves as intended only on Python 2 (filter -> list).
                        # Confirm the target runtime.
                        if (not filter(
                                lambda item: item['order_id'] == tx['tradeID'],
                                self.transactions[order_id])):
                            log.debug(
                                'Got new transaction for order {}: amount {}, price {}'.format(
                                    order_id, tx['amount'], tx['rate']))
                            tx['amount'] = float(tx['amount'])
                            if (tx['type'] == 'sell'):
                                tx['amount'] = -tx['amount']
                            transaction = Transaction(
                                asset=order.asset,
                                amount=tx['amount'],
                                dt=pd.to_datetime(tx['date'], utc=True),
                                price=float(tx['rate']),
                                order_id=tx['tradeID'],
                                # it's a misnomer, but keeping it for compatibility
                                commission=float(tx['fee'])
                            )
                            self.transactions[order_id].append(transaction)
                            self.portfolio.execute_transaction(transaction)
                            transactions.append(transaction)
                    if (not order_open):
                        """
                        Since transactions have been executed individually
                        the only thing left to do is remove them from list of open_orders
                        """
                        del self.portfolio.open_orders[order_id]
                        del self.transactions[order_id]

        return transactions

    def get_orderbook(self, asset, order_type='all'):
        """Return the order book for *asset* as {'asks': [...], 'bids': [...]}."""
        exchange_symbol = asset.exchange_symbol
        data = self.api.returnOrderBook(market=exchange_symbol)

        result = dict()
        # NOTE(review): this loop variable shadows (and overwrites) the
        # `order_type` parameter, so the parameter is effectively ignored.
        for order_type in data:
            # TODO: filter by type
            if order_type != 'asks' and order_type != 'bids':
                continue

            result[order_type] = []
            for entry in data[order_type]:
                if len(entry) == 2:
                    result[order_type].append(
                        dict(
                            rate=float(entry[0]),
                            quantity=float(entry[1])
                        )
                    )

        return result
|
"""
Intended to be imported by a lambda through 'package_objects'
"""
# Constant exposed for the lambda-packaging test.
MAGIC_VALUE = 21


def magic_function():
    """Return the module's magic constant."""
    return MAGIC_VALUE
|
"""
Set operations for arrays based on sorting.
:Contains:
unique,
isin,
ediff1d,
intersect1d,
setxor1d,
in1d,
union1d,
setdiff1d
:Notes:
For floating point arrays, inaccurate results may appear due to usual round-off
and floating point comparison issues.
Speed could be gained in some operations by an implementation of
sort(), that can provide directly the permutation vectors, avoiding
thus calls to argsort().
To do: Optionally return indices analogously to unique for all functions.
:Author: Robert Cimrman
"""
import functools
import numpy as np
from numpy.core import overrides
# Register functions with the __array_function__ protocol under the public
# "numpy" module name.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


__all__ = [
    'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
    'in1d', 'isin'
]
def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
return (ary, to_end, to_begin)
@array_function_dispatch(_ediff1d_dispatcher)
def ediff1d(ary, to_end=None, to_begin=None):
    """
    The differences between consecutive elements of an array.

    Parameters
    ----------
    ary : array_like
        If necessary, will be flattened before the differences are taken.
    to_end : array_like, optional
        Number(s) to append at the end of the returned differences.
    to_begin : array_like, optional
        Number(s) to prepend at the beginning of the returned differences.

    Returns
    -------
    ediff1d : ndarray
        The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.

    See Also
    --------
    diff, gradient

    Notes
    -----
    When applied to masked arrays, this function drops the mask information
    if the `to_begin` and/or `to_end` parameters are used.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.ediff1d(x)
    array([ 1,  2,  3, -7])

    >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
    array([-99,   1,   2, ...,  -7,  88,  99])

    The returned array is always 1D.

    >>> y = [[1, 2, 4], [1, 6, 24]]
    >>> np.ediff1d(y)
    array([ 1,  2, -3,  5, 18])
    """
    # force a 1d array
    ary = np.asanyarray(ary).ravel()

    # enforce that the dtype of `ary` is used for the output
    dtype_req = ary.dtype

    # fast track default case
    if to_begin is None and to_end is None:
        return ary[1:] - ary[:-1]

    if to_begin is None:
        l_begin = 0
    else:
        to_begin = np.asanyarray(to_begin)
        if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
            # BUGFIX: this message previously (and wrongly) named `to_end`.
            raise TypeError("dtype of `to_begin` must be compatible "
                            "with input `ary` under the `same_kind` rule.")
        to_begin = to_begin.ravel()
        l_begin = len(to_begin)

    if to_end is None:
        l_end = 0
    else:
        to_end = np.asanyarray(to_end)
        if not np.can_cast(to_end, dtype_req, casting="same_kind"):
            raise TypeError("dtype of `to_end` must be compatible "
                            "with input `ary` under the `same_kind` rule.")
        to_end = to_end.ravel()
        l_end = len(to_end)

    # do the calculation in place and copy to_begin and to_end
    l_diff = max(len(ary) - 1, 0)
    result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
    result = ary.__array_wrap__(result)
    if l_begin > 0:
        result[:l_begin] = to_begin
    if l_end > 0:
        result[l_begin + l_diff:] = to_end
    np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
    return result
def _unpack_tuple(x):
""" Unpacks one-element tuples for use as return values """
if len(x) == 1:
return x[0]
else:
return x
def _unique_dispatcher(ar, return_index=None, return_inverse=None,
return_counts=None, axis=None):
return (ar,)
@array_function_dispatch(_unique_dispatcher)
def unique(ar, return_index=False, return_inverse=False,
           return_counts=False, axis=None):
    """
    Find the unique elements of an array.

    Returns the sorted unique elements of an array. There are three optional
    outputs in addition to the unique elements:

    * the indices of the input array that give the unique values
    * the indices of the unique array that reconstruct the input array
    * the number of times each unique value comes up in the input array

    Parameters
    ----------
    ar : array_like
        Input array. Unless `axis` is specified, this will be flattened if it
        is not already 1-D.
    return_index : bool, optional
        If True, also return the indices of `ar` (along the specified axis,
        if provided, or in the flattened array) that result in the unique array.
    return_inverse : bool, optional
        If True, also return the indices of the unique array (for the specified
        axis, if provided) that can be used to reconstruct `ar`.
    return_counts : bool, optional
        If True, also return the number of times each unique item appears
        in `ar`.

        .. versionadded:: 1.9.0

    axis : int or None, optional
        The axis to operate on. If None, `ar` will be flattened. If an integer,
        the subarrays indexed by the given axis will be flattened and treated
        as the elements of a 1-D array with the dimension of the given axis,
        see the notes for more details.  Object arrays or structured arrays
        that contain objects are not supported if the `axis` kwarg is used. The
        default is None.

        .. versionadded:: 1.13.0

    Returns
    -------
    unique : ndarray
        The sorted unique values.
    unique_indices : ndarray, optional
        The indices of the first occurrences of the unique values in the
        original array. Only provided if `return_index` is True.
    unique_inverse : ndarray, optional
        The indices to reconstruct the original array from the
        unique array. Only provided if `return_inverse` is True.
    unique_counts : ndarray, optional
        The number of times each of the unique values comes up in the
        original array. Only provided if `return_counts` is True.

        .. versionadded:: 1.9.0

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.
    repeat : Repeat elements of an array.

    Notes
    -----
    When an axis is specified the subarrays indexed by the axis are sorted.
    This is done by making the specified axis the first dimension of the array
    (move the axis to the first dimension to keep the order of the other axes)
    and then flattening the subarrays in C order. The flattened subarrays are
    then viewed as a structured type with each element given a label, with the
    effect that we end up with a 1-D array of structured types that can be
    treated in the same way as any other 1-D array. The result is that the
    flattened subarrays are sorted in lexicographic order starting with the
    first element.

    Examples
    --------
    >>> np.unique([1, 1, 2, 2, 3, 3])
    array([1, 2, 3])
    >>> a = np.array([[1, 1], [2, 3]])
    >>> np.unique(a)
    array([1, 2, 3])

    Return the unique rows of a 2D array

    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
    >>> np.unique(a, axis=0)
    array([[1, 0, 0], [2, 3, 4]])

    Return the indices of the original array that give the unique values:

    >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
    >>> u, indices = np.unique(a, return_index=True)
    >>> u
    array(['a', 'b', 'c'], dtype='<U1')
    >>> indices
    array([0, 1, 3])
    >>> a[indices]
    array(['a', 'b', 'c'], dtype='<U1')

    Reconstruct the input array from the unique values and inverse:

    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> u, indices = np.unique(a, return_inverse=True)
    >>> u
    array([1, 2, 3, 4, 6])
    >>> indices
    array([0, 1, 4, 3, 1, 2, 1])
    >>> u[indices]
    array([1, 2, 6, 4, 2, 3, 2])

    Reconstruct the input values from the unique values and counts:

    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> values, counts = np.unique(a, return_counts=True)
    >>> values
    array([1, 2, 3, 4, 6])
    >>> counts
    array([1, 3, 1, 1, 1])
    >>> np.repeat(values, counts)
    array([1, 2, 2, 2, 3, 4, 6])    # original order not preserved
    """
    ar = np.asanyarray(ar)
    if axis is None:
        # Flattened case: defer entirely to the 1-D helper.
        ret = _unique1d(ar, return_index, return_inverse, return_counts)
        return _unpack_tuple(ret)

    # axis was specified and not None
    try:
        ar = np.moveaxis(ar, axis, 0)
    except np.AxisError:
        # this removes the "axis1" or "axis2" prefix from the error message
        raise np.AxisError(axis, ar.ndim) from None

    # Must reshape to a contiguous 2D array for this to work...
    orig_shape, orig_dtype = ar.shape, ar.dtype
    ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
    ar = np.ascontiguousarray(ar)
    dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]

    # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
    # data type with `m` fields where each field has the data type of `ar`.
    # In the following, we create the array `consolidated`, which has
    # shape `(n,)` with data type `dtype`.
    try:
        if ar.shape[1] > 0:
            consolidated = ar.view(dtype)
        else:
            # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
            # a data type with itemsize 0, and the call `ar.view(dtype)` will
            # fail.  Instead, we'll use `np.empty` to explicitly create the
            # array with shape `(len(ar),)`.  Because `dtype` in this case has
            # itemsize 0, the total size of the result is still 0 bytes.
            consolidated = np.empty(len(ar), dtype=dtype)
    except TypeError as e:
        # There's no good way to do this for object arrays, etc...
        msg = 'The axis argument to unique is not supported for dtype {dt}'
        raise TypeError(msg.format(dt=ar.dtype)) from e

    def reshape_uniq(uniq):
        # Undo the consolidation: restore the original dtype and subarray
        # shape, then move the working axis back into place.
        n = len(uniq)
        uniq = uniq.view(orig_dtype)
        uniq = uniq.reshape(n, *orig_shape[1:])
        uniq = np.moveaxis(uniq, 0, axis)
        return uniq

    output = _unique1d(consolidated, return_index,
                       return_inverse, return_counts)
    output = (reshape_uniq(output[0]),) + output[1:]
    return _unpack_tuple(output)
def _unique1d(ar, return_index=False, return_inverse=False,
return_counts=False):
"""
Find the unique elements of an array, ignoring shape.
"""
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
mask = np.empty(aux.shape, dtype=np.bool_)
mask[:1] = True
mask[1:] = aux[1:] != aux[:-1]
ret = (aux[mask],)
if return_index:
ret += (perm[mask],)
if return_inverse:
imask = np.cumsum(mask) - 1
inv_idx = np.empty(mask.shape, dtype=np.intp)
inv_idx[perm] = imask
ret += (inv_idx,)
if return_counts:
idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
ret += (np.diff(idx),)
return ret
def _intersect1d_dispatcher(
ar1, ar2, assume_unique=None, return_indices=None):
return (ar1, ar2)
@array_function_dispatch(_intersect1d_dispatcher)
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
    """
    Find the intersection of two arrays.

    Return the sorted, unique values that are in both of the input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays. Will be flattened if not already 1D.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. If True but ``ar1`` or ``ar2`` are not
        unique, incorrect results and out-of-bounds indices could result.
        Default is False.
    return_indices : bool
        If True, the indices which correspond to the intersection of the two
        arrays are returned. The first instance of a value is used if there are
        multiple. Default is False.

        .. versionadded:: 1.15.0

    Returns
    -------
    intersect1d : ndarray
        Sorted 1D array of common and unique elements.
    comm1 : ndarray
        The indices of the first occurrences of the common values in `ar1`.
        Only provided if `return_indices` is True.
    comm2 : ndarray
        The indices of the first occurrences of the common values in `ar2`.
        Only provided if `return_indices` is True.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
        performing set operations on arrays.

    Examples
    --------
    >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
    array([1, 3])

    To intersect more than two arrays, use functools.reduce:

    >>> from functools import reduce
    >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
    array([3])

    To return the indices of the values common to the input arrays
    along with the intersected values:

    >>> x = np.array([1, 1, 2, 3, 4])
    >>> y = np.array([2, 1, 4, 6])
    >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
    >>> x_ind, y_ind
    (array([0, 2, 4]), array([1, 0, 2]))
    >>> xy, x[x_ind], y[y_ind]
    (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
    """
    ar1 = np.asanyarray(ar1)
    ar2 = np.asanyarray(ar2)
    if not assume_unique:
        if return_indices:
            # Deduplicate while recording where each unique value first
            # occurs, so merged-array positions can be mapped back to
            # positions in the caller's original inputs.
            ar1, ind1 = unique(ar1, return_index=True)
            ar2, ind2 = unique(ar2, return_index=True)
        else:
            ar1 = unique(ar1)
            ar2 = unique(ar2)
    else:
        ar1 = ar1.ravel()
        ar2 = ar2.ravel()
    aux = np.concatenate((ar1, ar2))
    if return_indices:
        # Stable sort keeps the ar1 copy of a shared value ahead of the
        # ar2 copy, which the index recovery below depends on.
        aux_sort_indices = np.argsort(aux, kind='mergesort')
        aux = aux[aux_sort_indices]
    else:
        aux.sort()
    # Both inputs are unique at this point, so a value repeated at adjacent
    # sorted positions must be present in both arrays.
    mask = aux[1:] == aux[:-1]
    int1d = aux[:-1][mask]
    if return_indices:
        ar1_indices = aux_sort_indices[:-1][mask]
        # Indices >= ar1.size point into the concatenated ar2 segment.
        ar2_indices = aux_sort_indices[1:][mask] - ar1.size
        if not assume_unique:
            # Map from the deduplicated arrays back to the originals.
            ar1_indices = ind1[ar1_indices]
            ar2_indices = ind2[ar2_indices]
        return int1d, ar1_indices, ar2_indices
    else:
        return int1d
def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
return (ar1, ar2)
@array_function_dispatch(_setxor1d_dispatcher)
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Find the set exclusive-or of two arrays.

    Return the sorted, unique values that are in only one (not both) of
    the input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    setxor1d : ndarray
        Sorted 1D array of unique values that are in only one of the input
        arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4])
    >>> b = np.array([2, 3, 5, 7, 5])
    >>> np.setxor1d(a,b)
    array([1, 4, 5, 7])
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    merged = np.concatenate((ar1, ar2))
    if merged.size == 0:
        return merged
    merged.sort()
    # With both inputs unique, an element belongs to the symmetric
    # difference exactly when it occurs once in the sorted concatenation,
    # i.e. differs from both neighbours.  `edges` marks the value-change
    # boundaries (plus both ends).
    edges = np.concatenate(([True], merged[1:] != merged[:-1], [True]))
    lonely = edges[1:] & edges[:-1]
    return merged[lonely]
def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None):
return (ar1, ar2)
@array_function_dispatch(_in1d_dispatcher)
def in1d(ar1, ar2, assume_unique=False, invert=False):
    """
    Test whether each element of a 1-D array is also present in a second array.

    Returns a boolean array the same length as `ar1` that is True
    where an element of `ar1` is in `ar2` and False otherwise.

    We recommend using :func:`isin` instead of `in1d` for new code.

    Parameters
    ----------
    ar1 : (M,) array_like
        Input array.
    ar2 : array_like
        The values against which to test each value of `ar1`.
    assume_unique : bool, optional
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.
    invert : bool, optional
        If True, the values in the returned array are inverted (that is,
        False where an element of `ar1` is in `ar2` and True otherwise).
        Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
        to (but is faster than) ``np.invert(in1d(a, b))``.

        .. versionadded:: 1.8.0

    Returns
    -------
    in1d : (M,) ndarray, bool
        The values `ar1[in1d]` are in `ar2`.

    See Also
    --------
    isin : Version of this function that preserves the
        shape of ar1.
    numpy.lib.arraysetops : Module with a number of other functions for
        performing set operations on arrays.

    Notes
    -----
    `in1d` can be considered as an element-wise function version of the
    python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
    equivalent to ``np.array([item in b for item in a])``.
    However, this idea fails if `ar2` is a set, or similar (non-sequence)
    container: As ``ar2`` is converted to an array, in those cases
    ``asarray(ar2)`` is an object array rather than the expected array of
    contained values.

    .. versionadded:: 1.4.0

    Examples
    --------
    >>> test = np.array([0, 1, 2, 5, 0])
    >>> states = [0, 2]
    >>> mask = np.in1d(test, states)
    >>> mask
    array([ True, False,  True, False,  True])
    >>> test[mask]
    array([0, 2, 0])
    >>> mask = np.in1d(test, states, invert=True)
    >>> mask
    array([False,  True, False,  True, False])
    >>> test[mask]
    array([1, 5])
    """
    # Ravel both arrays, behavior for the first array could be different
    ar1 = np.asarray(ar1).ravel()
    ar2 = np.asarray(ar2).ravel()
    # Check if one of the arrays may contain arbitrary objects
    contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject
    # This code is run when
    # a) the first condition is true, making the code significantly faster
    #    (the `10 * len(ar1) ** 0.145` bound is presumably an empirical
    #    crossover between this O(len(ar1)*len(ar2)) loop and the
    #    sort-based path below -- TODO confirm provenance)
    # b) the second condition is true (i.e. `ar1` or `ar2` may contain
    #    arbitrary objects), since then sorting is not guaranteed to work
    if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
        if invert:
            mask = np.ones(len(ar1), dtype=bool)
            for a in ar2:
                mask &= (ar1 != a)
        else:
            mask = np.zeros(len(ar1), dtype=bool)
            for a in ar2:
                mask |= (ar1 == a)
        return mask
    # Otherwise use sorting
    if not assume_unique:
        # rev_idx maps each original ar1 position to its unique value, so
        # the per-unique-value answer can be broadcast back at the end.
        ar1, rev_idx = np.unique(ar1, return_inverse=True)
        ar2 = np.unique(ar2)
    ar = np.concatenate((ar1, ar2))
    # We need this to be a stable sort, so always use 'mergesort'
    # here. The values from the first array should always come before
    # the values from the second array.
    order = ar.argsort(kind='mergesort')
    sar = ar[order]
    # An ar1 value that also occurs in ar2 appears as two equal adjacent
    # entries in `sar` (the ar1 copy first, thanks to the stable sort).
    if invert:
        bool_ar = (sar[1:] != sar[:-1])
    else:
        bool_ar = (sar[1:] == sar[:-1])
    # The last sorted element has no successor; pad with the `invert`
    # default, then scatter the flags back to pre-sort positions.
    flag = np.concatenate((bool_ar, [invert]))
    ret = np.empty(ar.shape, dtype=bool)
    ret[order] = flag
    if assume_unique:
        return ret[:len(ar1)]
    else:
        return ret[rev_idx]
def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None):
return (element, test_elements)
@array_function_dispatch(_isin_dispatcher)
def isin(element, test_elements, assume_unique=False, invert=False):
    """
    Calculates `element in test_elements`, broadcasting over `element` only.

    Returns a boolean array of the same shape as `element` that is True
    where an element of `element` is in `test_elements` and False otherwise.

    Parameters
    ----------
    element : array_like
        Input array.
    test_elements : array_like
        The values against which to test each value of `element`.
        This argument is flattened if it is an array or array_like.
        See notes for behavior with non-array-like parameters.
    assume_unique : bool, optional
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.
    invert : bool, optional
        If True, the values in the returned array are inverted, as if
        calculating `element not in test_elements`. Default is False.
        ``np.isin(a, b, invert=True)`` is equivalent to (but faster
        than) ``np.invert(np.isin(a, b))``.

    Returns
    -------
    isin : ndarray, bool
        Has the same shape as `element`. The values `element[isin]`
        are in `test_elements`.

    See Also
    --------
    in1d : Flattened version of this function.
    numpy.lib.arraysetops : Module with a number of other functions for
        performing set operations on arrays.

    Notes
    -----
    `isin` is an element-wise function version of the python keyword `in`.
    ``isin(a, b)`` is roughly equivalent to
    ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.

    If `test_elements` is a set (or other non-sequence collection) it is
    converted to an object array with one element rather than an array of
    its contained values; cast the set to a list first to get the
    expected behavior.

    Examples
    --------
    >>> element = 2*np.arange(4).reshape((2, 2))
    >>> np.isin(element, [1, 2, 4, 8])
    array([[False,  True],
           [ True, False]])
    >>> np.isin(element, [1, 2, 4, 8], invert=True)
    array([[ True, False],
           [False,  True]])
    """
    element = np.asarray(element)
    flat_mask = in1d(element, test_elements, assume_unique=assume_unique,
                     invert=invert)
    # in1d works on the raveled input, so restore the caller's shape.
    return flat_mask.reshape(element.shape)
def _union1d_dispatcher(ar1, ar2):
return (ar1, ar2)
@array_function_dispatch(_union1d_dispatcher)
def union1d(ar1, ar2):
    """
    Find the union of two arrays.

    Return the unique, sorted array of values that are in either of the
    two input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays. They are flattened if they are not already 1D.

    Returns
    -------
    union1d : ndarray
        Unique, sorted union of the input arrays.

    Examples
    --------
    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
    array([-2, -1,  0,  1,  2])

    To find the union of more than two arrays, use functools.reduce:

    >>> from functools import reduce
    >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
    array([1, 2, 3, 4, 6])
    """
    # axis=None flattens both inputs before concatenation, so arbitrary
    # shapes are accepted; `unique` then sorts and deduplicates.
    combined = np.concatenate((ar1, ar2), axis=None)
    return unique(combined)
def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
return (ar1, ar2)
@array_function_dispatch(_setdiff1d_dispatcher)
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the unique values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : array_like
        Input array.
    ar2 : array_like
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    setdiff1d : ndarray
        1D array of values in `ar1` that are not in `ar2`. The result
        is sorted when `assume_unique=False`, but otherwise only sorted
        if the input is sorted.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4, 1])
    >>> b = np.array([3, 4, 5, 6])
    >>> np.setdiff1d(a, b)
    array([1, 2])
    """
    if assume_unique:
        ar1 = np.asarray(ar1).ravel()
    else:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    # Both arrays are unique at this point, so in1d can take its fast
    # path; keep only the ar1 entries that never occur in ar2.
    keep_mask = in1d(ar1, ar2, assume_unique=True, invert=True)
    return ar1[keep_mask]
|
from __future__ import division
from __future__ import print_function
import math
import mxnext as X
from models.retinanet.builder import RetinaNetHead
class FreeAnchorRetinaNet(object):
    """Assembles training and inference symbols for a FreeAnchor RetinaNet."""

    def __init__(self):
        pass

    @staticmethod
    def get_train_symbol(backbone, neck, head):
        """Build the training graph: backbone -> neck -> head losses."""
        gt_bbox = X.var("gt_bbox")
        im_info = X.var("im_info")

        rpn_feat = backbone.get_rpn_feature()
        rpn_feat = neck.get_rpn_feature(rpn_feat)

        # Anchors must be declared before the loss can reference them.
        head.get_anchor()
        loss = head.get_loss(rpn_feat, gt_bbox, im_info)
        return X.group(loss)

    @staticmethod
    def get_test_symbol(backbone, neck, head):
        """Build the inference graph; outputs include record/image ids."""
        im_info = X.var("im_info")
        im_id = X.var("im_id")
        rec_id = X.var("rec_id")

        rpn_feat = backbone.get_rpn_feature()
        rpn_feat = neck.get_rpn_feature(rpn_feat)

        head.get_anchor()
        cls_score, bbox_xyxy = head.get_prediction(rpn_feat, im_info)
        return X.group([rec_id, im_id, im_info, cls_score, bbox_xyxy])
class FreeAnchorRetinaNetHead(RetinaNetHead):
    """RetinaNet head trained with FreeAnchor's positive/negative losses."""

    def __init__(self, pRpn):
        super().__init__(pRpn)
        # reinit bias for cls: start classification outputs at a small
        # foreground prior so the loss is stable at initialization
        prior_prob = 0.02
        pi = - math.log((1 - prior_prob) / prior_prob)
        self.cls_pred_bias = X.var("cls_pred_bias", init=X.constant(pi))
        # populated by get_anchor(); maps "stride%s" -> anchor variable
        self.anchor_dict = None

    def get_anchor(self):
        """Declare the per-stride anchor input variables and cache them."""
        p = self.p
        num_anchor = len(p.anchor_generate.ratio) * len(p.anchor_generate.scale)
        stride = p.anchor_generate.stride
        anchor_dict = {}
        for s in stride:
            max_side = p.anchor_generate.max_side // s
            anchors = X.var("anchor_stride%s" % s,
                shape=(1, 1, max_side, max_side, num_anchor * 4),
                dtype='float32')  # (1, 1, long_side, long_side, #anchor * 4)
            anchor_dict["stride%s" % s] = anchors
        self.anchor_dict = anchor_dict

    def get_loss(self, conv_feat, gt_bbox, im_info):
        """Build the FreeAnchor positive and negative training losses.

        Requires get_anchor() to have been called first so that
        self.anchor_dict is populated.
        """
        import mxnet as mx
        p = self.p
        stride = p.anchor_generate.stride
        if not isinstance(stride, tuple):
            # BUG FIX: "(stride)" is just a parenthesized scalar, not a
            # one-element tuple, so single-stride configs failed to
            # iterate below.  "(stride,)" builds the intended tuple.
            stride = (stride,)
        num_class = p.num_class
        num_base_anchor = len(p.anchor_generate.ratio) * len(p.anchor_generate.scale)
        image_per_device = p.batch_image
        cls_logit_dict, bbox_delta_dict = self.get_output(conv_feat)
        cls_logit_reshape_list = []
        bbox_delta_reshape_list = []
        feat_list = []
        # fp16 gradients are up-scaled to keep them representable
        scale_loss_shift = 128.0 if p.fp16 else 1.0
        # reshape logit and delta
        for s in stride:
            # (N, A * C, H, W) -> (N, A * C, H * W)
            cls_logit = X.reshape(
                data=cls_logit_dict["stride%s" % s],
                shape=(0, 0, -1),
                name="cls_stride%s_reshape" % s
            )
            # (N, A * 4, H, W) -> (N, A * 4, H * W)
            bbox_delta = X.reshape(
                data=bbox_delta_dict["stride%s" % s],
                shape=(0, 0, -1),
                name="bbox_stride%s_reshape" % s
            )
            cls_logit_reshape_list.append(cls_logit)
            bbox_delta_reshape_list.append(bbox_delta)
            feat_list.append(cls_logit_dict["stride%s" % s])
        # cls_logits -> (N, H' * W' * A, C)
        cls_logits = X.concat(cls_logit_reshape_list, axis=2, name="cls_logit_concat")
        cls_logits = X.transpose(cls_logits, axes=(0, 2, 1), name="cls_logit_transpose")
        cls_logits = X.reshape(cls_logits, shape=(0, -1, num_class - 1), name="cls_logit_reshape")
        cls_prob = X.sigmoid(cls_logits)
        # bbox_deltas -> (N, H' * W' * A, 4)
        bbox_deltas = X.concat(bbox_delta_reshape_list, axis=2, name="bbox_delta_concat")
        bbox_deltas = X.transpose(bbox_deltas, axes=(0, 2, 1), name="bbox_delta_transpose")
        bbox_deltas = X.reshape(bbox_deltas, shape=(0, -1, 4), name="bbox_delta_reshape")
        anchor_list = [self.anchor_dict["stride%s" % s] for s in stride]
        bbox_thr = p.anchor_assign.bbox_thr
        pre_anchor_top_n = p.anchor_assign.pre_anchor_top_n
        alpha = p.focal_loss.alpha
        gamma = p.focal_loss.gamma
        anchor_target_mean = p.head.mean or (0, 0, 0, 0)
        anchor_target_std = p.head.std or (1, 1, 1, 1)
        from models.FreeAnchor.ops import _prepare_anchors, _positive_loss, _negative_loss
        anchors = _prepare_anchors(
            mx.sym, feat_list, anchor_list, image_per_device, num_base_anchor)
        positive_loss = _positive_loss(
            mx.sym, anchors, gt_bbox, cls_prob, bbox_deltas, image_per_device,
            alpha, pre_anchor_top_n, anchor_target_mean, anchor_target_std
        )
        positive_loss = X.make_loss(
            data=positive_loss,
            grad_scale=1.0 * scale_loss_shift,
            name="positive_loss"
        )
        negative_loss = _negative_loss(
            mx.sym, anchors, gt_bbox, cls_prob, bbox_deltas, im_info, image_per_device,
            num_class, alpha, gamma, pre_anchor_top_n, bbox_thr,
            anchor_target_mean, anchor_target_std
        )
        negative_loss = X.make_loss(
            data=negative_loss,
            grad_scale=1.0 * scale_loss_shift,
            name="negative_loss"
        )
        return positive_loss, negative_loss

    def get_prediction(self, conv_feat, im_info):
        """Build per-stride proposals and return concatenated scores/boxes."""
        import mxnet as mx
        p = self.p
        num_class = p.num_class
        stride = p.anchor_generate.stride
        if not isinstance(stride, tuple):
            # BUG FIX: same scalar-vs-tuple issue as in get_loss
            stride = (stride,)
        pre_nms_top_n = p.proposal.pre_nms_top_n
        anchor_target_mean = p.head.mean or (0, 0, 0, 0)
        anchor_target_std = p.head.std or (1, 1, 1, 1)
        cls_logit_dict, bbox_delta_dict = self.get_output(conv_feat)
        from models.FreeAnchor.ops import _proposal_retina
        cls_score_list = []
        bbox_xyxy_list = []
        for s in stride:
            cls_prob = X.sigmoid(data=cls_logit_dict["stride%s" % s])
            bbox_delta = bbox_delta_dict["stride%s" % s]
            anchors = self.anchor_dict["stride%s" % s]
            # keep all proposals on the coarsest level, top-n elsewhere
            pre_nms_top_n_level = -1 if s == max(stride) else pre_nms_top_n
            bbox_xyxy, cls_score = _proposal_retina(
                F=mx.sym,
                cls_prob=cls_prob,
                bbox_pred=bbox_delta,
                anchors=anchors,
                im_info=im_info,
                batch_size=1,
                rpn_pre_nms_top_n=pre_nms_top_n_level,
                num_class=num_class,
                anchor_mean=anchor_target_mean,
                anchor_std=anchor_target_std
            )
            cls_score_list.append(cls_score)
            bbox_xyxy_list.append(bbox_xyxy)
        cls_score = X.concat(cls_score_list, axis=1, name="cls_score_concat")
        bbox_xyxy = X.concat(bbox_xyxy_list, axis=1, name="bbox_xyxy_concat")
        return cls_score, bbox_xyxy
|
"""
Java processes running on a machine.
"""
|
# Copyright 2016, 2021 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test cases for Customer Model
Test cases can be run with:
nosetests
coverage report -m
While debugging just these tests it's convenient to use this:
nosetests --stop tests/test_models.py:TestPetModel
"""
import os
import logging
import unittest
from werkzeug.exceptions import NotFound
from service.models import Customer, DataValidationError, db
from service import app
from .factories import CustomerFactory
DATABASE_URI = os.getenv(
"DATABASE_URI", "postgresql://postgres:postgres@localhost:5432/testdb"
)
######################################################################
# C U S T O M E R M O D E L T E S T C A S E S
######################################################################
# pylint: disable=too-many-public-methods
class TestCustomerModel(unittest.TestCase):
    """Test Cases for Customer Model"""

    @classmethod
    def setUpClass(cls):
        """This runs once before the entire test suite"""
        app.config["TESTING"] = True
        app.config["DEBUG"] = False
        app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
        app.logger.setLevel(logging.CRITICAL)
        Customer.init_db(app)

    @classmethod
    def tearDownClass(cls):
        """This runs once after the entire test suite"""
        db.session.close()

    def setUp(self):
        """This runs before each test"""
        db.drop_all()  # clean up the last tests
        db.create_all()  # make our sqlalchemy tables

    def tearDown(self):
        """This runs after each test"""
        db.session.remove()
        db.drop_all()

    ######################################################################
    #  H E L P E R   M E T H O D S
    ######################################################################

    def _create_customer(self):
        """Creates an unsaved Customer from a Factory"""
        fake_customer = CustomerFactory()
        customer = Customer(
            first_name=fake_customer.first_name,
            last_name=fake_customer.last_name,
            email=fake_customer.email,
            phone_number=fake_customer.phone_number,
        )
        # Use the dedicated identity assertions instead of the former
        # `assertTrue(customer != None)` / `assertEqual(customer.id, None)`
        self.assertIsNotNone(customer)
        self.assertIsNone(customer.id)
        return customer

    ######################################################################
    #  T E S T   C A S E S
    ######################################################################

    def test_create_a_customer(self):
        """Create a Customer and assert that it exists"""
        fake_customer = CustomerFactory()
        customer = Customer(
            first_name=fake_customer.first_name,
            last_name=fake_customer.last_name,
            email=fake_customer.email,
            phone_number=fake_customer.phone_number,
        )
        self.assertIsNotNone(customer)
        self.assertIsNone(customer.id)  # id is only assigned on create()
        self.assertEqual(customer.first_name, fake_customer.first_name)
        self.assertEqual(customer.last_name, fake_customer.last_name)
        self.assertEqual(customer.email, fake_customer.email)
        self.assertEqual(customer.phone_number, fake_customer.phone_number)

    def test_add_a_customer(self):
        """Creates a customer and adds it to the database"""
        customers = Customer.all()
        self.assertEqual(customers, [])
        customer = self._create_customer()
        customer.create()
        # Assert that it was assigned an id and shows up in the database
        self.assertEqual(customer.id, 1)
        customers = Customer.all()
        self.assertEqual(len(customers), 1)

    def test_update_customer(self):
        """Update a customer"""
        customer = self._create_customer()
        customer.create()
        # Assert that it was assigned an id and shows in the database
        self.assertEqual(customer.id, 1)
        # Fetch it back
        customer = Customer.find(customer.id)
        customer.email = "XXX@YYY.COM"
        customer.save()
        # Fetch it back again
        customer = Customer.find(customer.id)
        self.assertEqual(customer.email, "XXX@YYY.COM")

    def test_delete_a_customer(self):
        """Delete an account from the database"""
        customers = Customer.all()
        self.assertEqual(customers, [])
        customer = self._create_customer()
        customer.create()
        # Assert that it was assigned an id and shows up in the database
        self.assertEqual(customer.id, 1)
        customers = Customer.all()
        self.assertEqual(len(customers), 1)
        customer = customers[0]
        customer.delete()
        customers = Customer.all()
        self.assertEqual(len(customers), 0)

    def test_find_or_404(self):
        """Find or throw 404 error"""
        customer = self._create_customer()
        customer.create()
        # Assert that it was assigned an id and shows up in the database
        self.assertEqual(customer.id, 1)
        # Fetch it back
        customer = Customer.find_or_404(customer.id)
        self.assertEqual(customer.id, 1)

    def test_find_by_first_name(self):
        """Find by first name"""
        customer = self._create_customer()
        customer.create()
        # Fetch it back by name
        same_customer = Customer.find_by_first_name(customer.first_name)[0]
        self.assertEqual(same_customer.id, customer.id)
        self.assertEqual(same_customer.first_name, customer.first_name)

    def test_find_by_last_name(self):
        """Find by last name"""
        customer = self._create_customer()
        customer.create()
        # Fetch it back by name
        same_customer = Customer.find_by_last_name(customer.last_name)[0]
        self.assertEqual(same_customer.id, customer.id)
        self.assertEqual(same_customer.last_name, customer.last_name)

    def test_serialize_a_customer(self):
        """Serialize a customer"""
        customer = self._create_customer()
        serial_customer = customer.serialize()
        self.assertEqual(serial_customer['id'], customer.id)
        self.assertEqual(serial_customer['first_name'], customer.first_name)
        self.assertEqual(serial_customer['last_name'], customer.last_name)
        self.assertEqual(serial_customer['email'], customer.email)
        self.assertEqual(serial_customer['phone_number'], customer.phone_number)

    def test_deserialize_a_customer(self):
        """Deserialize a customer"""
        customer = self._create_customer()
        serial_customer = customer.serialize()
        new_customer = Customer()
        new_customer.deserialize(serial_customer)
        self.assertEqual(new_customer.id, customer.id)
        self.assertEqual(new_customer.first_name, customer.first_name)
        self.assertEqual(new_customer.last_name, customer.last_name)
        self.assertEqual(new_customer.email, customer.email)
        self.assertEqual(new_customer.phone_number, customer.phone_number)

    def test_deserialize_with_key_error(self):
        """Deserialize a customer with a KeyError"""
        customer = Customer()
        self.assertRaises(DataValidationError, customer.deserialize, {})

    def test_deserialize_with_type_error(self):
        """Deserialize a customer with a TypeError"""
        customer = Customer()
        self.assertRaises(DataValidationError, customer.deserialize, [])
# BlooP allows only bounded loops (for loop with predetermined set of values)
def two_to_the_three_to_the(n):
    """Print and return 2**(3**n), using only bounded (BlooP-style) loops.

    The computed value is now also returned so callers can use the result
    instead of only seeing it on stdout (the original discarded it).
    """
    three_pow = 1
    for _ in range(n):
        three_pow = three_pow * 3
    two_pow = 1
    for _ in range(three_pow):
        two_pow = two_pow * 2
    print(two_pow)
    return two_pow
def is_prime(n):
    """Return True if n is prime, using a bounded trial-division loop.

    Fixes the original, which only excluded 0 and therefore wrongly
    reported 1 (and every negative number) as prime via the empty range.
    """
    if n < 2:
        # 0, 1 and negatives are not prime
        return False
    for factor in range(2, n):
        if n % factor == 0:
            return False
    return True
def is_goldbach(n):
    """Return True if n can be written as the sum of two primes.

    Checks every split n = factor + (n - factor) with a bounded loop.
    """
    for factor in range(2, n):
        if is_prime(factor) and is_prime(n - factor):
            return True
        # NOTE: the original manually incremented `factor` here; that was
        # dead code, since the for loop rebinds it on every iteration.
    return False
def factorial(n):
    """Return n!, computed with a single bounded loop (BlooP style)."""
    product = 1
    for factor in range(2, n + 1):
        product = product * factor
    return product
def fibonacci(n):
    """Return the n-th Fibonacci number (1, 1, 2, 3, 5, ...).

    For any n <= 2 (including 0 and negatives) the result is 1, matching
    the original behavior.
    """
    if n <= 2:
        return 1
    prev, curr = 1, 1
    for _ in range(2, n):
        prev, curr = curr, prev + curr
    return curr
|
# You are given a string S. Find the lexicographically smallest string S'
# obtained by permuting the characters of S.
#
# The lexicographically smallest permutation is simply the characters of S
# arranged in sorted order, so a single sort suffices.
S = input()
print("".join(sorted(S)))
#!/usr/bin/env python
import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import CompressedImage, Image
import numpy as np
class ImageAverage(object):
    """ROS node that maintains and republishes a running average of images."""

    def __init__(self):
        # Save the name of the node
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initialzing." %(self.node_name))
        # Initialize CV bridge
        self.bridge = CvBridge()
        # Initialize state variables:
        self.image_avg = None     # running average as a float32 image
        self.image_count = 0      # number of images folded into the average
        # Setup publishers
        self.pub_image_out = rospy.Publisher("~image_avg", Image, queue_size=1)
        # Setup subscriber
        self.sub_image_in = rospy.Subscriber("~image_in", CompressedImage, self.avgImage)
        rospy.loginfo("[%s] Initialzed." %(self.node_name))

    def avgImage(self, image_msg):
        """Fold one incoming compressed image into the running average."""
        # Convert image message to CV float32 image.
        # np.frombuffer replaces the deprecated np.fromstring, and
        # cv2.IMREAD_COLOR replaces cv2.CV_LOAD_IMAGE_COLOR, a constant
        # that was removed from the cv2 namespace in OpenCV 3.
        image_cv = cv2.imdecode(np.frombuffer(image_msg.data, np.uint8), cv2.IMREAD_COLOR)
        image_cv = np.float32(image_cv)
        # Incorporate image in running average (initialize if first image):
        self.image_count += 1.0
        if (self.image_count > 1):
            # Equal weighting of all frames seen so far: new frame gets 1/k.
            weight = 1.0/self.image_count
            self.image_avg = cv2.addWeighted(self.image_avg,(1-weight),image_cv,weight,0)
        else:
            self.image_avg = image_cv
        # Convert average image to image message:
        img_out = np.uint8(self.image_avg)
        image_out_msg = self.bridge.cv2_to_imgmsg(img_out, "bgr8")
        # Preserve the source timestamp so downstream consumers stay in sync.
        image_out_msg.header.stamp = image_msg.header.stamp
        # Publish average image:
        self.pub_image_out.publish(image_out_msg)

    def on_shutdown(self):
        """Log a shutdown notice; no other resources need releasing."""
        rospy.loginfo("[%s] Shutting down." %(self.node_name))
if __name__ == '__main__':
    # Initialize the node with rospy
    rospy.init_node('image_average_joewl', anonymous=False)
    # Create the averaging node and register a clean shutdown hook
    node = ImageAverage()
    rospy.on_shutdown(node.on_shutdown)
    # Keep it spinning to keep the node alive
    rospy.spin()
|
import os
import glob
import argparse
from argparse import RawTextHelpFormatter
from subprocess import call
import imageio as imgio
import numpy as np
import skimage
from swc_to_tiff_stack import swc_to_tiff_stack
# Command-line configuration for the tracing workflow.
parser = argparse.ArgumentParser(add_help=True,
                                 description='Trace filaments in input images',
                                 formatter_class=RawTextHelpFormatter)
parser.add_argument('--infld', help='input folder', required=True)
parser.add_argument('--outfld', help='output folder', required=True)
# UNCOMMENT THE NEXT TWO LINES AFTER ADDING THE WORKFLOW TO BIAFLOWS
parser.add_argument('--threshold_value', help='threshold intensity value', required=True)
parser.add_argument('--quality_run',
                    help='outputs trace with higher quality (or not)',
                    required=True)
args = parser.parse_args()
in_path = args.infld + '/'
out_path = args.outfld + '/'
print(out_path + ' is the output file path')
# Functional parameters UNCOMMENT THE NEXT TWO LINES AFTER ADDING THE WORKFLOW TO BIAFLOWS
# Guard against a missing threshold (default value of zero may arrive as None)
if args.threshold_value is None:
    threshold_value = 0
else:
    threshold_value = float(args.threshold_value)  # default: 0
# BUG FIX: argparse delivers --quality_run as a *string*, and bool() of any
# non-empty string (including "False" or "0") is True, so the original
# bool(args.quality_run) enabled quality mode for every value.  Interpret
# the common truthy spellings explicitly instead.
quality_run = str(args.quality_run).strip().lower() in ('1', 'true', 'yes', 'y')  # default: False
print("Starting workflow!")
#print(os.listdir(in_path))
# NOTE(review): `images` is computed but never used below; in_images is
# rebuilt from os.listdir instead.
images = (glob.glob(in_path+"/*.tif"))
listdir = os.listdir(in_path)
in_images = [f for f in listdir if f.endswith('.' + 'tif')]
# out_file_path = [out_path + '/' + item for item in in_images]
# Process each input .tif: flip, trace with rtrace, sort the SWC with
# Vaa3D, then rasterize the SWC back to a tiff stack.
for neubias_input_image in in_images:
    # set input and output file path
    in_file_path = in_path + neubias_input_image
    out_file_path = out_path + neubias_input_image
    #file_path = neubias_input_image.filepath
    #filename = neubias_input_image.filename
    #out_file_path = os.path.join(out_path, filename)
    #print('doing ' + file_path)
    # Invert the xy axis by 180 degrees / in other words, flip the image vertically
    print('invert the xy axis by 180 degrees for ' + in_file_path)
    # reads image and rotates it with numpy.flip
    # NOTE(review): this overwrites the *input* file in place, and
    # skimage.external.tifffile was removed in scikit-image 0.16 — pin
    # the scikit-image version or switch to the standalone tifffile package.
    img = skimage.external.tifffile.imread(in_file_path)
    img = np.flip(img, axis=1)
    skimage.external.tifffile.imsave(in_file_path, img)
    print("Finished running: 180 degrees image rotation in xy axis")
    print('---------------------------')
    #print('Doing '+ in_path + neubias_input_image + \
    #      ' and saving the output to ' + \
    #      out_path)
    print("Doing {}{} and saving the output to {}".format(in_path, neubias_input_image, out_path))
    #Compute the neuron tracing with set parameters
    # NOTE(review): the command strings below are run with shell=True; the
    # interpolated paths come from local folder listings, but keep them
    # free of shell metacharacters.
    if quality_run is True:
        command = "rtrace -f {} -o {}.swc --threshold {} --quality".format(in_file_path,
                out_file_path[:-4],
                threshold_value)
        #command = "rtrace -f " + in_file_path + \
        #" -o " + out_file_path[:-4] + ".swc --threshold " + threshold_value + " --quality"
    else:
        command = "rtrace -f {} -o {}.swc --threshold {}".format(in_file_path,
                out_file_path[:-4],
                threshold_value)
        #command = "rtrace -f " + in_file_path + \
        #" -o " + out_file_path[:-4] + ".swc --threshold " + threshold_value
    print("Run tracing workflow:"+command)
    return_code = call(command, shell=True, cwd="/") # waits for the subprocess to return
    print("Finished running :"+command)
    #im_size = imgio.imread(os.path.join(out_path, filename)).shape
    im_size = imgio.volread(in_file_path).shape #Size is Depth,Height,Width
    im_size = im_size[::-1] #Invert the size order to Width,Height,Depth
    print('size of the image is: ' + str(im_size))
    #Needed for some vaa3d workflow where the output path is not taken into account.
    #os.rename(out_file_path[:-4]+".tif_ini.swc", out_file_path[:-4]+ ".swc")
    print("Run:"+' swc_to_tiff_stack('+ out_file_path[:-4] + \
            '.swc, '+ out_path +','+ str(im_size)+')')
    # call node_sorter functions to order swc and saves it with the same name and path
    #swc_node_sorter(out_file_path[:-4]+".swc")
    command = "/usr/bin/xvfb-run Vaa3D_CentOS_64bit_v3.458/vaa3d -x sort_neuron_swc -f sort_swc -i " + \
            out_file_path[:-4]+".swc" + " -o " + out_file_path[:-4]+".swc"
    return_code = call(command, shell=True, cwd="/") # waits for the subprocess to return
    # Convert the .swc tracing result to tiff stack files
    swc_to_tiff_stack(out_file_path[:-4]+".swc", out_path, im_size)
    print('Finished converting swc files to image stacks')
    #TODO: error handling...
print("Done")
|
import streamlit as st
import datetime
from store_page import *
from cart_page import *
from market_page import *
from db_functions import *
from register_page import *
from order_history import *
def main():
    """Top-level Streamlit entry point: render the title and dispatch to the
    page the user picks in the selectbox."""
    st.title("Kroger Market")
    # Map of page label -> render callable; dict insertion order drives the
    # order of options shown in the selectbox.
    pages = {
        "Market": market_page,
        "Cart": cart_page,
        "Register": register_page,
        "OrderHistory": order_history,
        "Store": store_page,
    }
    selection = st.selectbox("Go to:", tuple(pages.keys()))
    handler = pages[selection]
    handler()
# Standard script entry point.
if __name__ == "__main__":
    main()
|
"""Tiny text-to-speech demo: speak whatever the user types."""
import pyttsx3

# Initialize the TTS engine once, read a line from stdin, and speak it.
engine = pyttsx3.init()
text = input("Say Something: ")
engine.say(text)
engine.runAndWait()
"""
Tests for prediction pipeline
"""
import pytest
from eogrow.utils.testing import create_folder_dict, run_and_test_pipeline
@pytest.fixture(scope="session", name="folders")
def config_folder_fixture(config_folder, stats_folder):
    """Session-scoped folder dict for the 'mapping' pipeline tests."""
    return create_folder_dict(config_folder, stats_folder, "mapping")
@pytest.mark.chain
@pytest.mark.order(after=["test_rasterize.py::test_rasterize_pipeline_features"])
@pytest.mark.parametrize("experiment_name", ["mapping_ref"])
def test_mapping_pipeline_on_reference_data(experiment_name, folders):
    """Run the mapping pipeline on rasterized reference data and compare stats."""
    run_and_test_pipeline(experiment_name, **folders)
@pytest.mark.order(after=["test_prediction.py::test_prediction_pipeline"])
@pytest.mark.parametrize("experiment_name", ["mapping_pred"])
def test_mapping_pipeline_on_predictions(experiment_name, folders):
    """Run the mapping pipeline on prediction outputs and compare stats."""
    run_and_test_pipeline(experiment_name, **folders)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# @file simo_swt_classification_tut.py
# @author Kyeong Soo (Joseph) Kim <kyeongsoo.kim@gmail.com>
# @date 2018-08-23
#
# @brief A scalable indoor localization system based on Wi-Fi fingerprinting
# using a stage-wise trained multi-class classification of building,
# floor, and location with a single-input and multi-output (SIMO) deep
# neural network (DNN) model and TUT datasets.
#
# @remarks TBD
### import basic modules and a model to test
import os
# os.environ['PYTHONHASHSEED'] = '0' # for reproducibility
import sys
sys.path.insert(0, '../models')
sys.path.insert(0, '../utils')
from deep_autoencoder import deep_autoencoder
from sdae import sdae
from mean_ci import mean_ci
### import other modules; keras and its backend will be loaded later
import argparse
import datetime
import math
import multiprocessing
import numpy as np
import pandas as pd
import pathlib
import random as rn
from collections import namedtuple
from num2words import num2words
from numpy.linalg import norm
from time import time
from timeit import default_timer as timer
### import keras and tensorflow backend
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # supress warning messages
import tensorflow as tf
# Let TensorFlow's intra-/inter-op thread pools use every available CPU core.
num_cpus = multiprocessing.cpu_count()
session_conf = tf.ConfigProto(  # TF 1.x session configuration
    intra_op_parallelism_threads=num_cpus,
    inter_op_parallelism_threads=num_cpus
)
from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Activation, Dense, Dropout, Input
from keras.layers.normalization import BatchNormalization
from keras.metrics import categorical_accuracy
from keras.models import Model
def simo_classification_tut(
        gpu_id: int,
        dataset: str,
        frac: float,
        validation_split: float,
        preprocessor: str,
        grid_size: float,
        batch_size: int,
        epochs: int,
        optimizer: str,
        dropout: float,
        corruption_level: float,
        num_neighbors: int,
        scaling: float,
        dae_hidden_layers: list,
        sdae_hidden_layers: list,
        cache: bool,
        common_hidden_layers: list,
        floor_hidden_layers: list,
        location_hidden_layers: list,
        floor_weight: float,
        location_weight: float,
        verbose: int
):
    """Multi-floor indoor localization based on floor and coordinates classification
    using a single-input and multi-output (SIMO) deep neural network (DNN) model
    and TUT datasets.

    Keyword arguments:
    gpu_id -- GPU device ID (< 0 means CPU only)
    dataset -- 'tut', 'tut2' or 'tut3'
    frac -- fraction of input data to load
    validation_split -- fraction of training data used for validation
    preprocessor -- scaler name for input preprocessing
    grid_size -- grid size [m] (NOTE(review): currently unused; the TUT loaders
                 below are always called with grid_size=0 — confirm intent)
    batch_size, epochs, optimizer, dropout -- usual training hyperparameters
    corruption_level -- masking-noise level for the SDAE
    num_neighbors -- number of candidate locations used in positioning
    scaling -- threshold = scaling * max(location softmax) for neighbor inclusion
    dae_hidden_layers / sdae_hidden_layers -- '' or list of layer widths
    common_hidden_layers / floor_hidden_layers / location_hidden_layers -- '' or list
    floor_weight / location_weight -- loss weights for the joint training stage
    verbose -- keras fit verbosity

    Returns a LocalizationResults namedtuple with the floor hit rate, 2D/3D
    localization error statistics, and the elapsed training time.
    """
    ### initialize numpy, random, TensorFlow, and keras
    np.random.seed()  # based on current time or OS-specific randomness source
    rn.seed()  # "
    tf.set_random_seed(rn.randint(0, 1000000))
    if gpu_id >= 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
    sess = tf.Session(
        graph=tf.get_default_graph(),
        config=session_conf)
    K.set_session(sess)

    ### load datasets after scaling
    print("Loading data ...")
    if dataset == 'tut':
        from tut import TUT
        tut = TUT(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0)
    elif dataset == 'tut2':
        from tut import TUT2
        tut = TUT2(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0,
            testing_split=0.2)
    elif dataset == 'tut3':
        from tut import TUT3
        tut = TUT3(
            cache=cache,
            frac=frac,
            preprocessor=preprocessor,
            classification_mode='hierarchical',
            grid_size=0)
    else:
        print("'{0}' is not a supported data set.".format(dataset))
        sys.exit(0)
    flr_height = tut.floor_height
    training_df = tut.training_df
    training_data = tut.training_data
    testing_df = tut.testing_df
    testing_data = tut.testing_data

    ### build and train a SIMO model
    print(
        "Building and training a SIMO model for classification ..."
    )
    rss = training_data.rss_scaled
    coord = training_data.coord_scaled
    coord_scaler = training_data.coord_scaler  # for inverse transform
    labels = training_data.labels
    # common input layer; renamed from `input` to avoid shadowing the builtin
    input_tensor = Input(shape=(rss.shape[1], ), name='input')

    # (optional) build deep autoencoder or stacked denoising autoencoder
    if dae_hidden_layers != '':
        print("- Building a DAE model ...")
        model = deep_autoencoder(
            dataset=dataset,
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=dae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split)
        x = model(input_tensor)
    elif sdae_hidden_layers != '':
        print("- Building an SDAE model ...")
        model = sdae(
            dataset=dataset,
            input_data=rss,
            preprocessor=preprocessor,
            hidden_layers=sdae_hidden_layers,
            cache=cache,
            model_fname=None,
            optimizer=optimizer,
            corruption_level=corruption_level,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=validation_split)
        x = model(input_tensor)
    else:
        x = input_tensor

    # common hidden layers
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(dropout)(x)
    if common_hidden_layers != '':
        for units in common_hidden_layers:
            x = Dense(units)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)
    common_hl_output = x  # shared representation both heads branch from

    # floor classification output
    if floor_hidden_layers != '':
        for units in floor_hidden_layers:
            x = Dense(units)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)
    x = Dense(labels.floor.shape[1])(x)
    x = BatchNormalization()(x)
    floor_output = Activation(
        'softmax', name='floor_output')(x)  # no dropout for an output layer

    # location classification output
    # FIX: branch from the shared representation instead of the floor head's
    # pre-softmax output; `common_hl_output` was previously assigned but unused,
    # which chained the location head onto the floor head by accident.
    x = common_hl_output
    if location_hidden_layers != '':
        for units in location_hidden_layers:
            x = Dense(units)(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Dropout(dropout)(x)
    x = Dense(labels.location.shape[1])(x)
    x = BatchNormalization()(x)
    location_output = Activation(
        'softmax', name='location_output')(x)  # no dropout for an output layer

    # build model
    model = Model(
        inputs=input_tensor,
        outputs=[
            floor_output,
            location_output
        ])

    # for stage-wise training with floor information only
    # (location weight fixed to 0.0 by design in this first stage)
    model.compile(
        optimizer=optimizer,
        loss=[
            'categorical_crossentropy',
            'categorical_crossentropy'
        ],
        loss_weights={
            'floor_output': 1.0,
            'location_output': 0.0
        },
        metrics={
            'floor_output': 'accuracy',
            'location_output': 'accuracy'
        })
    weights_file = os.path.expanduser("~/tmp/best_weights.h5")
    checkpoint = ModelCheckpoint(weights_file, monitor='val_loss', save_best_only=True, verbose=0)
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0)
    print("- Stage-wise training with floor information ...", end='')
    startTime = timer()
    f_history = model.fit(
        x={'input': rss},
        y={
            'floor_output': labels.floor,
            'location_output': labels.location
        },
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=[checkpoint, early_stop],
        validation_split=validation_split,
        shuffle=True)
    elapsedTime = timer() - startTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    # for stage-wise training with both floor and location information
    # FIX: honor the floor_weight/location_weight parameters (previously
    # hardcoded to 1.0/1.0, silently ignoring the CLI options); the defaults
    # are 1.0/1.0, so default behavior is unchanged.
    model.compile(
        optimizer=optimizer,
        loss=[
            'categorical_crossentropy',
            'categorical_crossentropy'
        ],
        loss_weights={
            'floor_output': floor_weight,
            'location_output': location_weight
        },
        metrics={
            'floor_output': 'accuracy',
            'location_output': 'accuracy'
        })
    weights_file = os.path.expanduser("~/tmp/best_weights.h5")
    checkpoint = ModelCheckpoint(weights_file, monitor='val_loss', save_best_only=True, verbose=0)
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0)
    print("- Stage-wise training with both floor and location information ...", end='')
    startTime = timer()
    fl_history = model.fit(
        x={'input': rss},
        y={
            'floor_output': labels.floor,
            'location_output': labels.location
        },
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=[checkpoint, early_stop],
        validation_split=validation_split,
        shuffle=True)
    elapsedTime = timer() - startTime
    print(" completed in {0:.4e} s".format(elapsedTime))
    model.load_weights(weights_file)  # load weights from the best model

    ### evaluate the model
    print("Evaluating the model ...")
    rss = testing_data.rss_scaled
    labels = testing_data.labels
    blds = labels.building
    flrs = labels.floor
    coord = testing_data.coord  # original coordinates
    x_col_name = 'X'
    y_col_name = 'Y'

    # calculate the classification accuracies and localization errors
    flrs_pred, locs_pred = model.predict(rss, batch_size=batch_size)
    flr_results = (np.equal(
        np.argmax(flrs, axis=1), np.argmax(flrs_pred, axis=1))).astype(int)
    flr_acc = flr_results.mean()

    # calculate positioning error based on locations
    n_samples = len(flrs)
    n_locs = locs_pred.shape[1]  # number of locations (reference points)
    idxs = np.argpartition(
        locs_pred, -num_neighbors)[:, -num_neighbors:]  # (unsorted) indexes of up to num_neighbors nearest neighbors
    threshold = scaling * np.amax(locs_pred, axis=1)
    training_labels = np.concatenate((training_data.labels.floor,
                                      training_data.labels.location), axis=1)
    training_coord_avg = training_data.coord_avg
    coord_est = np.zeros((n_samples, 2))
    coord_est_weighted = np.zeros((n_samples, 2))
    for i in range(n_samples):
        # FIX: reset per sample so the fallback branch below cannot raise
        # NameError (or read a stale match from the previous sample) when no
        # neighbor passes the threshold.
        rows = (np.empty(0, dtype=int), )
        xs = []
        ys = []
        ws = []
        for j in idxs[i]:
            if locs_pred[i][j] >= threshold[i]:
                loc = np.zeros(n_locs)
                loc[j] = 1
                rows = np.where((training_labels == np.concatenate((flrs[i],
                                 loc))).all(axis=1))  # tuple of row indexes
                if rows[0].size > 0:
                    xs.append(training_df.loc[training_df.index[rows[0][0]],
                                              x_col_name])
                    ys.append(training_df.loc[training_df.index[rows[0][0]],
                                              y_col_name])
                    ws.append(locs_pred[i][j])
        if len(xs) > 0:
            coord_est[i] = np.array((xs, ys)).mean(axis=1)
            coord_est_weighted[i] = np.array((np.average(xs, weights=ws),
                                              np.average(ys, weights=ws)))
        else:
            # fall back to a per-building(-floor) average coordinate
            if rows[0].size > 0:
                key = str(np.argmax(blds[i])) + '-' + str(np.argmax(flrs[i]))
            else:
                key = str(np.argmax(blds[i]))
            coord_est[i] = coord_est_weighted[i] = training_coord_avg[key]

    # calculate 2D localization errors
    dist_2d = norm(coord - coord_est, axis=1)
    dist_weighted_2d = norm(coord - coord_est_weighted, axis=1)
    mean_error_2d = dist_2d.mean()
    mean_error_weighted_2d = dist_weighted_2d.mean()
    median_error_2d = np.median(dist_2d)
    median_error_weighted_2d = np.median(dist_weighted_2d)

    # calculate 3D localization errors
    flr_diff = np.absolute(
        np.argmax(flrs, axis=1) - np.argmax(flrs_pred, axis=1))
    z_diff_squared = (flr_height**2)*np.square(flr_diff)
    dist_3d = np.sqrt(np.sum(np.square(coord - coord_est), axis=1) + z_diff_squared)
    dist_weighted_3d = np.sqrt(np.sum(np.square(coord - coord_est_weighted), axis=1) + z_diff_squared)
    mean_error_3d = dist_3d.mean()
    mean_error_weighted_3d = dist_weighted_3d.mean()
    median_error_3d = np.median(dist_3d)
    median_error_weighted_3d = np.median(dist_weighted_3d)

    LocalizationResults = namedtuple('LocalizationResults', ['flr_acc',
                                                             'mean_error_2d',
                                                             'mean_error_weighted_2d',
                                                             'median_error_2d',
                                                             'median_error_weighted_2d',
                                                             'mean_error_3d',
                                                             'mean_error_weighted_3d',
                                                             'median_error_3d',
                                                             'median_error_weighted_3d',
                                                             'elapsedTime'])
    return LocalizationResults(flr_acc=flr_acc, mean_error_2d=mean_error_2d,
                               mean_error_weighted_2d=mean_error_weighted_2d,
                               median_error_2d=median_error_2d,
                               median_error_weighted_2d=median_error_weighted_2d,
                               mean_error_3d=mean_error_3d,
                               mean_error_weighted_3d=mean_error_weighted_3d,
                               median_error_3d=median_error_3d,
                               median_error_weighted_3d=median_error_weighted_3d,
                               elapsedTime=elapsedTime)
if __name__ == "__main__":
    # Command-line interface; defaults reproduce the paper's experiment setup.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-N",
        "--num_runs",
        help=
        "number of runs; default is 20",
        default=20,
        type=int)
    parser.add_argument(
        "-G",
        "--gpu_id",
        help=
        "ID of GPU device to run this script; default is 0; set it to a negative number for CPU (i.e., no GPU)",
        default=0,
        type=int)
    parser.add_argument(
        "--dataset",
        help="a data set for training, validation, and testing; choices are 'tut' (default) and 'tut2'",
        default='tut',
        type=str)
    parser.add_argument(
        "-F",
        "--frac",
        help=
        "fraction of input data to load for training and validation; default is 1.0",
        default=1.0,
        type=float)
    parser.add_argument(
        "--validation_split",
        help=
        "fraction of training data to be used as validation data: default is 0.2",
        default=0.2,
        type=float)
    parser.add_argument(
        "-P",
        "--preprocessor",
        help=
        "preprocessor to scale/normalize input data before training and validation; default is 'standard_scaler'",
        default='standard_scaler',
        type=str)
    parser.add_argument(
        "--grid_size",
        help="size of a grid [m]; default is 5",
        default=5,
        type=float)
    parser.add_argument(
        "-B",
        "--batch_size",
        help="batch size; default is 64",
        default=64,
        type=int)
    parser.add_argument(
        "-E",
        "--epochs",
        help="number of epochs; default is 100",
        default=100,
        type=int)
    parser.add_argument(
        "-O",
        "--optimizer",
        help="optimizer; default is 'nadam'",
        default='nadam',
        type=str)
    parser.add_argument(
        "-D",
        "--dropout",
        help="dropout rate before and after hidden layers; default is 0.25",
        default=0.25,
        type=float)
    parser.add_argument(
        "-C",
        "--corruption_level",
        help=
        "corruption level of masking noise for stacked denoising autoencoder; default is 0.1",
        default=0.1,
        type=float)
    parser.add_argument(
        "--num_neighbours",
        help=
        "number of (nearest) neighbour locations to consider in positioning; default is 8",
        default=8,
        type=int)
    parser.add_argument(
        "--scaling",
        help=
        "scaling factor for threshold (i.e., threshold=scaling*maximum) for the inclusion of nighbour locations to consider in positioning; default is 0.2",
        default=0.2,
        type=float)
    # hidden-layer options: empty string means "no such layers"
    parser.add_argument(
        "--dae_hidden_layers",
        help=
        "comma-separated numbers of units in hidden layers for deep autoencoder; default is ''",
        default='',
        type=str)
    parser.add_argument(
        "--sdae_hidden_layers",
        help=
        "comma-separated numbers of units in hidden layers for stacked denoising autoencoder; default is '1024,1024,1024'",
        default='1024,1024,1024',
        type=str)
    parser.add_argument(
        "--no_cache",
        help=
        "disable loading a trained model from/saving it to a cache",
        action='store_true')
    parser.add_argument(
        "--common_hidden_layers",
        help=
        "comma-separated numbers of units in common hidden layers; default is '1024'",
        default='1024',
        type=str)
    parser.add_argument(
        "--floor_hidden_layers",
        help=
        "comma-separated numbers of units in additional hidden layers for floor; default is '256'",
        default='256',
        type=str)
    parser.add_argument(
        "--location_hidden_layers",
        help=
        "comma-separated numbers of units in additional hidden layers for location; default is '256'",
        default='256',
        type=str)
    parser.add_argument(
        "--floor_weight",
        help="loss weight for a floor; default 1.0",
        default=1.0,
        type=float)
    parser.add_argument(
        "--location_weight",
        help="loss weight for a location; default 1.0",
        default=1.0,
        type=float)
    parser.add_argument(
        "-V",
        "--verbose",
        help=
        "verbosity mode: 0 = silent, 1 = progress bar, 2 = one line per epoch; default is 0",
        default=0,
        type=int)
    args = parser.parse_args()
    # set variables using command-line arguments
    num_runs = args.num_runs
    gpu_id = args.gpu_id
    dataset = args.dataset
    frac = args.frac
    validation_split = args.validation_split
    preprocessor = args.preprocessor
    grid_size = args.grid_size
    batch_size = args.batch_size
    epochs = args.epochs
    optimizer = args.optimizer
    dropout = args.dropout
    corruption_level = args.corruption_level
    num_neighbours = args.num_neighbours
    scaling = args.scaling
    # Hidden-layer specs: keep '' as the sentinel for "no such layers";
    # otherwise parse the comma-separated string into a list of ints.
    if args.dae_hidden_layers == '':
        dae_hidden_layers = ''
    else:
        dae_hidden_layers = [
            int(i) for i in (args.dae_hidden_layers).split(',')
        ]
    if args.sdae_hidden_layers == '':
        sdae_hidden_layers = ''
    else:
        sdae_hidden_layers = [
            int(i) for i in (args.sdae_hidden_layers).split(',')
        ]
    cache = not args.no_cache
    if args.common_hidden_layers == '':
        common_hidden_layers = ''
    else:
        common_hidden_layers = [
            int(i) for i in (args.common_hidden_layers).split(',')
        ]
    if args.floor_hidden_layers == '':
        floor_hidden_layers = ''
    else:
        floor_hidden_layers = [
            int(i) for i in (args.floor_hidden_layers).split(',')
        ]
    if args.location_hidden_layers == '':
        location_hidden_layers = ''
    else:
        location_hidden_layers = [
            int(i) for i in (args.location_hidden_layers).split(',')
        ]
    floor_weight = args.floor_weight
    location_weight = args.location_weight
    verbose = args.verbose
    ### run simo_classification_tut() num_runs times and aggregate the results
    flr_accs = np.empty(num_runs)
    mean_error_2ds = np.empty(num_runs)
    mean_error_weighted_2ds = np.empty(num_runs)
    median_error_2ds = np.empty(num_runs)
    median_error_weighted_2ds = np.empty(num_runs)
    mean_error_3ds = np.empty(num_runs)
    mean_error_weighted_3ds = np.empty(num_runs)
    median_error_3ds = np.empty(num_runs)
    median_error_weighted_3ds = np.empty(num_runs)
    elapsedTimes = np.empty(num_runs)
    for i in range(num_runs):
        print("\n########## {0:s} run ##########".format(num2words(i+1, to='ordinal_num')))
        rst = simo_classification_tut(gpu_id, dataset, frac, validation_split,
                                      preprocessor, grid_size, batch_size,
                                      epochs, optimizer, dropout,
                                      corruption_level, num_neighbours, scaling,
                                      dae_hidden_layers, sdae_hidden_layers,
                                      cache, common_hidden_layers,
                                      floor_hidden_layers,
                                      location_hidden_layers, floor_weight,
                                      location_weight, verbose)
        flr_accs[i] = rst.flr_acc
        mean_error_2ds[i] = rst.mean_error_2d
        mean_error_weighted_2ds[i] = rst.mean_error_weighted_2d
        median_error_2ds[i] = rst.median_error_2d
        median_error_weighted_2ds[i] = rst.median_error_weighted_2d
        mean_error_3ds[i] = rst.mean_error_3d
        mean_error_weighted_3ds[i] = rst.mean_error_weighted_3d
        median_error_3ds[i] = rst.median_error_3d
        median_error_weighted_3ds[i] = rst.median_error_weighted_3d
        elapsedTimes[i] = rst.elapsedTime
### print out final results
base_dir = '../results/test/' + (os.path.splitext(
os.path.basename(__file__))[0]).replace('test_', '') + '/' + dataset
pathlib.Path(base_dir).mkdir(parents=True, exist_ok=True)
base_file_name = base_dir + "/E{0:d}_B{1:d}_D{2:.2f}".format(
epochs, batch_size, dropout)
now = datetime.datetime.now()
output_file_base = base_file_name + '_' + now.strftime("%Y%m%d-%H%M%S")
with open(output_file_base + '.org', 'w') as output_file:
output_file.write(
"#+STARTUP: showall\n") # unfold everything when opening
output_file.write("* System parameters\n")
output_file.write(" - Command line: %s\n" % ' '.join(sys.argv))
output_file.write(" - Number of runs: %d\n" % num_runs)
output_file.write(" - GPU ID: %d\n" % gpu_id)
output_file.write(
" - Fraction of data loaded for training and validation: %.2f\n" %
frac)
output_file.write(" - Validation split: %.2f\n" % validation_split)
output_file.write(
" - Preprocessor for scaling/normalizing input data: %s\n" %
preprocessor)
output_file.write(" - Grid size [m]: %d\n" % grid_size)
output_file.write(" - Batch size: %d\n" % batch_size)
output_file.write(" - Epochs: %d\n" % epochs)
output_file.write(" - Optimizer: %s\n" % optimizer)
output_file.write(" - Dropout rate: %.2f\n" % dropout)
output_file.write(
" - Number of (nearest) neighbour locations: %d\n" % num_neighbours)
output_file.write(" - Scaling factor for threshold: %.2f\n" % scaling)
output_file.write(" - Deep autoencoder hidden layers: ")
if dae_hidden_layers == '':
output_file.write("N/A\n")
else:
output_file.write("%d" % dae_hidden_layers[0])
for units in dae_hidden_layers[1:]:
output_file.write("-%d" % units)
output_file.write("\n")
output_file.write(" - Stacked denoising autoencoder hidden layers: ")
if sdae_hidden_layers == '':
output_file.write("N/A\n")
else:
output_file.write("%d" % sdae_hidden_layers[0])
for units in sdae_hidden_layers[1:]:
output_file.write("-%d" % units)
output_file.write("\n")
output_file.write(" - Common hidden layers: ")
if common_hidden_layers == '':
output_file.write("N/A\n")
else:
output_file.write("%d" % common_hidden_layers[0])
for units in common_hidden_layers[1:]:
output_file.write("-%d" % units)
output_file.write("\n")
output_file.write(" - Floor hidden layers: ")
if floor_hidden_layers == '':
output_file.write("N/A\n")
else:
output_file.write("%d" % floor_hidden_layers[0])
for units in floor_hidden_layers[1:]:
output_file.write("-%d" % units)
output_file.write("\n")
output_file.write(" - Location hidden layers: ")
if location_hidden_layers == '':
output_file.write("N/A\n")
else:
output_file.write("%d" % location_hidden_layers[0])
for units in location_hidden_layers[1:]:
output_file.write("-%d" % units)
output_file.write("\n")
output_file.write(" - Floor loss weight: %.2f\n" % floor_weight)
output_file.write(
" - Location loss weight: %.2f\n" % location_weight)
output_file.write("\n")
# output_file.write("* Model Summary\n")
# model.summary(print_fn=lambda x: output_file.write(x + '\n'))
# output_file.write("\n")
output_file.write("* Performance\n")
output_file.write(" - Floor hit rate [%]: Mean (w/ 95% CI)={0:.4f}+-{1:{ci_fs}}, Max={2:.4f}, Min={3:.4f}\n".format(*[i*100 for i in mean_ci(flr_accs)], 100*flr_accs.max(), 100*flr_accs.min(), ci_fs=('.4f' if num_runs > 1 else '')))
output_file.write(" - Mean 2D error [m]: Mean (w/ 95% CI)={0:.4f}+-{1:.4f}, Max={2:.4f}, Min={3:.4f}\n".format(*mean_ci(mean_error_2ds), mean_error_2ds.max(), mean_error_2ds.min()))
output_file.write(" - Mean 2D error (weighted) [m]: Mean (w/ 95% CI)={0:.4f}+-{1:.4f}, Max={2:.4f}, Min={3:.4f}\n".format(*mean_ci(mean_error_weighted_2ds), mean_error_weighted_2ds.max(), mean_error_weighted_2ds.min()))
output_file.write(" - Median 2D error [m]: Mean (w/ 95% CI)={0:.4f}+-{1:.4f}, Max={3:.4f}, Min={3:.4f}\n".format(*mean_ci(median_error_2ds), median_error_2ds.max(), median_error_2ds.min()))
output_file.write(" - Median 2D error (weighted) [m]: Mean (w/ 95% CI)={0:.4f}+-{1:.4f}, Max={3:.4f}, Min={3:.4f}\n".format(*mean_ci(median_error_weighted_2ds), median_error_weighted_2ds.max(), median_error_weighted_2ds.min()))
output_file.write(" - Mean 3D error [m]: Mean (w/ 95% CI)={0:.4f}+-{1:.4f}, Max={2:.4f}, Min={3:.4f}\n".format(*mean_ci(mean_error_3ds), mean_error_3ds.max(), mean_error_3ds.min()))
output_file.write(" - Mean 3D error (weighted) [m]: Mean (w/ 95% CI)={0:.4f}+-{1:.4f}, Max={2:.4f}, Min={3:.4f}\n".format(*mean_ci(mean_error_weighted_3ds), mean_error_weighted_3ds.max(), mean_error_weighted_3ds.min()))
output_file.write(" - Median 3D error [m]: Mean (w/ 95% CI)={0:.4f}+-{1:.4f}, Max={2:.4f}, Min={3:.4f}\n".format(*mean_ci(median_error_3ds), median_error_3ds.max(), median_error_3ds.min()))
output_file.write(" - Median 3D error (weighted) [m]: Mean (w/ 95% CI)={0:.4f}+-{1:.4f}, Max={2:.4f}, Min={3:.4f}\n".format(*mean_ci(median_error_weighted_3ds), median_error_weighted_3ds.max(), median_error_weighted_3ds.min()))
output_file.write(" - Training time [s]: Mean (w/ 95% CI)={0:.4f}+-{1:.4f}, Max={2:.4f}, Min={3:.4f}\n".format(*mean_ci(elapsedTimes), elapsedTimes.max(), elapsedTimes.min()))
|
"""Base schema module"""
from marshmallow import Schema, fields
from ..utilities.error_handler.handle_error import ValidationError
class BaseSchema(Schema):
    """Base schema for all models."""
    # Audit fields are serialize-only and renamed to camelCase on dump
    # (marshmallow 2.x `dump_to`).
    id = fields.String(dump_only=True)
    created_at = fields.String(dump_only=True, dump_to='createdAt')
    updated_at = fields.String(dump_only=True, dump_to='updatedAt')
    created_by = fields.String(dump_only=True, dump_to='createdBy')
    updated_by = fields.String(dump_only=True, dump_to='updatedBy')

    def load_object_into_schema(self, data, partial=False):
        """Helper function to load python objects into schema"""
        loaded, errors = self.load(data, partial=partial)
        if not errors:
            return loaded
        # Surface validation problems as a 400 with the field-level details.
        raise ValidationError(
            dict(errors=errors, message='An error occurred'), 400)
|
import sys
from typing import List
from models import ApiResponse
from inference.errors import Error
from starlette.responses import FileResponse
from starlette.staticfiles import StaticFiles
from starlette.middleware.cors import CORSMiddleware
from deep_learning_service import DeepLearningService
from fastapi import FastAPI, Form, File, UploadFile, Header
from inference.exceptions import ModelNotFound, InvalidModelConfiguration, ApplicationError, ModelNotLoaded, \
InferenceEngineNotFound, InvalidInputData
# Make the local ./inference package importable.
# NOTE(review): this append runs AFTER the `from inference...` imports above,
# so those imports must already resolve without it — confirm it is still needed.
sys.path.append('./inference')
# Module-level singletons shared by all endpoints.
dl_service = DeepLearningService()
error_logging = Error()
app = FastAPI(version='1.0', title='BMW InnovationLab YOLOv4-v3 opencv inference Automation',
              description="<b>API for performing YOLOv4 and YOLOv3 opencv inference</b></br></br>"
                          "<b>Contact the developers:</b></br>"
                          "<b>Antoine Charbel: <a href='mailto:antoine.charbel@inmind.ai'>antoine.charbel@inmind.ai</a></b></br>"
                          "<b>Hadi Koubeissy: <a href='mailto:123.hadikoubeissy@gmail.com'>123.hadikoubeissy@gmail.com</a></b></br>"
                          "<b>BMW Innovation Lab: <a href='mailto:innovation-lab@bmw.de'>innovation-lab@bmw.de</a></b>")
# Static-file serving and CORS middleware are intentionally disabled.
# app.mount("/public", StaticFiles(directory="/main/public"), name="public")
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=["*"],
#     allow_credentials=True,
#     allow_methods=["*"],
#     allow_headers=["*"],
# )
@app.get('/load')
def load_custom():
    """
    Loads all the available models.
    :return: All the available models with their respective hashed values
    """
    try:
        error_logging.info('request successful;')
        response = dl_service.load_all_models()
    except ApplicationError as e:
        error_logging.warning(str(e))
        response = ApiResponse(success=False, error=e)
    except Exception as e:
        error_logging.error(str(e))
        response = ApiResponse(success=False, error='unexpected server error')
    return response
@app.post('/detect')
async def detect_custom(model: str = Form(...), image: UploadFile = File(...)):
    """
    Performs a prediction for a specified image using one of the available models.
    :param model: Model name or model hash
    :param image: Image file
    :return: Model's Bounding boxes
    """
    try:
        # Plain detection: no box drawing, no batch mode.
        response = await dl_service.run_model(model, image, False, False)
        error_logging.info('request successful;' + str(response))
    except ApplicationError as e:
        error_logging.warning(model + ';' + str(e))
        response = ApiResponse(success=False, error=e)
    except Exception as e:
        error_logging.error(model + ' ' + str(e))
        response = ApiResponse(success=False, error='unexpected server error')
    return response
@app.post('/get_labels')
def get_labels_custom(model: str = Form(...)):
    """
    Lists the model's labels with their hashed values.
    :param model: Model name or model hash
    :return: A list of the model's labels with their hashed values
    """
    try:
        error_logging.info('request successful;')
        response = dl_service.get_labels_custom(model)
    except ModelNotFound as e:
        error_logging.warning(model + ' ' + str(e))
        response = ApiResponse(success=False, error=e)
    except Exception as e:
        error_logging.error(model + ' ' + str(e))
        response = ApiResponse(success=False, error='unexpected server error')
    return response
@app.get('/models/{model_name}/load')
async def load(model_name: str, force: bool = False):
    """
    Loads a model specified as a query parameter.
    :param model_name: Model name
    :param force: Boolean for model force reload on each call
    :return: APIResponse
    """
    try:
        dl_service.load_model(model_name, force)
        error_logging.info('request successful;')
        response = ApiResponse(success=True)
    except ApplicationError as e:
        error_logging.warning(str(e))
        response = ApiResponse(success=False, error=e)
    except Exception as e:
        error_logging.error(str(e))
        response = ApiResponse(success=False, error='unexpected server error')
    return response
@app.get('/models')
async def list_models(user_agent: str = Header(None)):
    """
    Lists all available models.
    :param user_agent:
    :return: APIResponse
    """
    try:
        error_logging.info('request successful;')
        response = ApiResponse(data={'models': dl_service.list_models()})
    except Exception as e:
        error_logging.error(str(e))
        response = ApiResponse(success=False, error='unexpected server error')
    return response
@app.post('/models/{model_name}/predict')
async def run_model(model_name: str, input_data: UploadFile = File(...)):
    """
    Performs a prediction by giving both model name and image file.
    :param model_name: Model name
    :param input_data: An image file
    :return: APIResponse containing the prediction's bounding boxes
    """
    try:
        output = await dl_service.run_model(model_name, input_data, draw=False, predict_batch=False)
        error_logging.info('request successful;' + str(output))
        response = ApiResponse(data=output)
    except ApplicationError as e:
        error_logging.warning(model_name + ';' + str(e))
        response = ApiResponse(success=False, error=e)
    except Exception as e:
        error_logging.error(model_name + ' ' + str(e))
        response = ApiResponse(success=False, error='unexpected server error')
    return response
@app.post('/models/{model_name}/predict_batch', include_in_schema=False)
async def run_model_batch(model_name: str, input_data: List[UploadFile] = File(...)):
    """
    Performs a prediction by giving both model name and image file(s).
    :param model_name: Model name
    :param input_data: A batch of image files or a single image file
    :return: APIResponse containing prediction(s) bounding boxes
    """
    try:
        output = await dl_service.run_model(model_name, input_data, draw=False, predict_batch=True)
        error_logging.info('request successful;' + str(output))
        response = ApiResponse(data=output)
    except ApplicationError as e:
        error_logging.warning(model_name + ';' + str(e))
        response = ApiResponse(success=False, error=e)
    except Exception as e:
        error_logging.error(model_name + ' ' + str(e))
        response = ApiResponse(success=False, error='unexpected server error')
    return response
@app.post('/models/{model_name}/predict_image')
async def predict_image(model_name: str, input_data: UploadFile = File(...)):
    """
    Draws bounding box(es) on image and returns it.
    :param model_name: Model name
    :param input_data: Image file
    :return: Image file
    """
    try:
        output = await dl_service.run_model(model_name, input_data, draw=True, predict_batch=False)
        error_logging.info('request successful;' + str(output))
        # The service writes the annotated image to a fixed path.
        response = FileResponse("/main/result.jpg", media_type="image/jpg")
    except ApplicationError as e:
        error_logging.warning(model_name + ';' + str(e))
        response = ApiResponse(success=False, error=e)
    except Exception as e:
        error_logging.error(model_name + ' ' + str(e))
        response = ApiResponse(success=False, error='unexpected server error')
    return response
@app.get('/models/{model_name}/labels')
async def list_model_labels(model_name: str):
    """
    Lists all the model's labels.
    :param model_name: Model name
    :return: List of model's labels
    """
    try:
        labels = dl_service.get_labels(model_name)
        error_logging.info('request successful;' + str(labels))
        response = ApiResponse(data=labels)
    except ModelNotFound as e:
        error_logging.warning(model_name + ';' + str(e))
        response = ApiResponse(success=False, error=e)
    except Exception as e:
        error_logging.error(model_name + ' ' + str(e))
        response = ApiResponse(success=False, error='unexpected server error')
    return response
@app.get('/models/{model_name}/config')
async def list_model_config(model_name: str):
    """
    Lists all the model's configuration.
    :param model_name: Model name
    :return: List of model's configuration
    """
    try:
        model_config = dl_service.get_config(model_name)
        error_logging.info('request successful;' + str(model_config))
        return ApiResponse(data=model_config)
    except ModelNotFound as e:
        error_logging.warning(model_name + ';' + str(e))
        return ApiResponse(success=False, error=e)
    except Exception as e:
        error_logging.error(model_name + ' ' + str(e))
        return ApiResponse(success=False, error='unexpected server error')
|
import FWCore.ParameterSet.Config as cms

# CMSSW job: run the MuonTimingValidator analyzer over RelVal single-muon
# RECO files and write the resulting timing histograms to a ROOT file.
process = cms.Process("Test")
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
    '/store/relval/CMSSW_3_2_1/RelValSingleMuPt100/GEN-SIM-RECO/MC_31X_V3-v1/0006/DC15F12B-9477-DE11-B1E0-000423D98C20.root',
    '/store/relval/CMSSW_3_2_1/RelValSingleMuPt100/GEN-SIM-RECO/MC_31X_V3-v1/0006/40D6FEFD-8F77-DE11-95A7-001D09F27067.root',
    '/store/relval/CMSSW_3_2_1/RelValSingleMuPt100/GEN-SIM-RECO/MC_31X_V3-v1/0005/50EE1208-8177-DE11-8B17-001D09F231B0.root'
    )
)
# Process only the first 100 events.
process.maxEvents = cms.untracked.PSet(
    input=cms.untracked.int32(100)
)
# Standard geometry, magnetic field, conditions and reconstruction sequences.
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.load("Configuration.StandardSequences.Reconstruction_cff")
# The star import brings reconstruction sequence names (e.g. `muontiming`)
# into this namespace so they can be used directly in the Path below.
from Configuration.StandardSequences.Reconstruction_cff import *
process.muonAnalyzer = cms.EDAnalyzer("MuonTimingValidator",
    TKtracks = cms.untracked.InputTag("generalTracks"),
    STAtracks = cms.untracked.InputTag("standAloneMuons"),
    Muons = cms.untracked.InputTag("muons"),
    nbins = cms.int32(60),
    PtresMax = cms.double(2000.0),
    # Timing inputs produced by the `muontiming` sequence re-run below.
    CombinedTiming = cms.untracked.InputTag("muontiming","combined"),
    DtTiming = cms.untracked.InputTag("muontiming","dt"),
    CscTiming = cms.untracked.InputTag("muontiming","csc"),
    simPtMin = cms.double(5.0),
    PtresMin = cms.double(-1000.0),
    PtCut = cms.double(1.0),
    etaMax = cms.double(2.4),
    etaMin = cms.double(0.0),
    PlotScale = cms.double(1.0),
    DTcut = cms.int32(8),
    CSCcut = cms.int32(4),
    # Output ROOT file, opened in 'recreate' mode.
    open = cms.string('recreate'),
    out = cms.string('test_timing.root')
)
process.prefer("GlobalTag")
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
# NOTE(review): this GlobalTag targets 80X prompt data while the input files
# are CMSSW_3_2_1 MC relvals -- confirm this tag/input combination is intended.
process.GlobalTag = GlobalTag(process.GlobalTag, '80X_dataRun2_Prompt_v9', '')
# Re-run the muon timing sequence, then run the analyzer.
process.p = cms.Path(muontiming)
process.mutest = cms.Path(process.muonAnalyzer)
process.schedule = cms.Schedule(process.p,process.mutest)
# process.schedule = cms.Schedule(process.mutest)
|
"""pynetdicom configuration options"""
LOG_HANDLER_LEVEL = 'standard'
"""Default (non-user) event logging
* If ``'none'`` then events will not be logged at all, however there will still
be some logging (warnings, errors, etc)
* If ``'standard'`` then certain events will be logged (association
negotiation, DIMSE messaging, etc)
Examples
--------
>>> from pynetdicom import _config
>>> _config.LOG_HANDLER_LEVEL = 'none'
"""
ENFORCE_UID_CONFORMANCE = False
"""Enforce UID conformance
.. versionadded:: 1.3
If ``True`` then UIDs will be checked to ensure they're conformant to the
DICOM Standard and if not then an appropriate response sent, otherwise
UIDs will only be checked to ensure they're no longer then 64 characters and
if not then an appropriate response sent.
Examples
--------
>>> from pynetdicom import _config
>>> _config.ENFORCE_UID_CONFORMANCE = True
"""
USE_SHORT_DIMSE_AET = True
"""Use short AE titles in DIMSE messages.
.. versionadded:: 1.5
If ``False`` then elements with a VR of AE in DIMSE messages will be padded
with trailing spaces up to the maximum allowable length (16 bytes), otherwise
no padding will be added.
Examples
--------
>>> from pynetdicom import _config
>>> _config.USE_SHORT_DIMSE_AET = False
"""
LOG_RESPONSE_IDENTIFIERS = True
"""Log incoming C-FIND, C-GET and C-MOVE response *Identifier* datasets.
.. versionadded:: 1.5
If ``True`` (default) then the *Identifier* datasets received in Pending
responses to C-FIND, C-GET and C-MOVE requests will be logged.
Examples
--------
>>> from pynetdicom import _config
>>> _config.LOG_RESPONSE_IDENTIFIERS = False
"""
LOG_REQUEST_IDENTIFIERS = True
"""Log incoming C-FIND, C-GET and C-MOVE request *Identifier* datasets.
.. versionadded:: 1.5
If ``True`` (default) then the *Identifier* datasets received in
C-FIND, C-GET and C-MOVE requests will be logged.
Examples
--------
>>> from pynetdicom import _config
>>> _config.LOG_REQUEST_IDENTIFIERS = False
"""
|
#!/usr/bin/python3
# -*- mode: python; coding: utf-8 -*-
# Copyright (C) 2014, Oscar Acena <oscaracena@gmail.com>
# This software is under the terms of Apache License v2 or later.
from __future__ import print_function
import sys
from gattlib import GATTRequester
if __name__ == '__main__':
    # Require the BLE device address as the single CLI argument.
    if len(sys.argv) < 2:
        print("Usage: {} <addr>".format(sys.argv[0]))
        sys.exit(1)

    requester = GATTRequester(sys.argv[1], False)
    print("Connecting...")
    sys.stdout.flush()
    requester.connect(True)

    # Enumerate and print every primary GATT service on the device.
    for service in requester.discover_primary():
        print(service)
    print("Done.")
|
import logging
import os
import json
import time
from tornado.gen import coroutine
from lib.tornado_yieldperiodic.yieldperiodic import YieldPeriodicCallback
from utils import Config, Przystanki, TramwajFactory
from db import MpkDb, PrzystankiDb
class SpawnWorker(YieldPeriodicCallback):
    """Periodic worker that spawns trams from terminal-stop timetables."""

    def __init__(self):
        """Wire up config, databases and stop data, then arm the 60 s tick."""
        self.number = 1
        self.db_file = os.environ['TRAM_ROOT'] + '/data/'
        self.config = Config()
        self.db = MpkDb()
        self.przystanki_db = PrzystankiDb()
        self.przystanki = Przystanki()
        self.tramwaje = []
        self.nowe_tramwaje = []
        self.factorio = TramwajFactory()
        super().__init__(self.run, 60000, faststart=True)
        logging.info('SpawnWorker initialised')

    def serialize_tram(self, tram):
        """Flatten a tram object into a JSON-friendly dict."""
        return {
            'line': tram.line,
            'velocity': tram.velocity,
            'state': tram.state,
            'last_stop': tram.last_stop,
            'next_stop': tram.next_stop,
            'position': {'x': tram.position['x'], 'y': tram.position['y']},
            'distance_to_go': tram.distance_to_go,
            'last_update': tram.last_update
        }

    def get_json_tram(self, line):
        """Serialized trams currently running on *line*."""
        return [self.serialize_tram(t) for t in self.tramwaje if t.line == line]

    def get_json_trams(self):
        """Serialized view of every tracked tram."""
        return [self.serialize_tram(t) for t in self.tramwaje]

    def get_new_json_trams(self):
        """Serialize trams spawned since the last call, then reset the list."""
        fresh = [self.serialize_tram(t) for t in self.nowe_tramwaje]
        self.nowe_tramwaje = []
        return fresh

    @coroutine
    def run(self):
        """One tick: spawn a tram for every line whose timetable says 'now'."""
        logging.info('running for %s time', self.number)
        self.number += 1
        # NOTE: '%-H' (no zero padding) is a glibc extension of strftime.
        hour = time.strftime('%-H', time.localtime())
        minute = time.strftime("%M", time.localtime())
        for terminal, terminal_info in self.przystanki.petle.items():
            for line in terminal_info['linie']:
                raw_schedule = self.przystanki_db.get_terminal_time(line, terminal)
                if raw_schedule is None or raw_schedule == '{}':
                    continue
                timetable = json.loads(raw_schedule)['01']
                departures = timetable.get(hour)
                if departures is None:
                    departures = {}
                if minute in departures:
                    logging.info('spawninig line: %s from %s', line, terminal)
                    tram = self.factorio.factory(line, terminal)
                    self.tramwaje.append(tram)
                    self.nowe_tramwaje.append(tram)
        logging.info(self.tramwaje)
|
import logging
import operator
from src.ham.radio.default_radio.dmr_contact_default import DmrContactDefault
from src.ham.radio.default_radio.dmr_id_default import DmrIdDefault
from src.ham.radio.default_radio.dmr_user_default import DmrUserDefault
from src.ham.radio.default_radio.radio_channel_default import RadioChannelDefault
from src.ham.radio.default_radio.radio_zone_default import RadioZoneDefault
from src.ham.radio.radio_channel import RadioChannel
from src.ham.util import radio_types
from src.ham.util.data_column import DataColumn
from src.ham.util.path_manager import PathManager
from src.ham.util.validation_error import ValidationError
class Validator:
    """Validates CSV rows for channels, digital contacts, DMR IDs, zones and users."""

    def __init__(self):
        # Empty template instances expose the expected columns per file type.
        self._radio_channel_template = RadioChannelDefault.create_empty()
        self._digital_contact_template = DmrContactDefault.create_empty()
        self._dmr_id_template = DmrIdDefault.create_empty()
        self._zone_template = RadioZoneDefault.create_empty()
        self._dmr_user_template = DmrUserDefault.create_empty()
        # Maps of lowercased display name -> first line number seen;
        # populated by flush_names() before a validation run.
        self._short_names = None
        self._medium_names = None
        self._long_names = None

    def flush_names(self):
        """Reset the per-run name-collision tables."""
        self._short_names = dict()
        self._medium_names = dict()
        self._long_names = dict()

    @classmethod
    def validate_files_exist(cls):
        """Check that every required input CSV exists; return a list of errors."""
        errors = []
        files_list = ['input.csv', 'digital_contacts.csv', 'dmr_id.csv', 'zones.csv', 'user.csv']
        for file_name in files_list:
            if not PathManager.input_path_exists(file_name):
                err = ValidationError(f"Cannot open file: `{file_name}`", None, file_name)
                errors.append(err)
        if len(errors) > 0:
            logging.error("--- FILE MISSING ERRORS, CANNOT CONTINUE ---")
            logging.info(f"Checked `{PathManager.get_input_path()}`")
            for err in errors:
                logging.error(f"\t\t{err.message}")
            logging.info("Have you run `Wizard (new)` or `Migrations (update)` under `Dangerous Operations`?")
        else:
            logging.info("All necessary files found")
        return errors

    def validate_dmr_user(self, cols, line_num, file_name):
        """Validate one row of user.csv."""
        return self._validate_generic(cols, line_num, file_name, dict(self._dmr_user_template.__dict__))

    def validate_radio_zone(self, cols, line_num, file_name):
        """Validate one row of zones.csv."""
        return self._validate_generic(cols, line_num, file_name, dict(self._zone_template.__dict__))

    def validate_dmr_id(self, cols, line_num, file_name):
        """Validate one row of dmr_id.csv."""
        return self._validate_generic(cols, line_num, file_name, dict(self._dmr_id_template.__dict__))

    def validate_digital_contact(self, cols, line_num, file_name):
        """Validate one row of digital_contacts.csv."""
        return self._validate_generic(cols, line_num, file_name, dict(self._digital_contact_template.__dict__))

    def _check_name_collision(self, column, seen, line_num, file_name):
        """Record *column*'s value in *seen*; return a one-error list on collision.

        Channel display names must be unique (case-insensitive) because
        codeplug applications mishandle duplicates.
        """
        key = column.fmt_val().lower()
        if key in seen:
            err = ValidationError(
                f"Collision in {column.get_alias(radio_types.DEFAULT)} "
                f"(value: `{column.fmt_val()}`) found with line"
                f" {seen[key]}."
                f" Codeplug applications do not handle this well.", line_num, file_name)
            logging.debug(err.message)
            return [err]
        seen[key] = line_num
        return []

    def validate_radio_channel(self, cols, line_num, file_name, digital_contacts, zones):
        """Validate one row of input.csv (a channel), including cross-references
        into digital contacts and zones. Returns a list of ValidationErrors."""
        errors = self._validate_generic(cols, line_num, file_name, dict(self._radio_channel_template.__dict__))
        if len(errors) > 0:
            return errors
        channel = RadioChannel(cols, None, None)
        # Short/medium/long display names must each be unique.
        errors.extend(self._check_name_collision(channel.short_name, self._short_names, line_num, file_name))
        errors.extend(self._check_name_collision(channel.medium_name, self._medium_names, line_num, file_name))
        errors.extend(self._check_name_collision(channel.name, self._long_names, line_num, file_name))
        if channel.rx_dcs.fmt_val(23) not in radio_types.dcs_codes_inverses.keys():
            err = ValidationError(
                f"Invalid RX DCS code `{channel.rx_dcs.fmt_val()}` specified.", line_num, file_name
            )
            errors.append(err)
        if channel.tx_dcs.fmt_val(23) not in radio_types.dcs_codes_inverses.keys():
            # Bug fix: this branch previously reported the RX label and RX
            # value for a TX DCS failure (copy-paste error).
            err = ValidationError(
                f"Invalid TX DCS code `{channel.tx_dcs.fmt_val()}` specified.", line_num, file_name
            )
            errors.append(err)
        if channel.is_digital() and channel.digital_contact_id.fmt_val() not in digital_contacts.keys():
            err = ValidationError(
                f"Cannot find digital contact `{channel.digital_contact_id.fmt_val()}` specified in "
                f"digital contacts.", line_num, file_name
            )
            errors.append(err)
        acceptable_tx_powers = ["Low", "Medium", "High"]
        if channel.tx_power.fmt_val() is None or channel.tx_power.fmt_val() not in acceptable_tx_powers:
            # Bug fix: the message previously echoed digital_contact_id
            # instead of the offending tx_power value.
            err = ValidationError(
                f"Transmit power (`tx_power`) invalid: `{channel.tx_power.fmt_val()}`. Valid values "
                f"are {acceptable_tx_powers}"
                , line_num, file_name
            )
            errors.append(err)
        if channel.zone_id.fmt_val() is not None and channel.zone_id.fmt_val() not in zones.keys():
            err = ValidationError(
                f"Zone ID not found: `{channel.zone_id.fmt_val()}`. in `zones.csv` and was not left empty."
                , line_num, file_name
            )
            errors.append(err)
        # Latitude/longitude must be provided together, and within range.
        if operator.xor(channel.latitude.fmt_val() is None, channel.longitude.fmt_val() is None):
            # Bug fix: closing backtick after the longitude value was missing.
            err = ValidationError(
                f"Only one of latitude or longitude provided. Lat: `{channel.latitude.fmt_val()}` "
                f"Long: `{channel.longitude.fmt_val()}`"
                , line_num, file_name
            )
            errors.append(err)
        if channel.latitude.fmt_val() is not None and not -90 <= channel.latitude.fmt_val() <= 90:
            err = ValidationError(
                f"Latitude must be between -90 and 90. Lat: `{channel.latitude.fmt_val()}`"
                , line_num, file_name
            )
            errors.append(err)
        if channel.longitude.fmt_val() is not None and not -180 <= channel.longitude.fmt_val() <= 180:
            err = ValidationError(
                f"Longitude must be between -180 and 180. Lat: `{channel.longitude.fmt_val()}`"
                , line_num, file_name
            )
            errors.append(err)
        return errors

    def _validate_generic(self, cols, line_num, file_name, needed_cols_dict_gen):
        """Shape-check *cols* against a template's DataColumns.

        Mutates *cols*: columns not known to the template are dropped.
        Empty strings are accepted for any column.
        """
        errors = []
        needed_cols = dict()
        for val in needed_cols_dict_gen.values():
            if not isinstance(val, DataColumn):
                logging.debug(f"Skipping adding `{val}` to needed cols")
                continue
            needed_cols[val.get_alias(radio_types.DEFAULT)] = val
        ignored_cols = []
        for col in cols.keys():
            if col not in needed_cols.keys():
                ignored_cols.append(col)
        for ignored in ignored_cols:
            cols.pop(ignored)
        for key in cols.keys():
            # NOTE(review): after the pop above every remaining key is in
            # needed_cols, so this "missing from entry" branch can never
            # fire; detecting truly missing columns would need to iterate
            # needed_cols instead. Left as-is to preserve behavior.
            if key not in needed_cols.keys():
                err = ValidationError(f"`{key}` missing from entry.", line_num, file_name)
                errors.append(err)
                continue
            data_column = needed_cols[key]
            if cols[key] == '':
                continue
            try:
                data_column.shape(cols[key])
            except ValueError:
                shape_name = str(data_column.shape).replace("<class '", '').replace("'>", '')
                err = ValidationError(
                    f"Error parsing `{cols[key]}` in column `{key}` as `{shape_name}`",
                    line_num,
                    file_name
                )
                logging.debug(err.message)
                errors.append(err)
        return errors
|
import math
from game.common.hitbox import Hitbox
def check_collision(hitbox_one: Hitbox, hitbox_two: Hitbox) -> bool:
    """Axis-aligned bounding-box overlap test between two hitboxes."""
    overlaps_horizontally = (hitbox_one.top_left[0] < hitbox_two.top_right[0]
                             and hitbox_one.top_right[0] > hitbox_two.top_left[0])
    overlaps_vertically = (hitbox_one.top_left[1] < hitbox_two.bottom_left[1]
                           and hitbox_one.bottom_right[1] > hitbox_two.top_right[1])
    return overlaps_horizontally and overlaps_vertically
def distance(x1: float, y1: float, x2: float, y2: float) -> float:
    """Euclidean distance between points (x1, y1) and (x2, y2)."""
    # math.hypot avoids overflow/underflow of the intermediate squares
    # that the manual (dx**2 + dy**2) ** 0.5 form can suffer.
    return math.hypot(x1 - x2, y1 - y2)
def distance_tuples(coord_tuple1: tuple, coord_tuple2: tuple) -> float:
    """Euclidean distance between two (x, y) coordinate tuples."""
    x1, y1 = coord_tuple1[0], coord_tuple1[1]
    x2, y2 = coord_tuple2[0], coord_tuple2[1]
    return distance(x1, y1, x2, y2)
def angle_to_point(player, coord_tuple: tuple) -> int:
    """Angle in whole degrees [0, 360) from the player's position to coord_tuple.

    Technique from:
    https://stackoverflow.com/questions/2676719/calculating-the-angle-between-a-line-and-the-x-axis/27481611#27481611
    """
    # Bug fix: the original def line ended in a stray backslash that spliced
    # the following comment onto the header line -- fragile; removed.
    delta_y = coord_tuple[1] - player.hitbox.position[1]
    delta_x = coord_tuple[0] - player.hitbox.position[0]
    angle = int(math.degrees(math.atan2(delta_y, delta_x)))
    # atan2 yields (-180, 180]; shift negatives into [0, 360).
    return angle + 360 if angle < 0 else angle
|
def vsum(nums: list[float]) -> float:
    '''Sum together all the numbers in a list using tail recursion and if.

    :param nums: List of numbers.
    :returns: The sum of the numbers in the list.
    '''
    def _accumulate(remaining: list[float], running: float = 0.0) -> float:
        # Base case: nothing left, the accumulator holds the answer.
        if not remaining:
            return running
        # Tail call: fold the head into the accumulator, recurse on the rest.
        return _accumulate(remaining[1:], remaining[0] + running)
    return _accumulate(nums)


algorithm = vsum
name = 'tail recursion'
|
# coding=utf-8
from commands.abstract_command import AbstractCommand
from config import Config
from model.karma import Karma
class GetLeaderBoardCommand(AbstractCommand):
    """Command that returns the karma leader board."""

    def get_purpose(self):
        return "Shows the best of the best!"

    def execute(self, size: int=3):
        """Return the top *size* entries of the karma leader board."""
        return Karma.get_leader_board(size=size)

    def __init__(self):
        # Bug fix: `super(GetLeaderBoardCommand, self)` alone only builds a
        # proxy object -- the parent initialiser was never invoked.
        super().__init__()
        config = Config()
        config.connect_to_db()
|
def is_palindrome(s):
    """Recursively test whether *s* reads the same forwards and backwards."""
    # An empty string is trivially a palindrome.
    if not s:
        return True
    # Mismatched outer characters settle it immediately.
    if s[0] != s[-1]:
        return False
    # Outer pair matches; recurse on the interior.
    return is_palindrome(s[1:-1])
user_text = input("Enter string:")
if is_palindrome(user_text):
    print("String is a palindrome!")
else:
    print("String isn't a palindrome!")
from abc import ABC, abstractmethod #Library that forces abstraction, similar to abstract classes in java
class subgod(ABC):
    """
    Abstract base for all subgod/servant-god classes; every concrete god
    type derives from this.
    """
    version = '0.1'

    @abstractmethod
    def __init__(self, name, type, hitpoints, devotion, attack, defence):
        """
        Store the stats shared by every subgod.

        :param name: the name of the god
        :param type: the area the god is from, e.g. 'greek'
        :param hitpoints: the amount of hitpoints the god starts with
        :param devotion: the initial amount of devotion to their god
        :param attack: attack stat
        :param defence: defence stat
        """
        self.name = name
        self.type = type
        self.hitpoints = hitpoints
        self.devotion = devotion
        self.rank = 5  # every subgod starts at the lowest rank
        self.attack = attack
        self.defence = defence

    @abstractmethod
    def determinerank(self, devotion):
        """
        Recompute the god's rank from its following; each god 'type'
        subclass supplies the concrete rule (abstract method).
        """

    @abstractmethod
    def servitudebonus(self):
        """
        Apply a bonus to an attribute based on the god's type and rank;
        overridden per god type (abstract method).
        """

    def __str__(self):
        """Render all important stats, one per line."""
        stat_lines = [
            f"Name: {self.name}",
            f"God Type: {self.type}",
            f"Hitpoints: {self.hitpoints}",
            f"Rank: {self.rank}",
            f"Devotion: {self.devotion}",
            f"Attack: {self.attack}",
            f"Defence: {self.defence}",
        ]
        return "\n" + " \n".join(stat_lines)
|
"""Utility Functions for the CNC_VIDEO app -- Library"""
import collections
import logging
import threading
import time
# Map of type names into types.
TYPES = {
    'int': int,
    'float': float,
    'string': str,
    'boolean': bool
}


# Take a type name and a string and return the value cast to the given type.
def typeCast(typ, str):
    """Cast the string *str* to the Python type named *typ*.

    Raises ValueError if *typ* is not a known type name.
    NOTE: 'boolean' casts via bool(), so any non-empty string
    (including "0" and "False") yields True.
    """
    if typ not in TYPES:  # membership test on the dict itself, not .keys()
        logging.error("Invalid type: %s", typ)
        # Include the offending name so callers can diagnose the failure.
        raise ValueError("Invalid type: %s" % typ)
    return TYPES[typ](str)
# Merge a new dict into an old one, updating the old one (recursively).
def dictMerge(old, new):
    """Recursively merge *new* into *old*, mutating *old* in place.

    Nested dicts are merged key-by-key; any other value in *new*
    overwrites the corresponding entry in *old*.
    """
    # Bug fix: `iteritems()` is Python-2-only and `collections.Mapping`
    # was removed from the `collections` top level in Python 3.10; the
    # getattr fallback keeps this working on both 2.7 and modern 3.x.
    mapping_type = getattr(collections, 'abc', collections).Mapping
    for k, v in new.items():
        if (k in old and isinstance(old[k], dict) and
                isinstance(v, mapping_type)):
            dictMerge(old[k], v)
        else:
            old[k] = v
class Alarm(threading.Thread):
    """Daemon thread that puts None on *queue* after *timeout* seconds.

    Consumers treat the None as a wake-up/shutdown sentinel.
    """
    def __init__(self, queue, timeout):
        self.q = queue
        self.timeout = timeout
        threading.Thread.__init__(self)
        # Bug fix: Thread.setDaemon() is deprecated (removal-slated since
        # Python 3.10); the attribute form works on both Python 2 and 3.
        self.daemon = True

    def run(self):
        time.sleep(self.timeout)
        self.q.put(None)
#
# TEST
#
# NOTE(review): this self-test uses Python 2 `print` statements, matching
# the `iteritems()` usage above -- the module predates Python 3.
if __name__ == '__main__':
    r = typeCast('int', "1")
    print type(r), r
    r = typeCast('float', "1")
    print type(r), r
    r = typeCast('string', "1")
    print type(r), r
    r = typeCast('boolean', "1")
    print type(r), r
    # 'foo' is not in TYPES, so this final call raises ValueError and the
    # last print is never reached -- an intentional failure demonstration.
    r = typeCast("foo", "1")
    print type(r), r
|
import rich
import json
import click
import secrets
from typing import Union
from typing import Callable
from flask import Flask
from flask import request
from flask import redirect
from flask import Response
from flask.views import View
from werkzeug.security import generate_password_hash
from flask_login import current_user
from flask_sqlalchemy import SQLAlchemy
from pydantic import BaseModel
from pydantic import ValidationError
from .settings import BaseConfig
from .settings import JwtConfig
from .database.models import get_user_model
def validate_form_request(Model: BaseModel) -> bool:
    """Return True when the current request's form payload satisfies *Model*."""
    payload: dict = request.form.to_dict()
    try:
        Model(**payload)
    except ValidationError:
        return False
    return True
def validate_json_request(Model: BaseModel) -> Callable:
    """Decorator factory: validate the request's JSON body against *Model*.

    On success the validated model instance is injected into the view as
    the ``data`` keyword argument. On an empty body or validation failure
    a ``({field: message}, 400)`` response is returned instead.
    """
    def decorator(f) -> Callable:
        from functools import wraps

        # Bug fix: without functools.wraps the wrapped view loses its
        # __name__, which breaks Flask endpoint registration when more
        # than one view uses this decorator.
        @wraps(f)
        def wrapper(*args, **kwargs) -> Union[Response, Callable]:
            request_body: dict = request.get_json()
            if request_body is None:
                return {"message": "Empty request body"}, 400
            try:
                kwargs["data"] = Model(**request_body)
                return f(*args, **kwargs)
            except ValidationError as e:
                error_response = {}
                for error in json.loads(e.json()):
                    field, message = error["loc"][0], error["msg"]
                    error_response[field] = message
                # Bug fix: validation failures previously returned with an
                # implicit 200; use 400 like the empty-body case above.
                return error_response, 400
        return wrapper
    return decorator
def initalize_base_view(cls, base_view: View) -> None:
    """Copy shared application state from *cls* onto *base_view*."""
    config = cls.base_config
    base_view.db = cls.db
    base_view.base_config = config
    base_view.alerts = cls.alerts
    base_view.template_config = cls.template_config
    base_view.User = cls.User
    base_view.HOME_URL = config.HOME_URL
    base_view.LOGIN_URL = config.LOGIN_URL
    base_view.REGISTER_URL = config.REGISTER_URL
def init_base_jwt_view(cls, base_view: View) -> None:
    """Copy shared JWT application state from *cls* onto *base_view*."""
    base_view.User = cls.User
    base_view.db = cls.db
    base_view.settings = cls.settings
    base_view.app_config = cls.app.config
def redirect_if_authenticated(function) -> Union[Callable, Response]:
    """Decorator: send already-logged-in users to '/' instead of the view."""
    def wrapper(*args, **kwargs):
        if not current_user.is_authenticated:
            return function(*args, **kwargs)
        return redirect("/")
    return wrapper
def check_table_name(db: SQLAlchemy, table_name: str) -> bool:
    # True when *table_name* already exists in the bound database.
    # NOTE(review): Engine.table_names() is deprecated in SQLAlchemy 1.4
    # and removed in 2.0 -- confirm the pinned SQLAlchemy version.
    return table_name in db.engine.table_names()
def set_flask_app_config(app: Flask, config: BaseConfig) -> None:
    """Apply baseline Flask settings; a fresh SECRET_KEY is generated
    on every startup (existing sessions are invalidated by restarts)."""
    app.config["SECRET_KEY"] = secrets.token_hex()
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    app.config["STATIC_FOLDER"] = config.STATIC_FOLDER_NAME
def get_create_admin_function(
    db: SQLAlchemy,
    settings: Union[BaseConfig, JwtConfig]
) -> Callable:
    """
    Returns function for Flask app's CLI that allows to create superusers.

    The returned function yields a ``(user, created)`` tuple: ``(None,
    False)`` when creation is refused, ``(user, True)`` on success.
    """
    UserModel = get_user_model(db, settings.TABLENAME)
    error_message = "[bold red] \nUser with that {} is already exists!"
    min_password_length = settings.MIN_PASSWORD_LENGTH

    def create_admin_user(email: str, username: str, password: str):
        # Refuse duplicates on either unique field.
        user_by_email = UserModel.query.filter_by(
            email=email
        ).first()
        user_by_username = UserModel.query.filter_by(
            username=username
        ).first()
        if user_by_email:
            rich.print(
                error_message.format("email"),
            )
            return None, False
        if user_by_username:
            rich.print(
                error_message.format("username"),
            )
            return None, False
        if len(password) < min_password_length:
            rich.print(
                f"[bold yellow] Password must be {min_password_length} characters long!",
            )
            return None, False
        password_hash = generate_password_hash(password)
        new_user = UserModel(
            email=email,
            username=username,
            password_hash=password_hash,
            admin=True
        )
        new_user.insert()
        rich.print(
            "[bold green] Superuser is created :tada:",
        )
        # Bug fix: the failure paths return (None, False) but the success
        # path previously fell through returning bare None -- return a
        # consistent (user, created) tuple.
        return new_user, True
    return create_admin_user
def add_create_admin_command(app: Flask, function: Callable):
    """Register `flask create-admin <email> <username> <password>` on the CLI,
    delegating the actual creation to *function* (see get_create_admin_function)."""
    @app.cli.command("create-admin")
    @click.argument("email")
    @click.argument("username")
    @click.argument("password")
    def create_admin(email: str, username: str, password: str):
        function(email, username, password)
    return
|
import jwt
import re
import six
import sys
from shopify.utils import shop_url
if sys.version_info[0] < 3: # Backwards compatibility for python < v3.0.0
from urlparse import urljoin
else:
from urllib.parse import urljoin
ALGORITHM = "HS256"
PREFIX = "Bearer "
REQUIRED_FIELDS = ["iss", "dest", "sub", "jti", "sid"]
class SessionTokenError(Exception):
pass
class InvalidIssuerError(SessionTokenError):
pass
class MismatchedHostsError(SessionTokenError):
pass
class TokenAuthenticationError(SessionTokenError):
pass
def decode_from_header(authorization_header, api_key, secret):
    """Extract, decode and validate a session token from an Authorization header.

    Returns the decoded JWT payload; raises a SessionTokenError subclass
    on any extraction, decoding or issuer-validation failure.
    """
    token = _extract_session_token(authorization_header)
    payload = _decode_session_token(token, api_key, secret)
    _validate_issuer(payload)
    return payload
def _extract_session_token(authorization_header):
    """Return the token portion of a 'Bearer <token>' header value."""
    if authorization_header.startswith(PREFIX):
        return authorization_header[len(PREFIX):]
    raise TokenAuthenticationError("The HTTP_AUTHORIZATION_HEADER provided does not contain a Bearer token")
def _decode_session_token(session_token, api_key, secret):
    # Decode and verify the JWT; options={"require": ...} makes PyJWT
    # reject tokens missing any of the REQUIRED_FIELDS claims.
    try:
        return jwt.decode(
            session_token,
            secret,
            audience=api_key,
            algorithms=[ALGORITHM],
            options={"require": REQUIRED_FIELDS},
        )
    except jwt.exceptions.PyJWTError as exception:
        # six.raise_from keeps Python 2 compatibility while chaining the
        # original PyJWT error as the cause of the SessionTokenError.
        six.raise_from(SessionTokenError(str(exception)), exception)
def _validate_issuer(decoded_payload):
    # The issuer must be a plausible shop domain AND match the destination.
    _validate_issuer_hostname(decoded_payload)
    _validate_issuer_and_dest_match(decoded_payload)
def _validate_issuer_hostname(decoded_payload):
    """Reject tokens whose 'iss' claim is not rooted at a valid shop domain."""
    issuer_root = urljoin(decoded_payload["iss"], "/")
    if shop_url.sanitize_shop_domain(issuer_root):
        return
    raise InvalidIssuerError("Invalid issuer")
def _validate_issuer_and_dest_match(decoded_payload):
    """Ensure the 'iss' and 'dest' claims point at the same host root."""
    roots = {urljoin(decoded_payload[claim], "/") for claim in ("iss", "dest")}
    if len(roots) != 1:
        raise MismatchedHostsError("The issuer and destination do not match")
|
import sys
import commands
def interpret(code):
    """Execute *code* as Brainfuck using the handlers in `commands`.

    Memory is a tape of ints starting with a single cell; `commands.lookup`
    maps each Brainfuck character to a handler that returns the updated
    (memory, memory_pointer) pair.
    """
    memory_pointer = 0
    memory = [0]
    i = 0
    while i < len(code):
        # Loop control: '[' jumps forward when the current cell is 0,
        # ']' jumps back when it is non-zero; jump targets come from the
        # commands module.
        if code[i] == '[' and memory[memory_pointer] == 0:
            i = commands.find_end_of_loop(code, i)
        elif code[i] == ']' and memory[memory_pointer] != 0:
            i = commands.find_start_of_loop(code, i)
        # Dispatch the (possibly new) current character; unknown characters
        # (comments) are skipped.
        if commands.lookup.get(code[i]) != None:
            memory, memory_pointer = commands.lookup[code[i]](memory, memory_pointer)
        i += 1
if __name__ == "__main__":
try:
code_file = open(sys.argv[1], "r")
code = code_file.read()
code_file.close()
except:
raise Exception("Please provide path to brainfuck code as argument")
interpret(code)
|
import os
import sys
import numpy as np
import requests
import zipfile
from collections import Counter
from clint.textui import progress
from filtering import semantic_clustering
class AverageWordEmbedding(semantic_clustering.SemanticClustering):
  '''
  Averaged word embeddings clustering method. The meaning vector of the
  sentence is created by the weighted average of the word vectors.
  '''
  # Download data from fasttext.
  def download_fasttext(self):
    '''Download the FastText wiki-news 300d vectors zip into self.input_dir
    and extract it there, showing a progress bar during download.'''
    # Open the url and download the data with progress bars.
    data_stream = requests.get('https://dl.fbaipublicfiles.com/fasttext/' +
        'vectors-english/wiki-news-300d-1M.vec.zip', stream=True)
    zipped_path = os.path.join(self.input_dir, 'fasttext.zip')
    with open(zipped_path, 'wb') as file:
      total_length = int(data_stream.headers.get('content-length'))
      for chunk in progress.bar(data_stream.iter_content(chunk_size=1024),
                                expected_size=total_length / 1024 + 1):
        if chunk:
          file.write(chunk)
          file.flush()
    # Extract file.
    zip_file = zipfile.ZipFile(zipped_path, 'r')
    zip_file.extractall(self.input_dir)
    zip_file.close()
  # Generate a vocab from data files.
  def get_vocab(self, vocab_path):
    '''Build vocab.txt from the source data points: the vocab_size most
    common whitespace tokens, one per line.'''
    vocab = []
    with open(vocab_path, 'w') as file:
      for dp in self.data_points['Source']:
        vocab.extend(dp.string.split())
      file.write('\n'.join(
        [w[0] for w in Counter(vocab).most_common(self.config.vocab_size)]))
  # Download FastText word embeddings.
  def get_fast_text_embeddings(self):
    '''Ensure vocab.txt and the FastText vectors exist, then write the
    vectors for in-vocab words to vocab.npy (one FastText line per word).'''
    vocab_path = os.path.join(self.input_dir, 'vocab.txt')
    if not os.path.exists(vocab_path):
      print('No vocab file named \'vocab.txt\' found in ' + self.input_dir)
      print('Building vocab from data.')
      self.get_vocab(vocab_path)
    fasttext_path = os.path.join(self.input_dir, 'wiki-news-300d-1M.vec')
    if not os.path.exists(fasttext_path):
      self.download_fasttext()
    vocab = [line.strip('\n') for line in open(vocab_path)]
    # NOTE(review): despite the .npy extension this file holds plain text
    # FastText lines, not a NumPy binary -- generate_embeddings parses it
    # as text.
    vocab_path = os.path.join(self.input_dir, 'vocab.npy')
    # Save the vectors for words in the vocab.
    with open(fasttext_path, errors='ignore') as in_file:
      with open(vocab_path, 'w') as out_file:
        vectors = {}
        for line in in_file:
          tokens = line.strip().split()
          vectors[tokens[0]] = line
        for word in vocab:
          try:
            out_file.write(vectors[word])
          except KeyError:
            pass
  # Generate the sentence embeddings.
  def generate_embeddings(self, tag, vector_path):
    '''
    Params:
      :tag: Whether it's source or target data.
      :vector_path: Path to save the sentence vectors.
    '''
    vocab = {}
    vocab_path = os.path.join(self.input_dir, 'vocab.npy')
    if not os.path.exists(vocab_path):
      print('File containing word vectors not found in ' + self.input_dir)
      print('The file should be named \'vocab.npy\'')
      print('If you would like to use FastText embeddings press \'y\'')
      if input() == 'y':
        self.get_fast_text_embeddings()
      else:
        sys.exit()
    # Get the word embeddings.
    # vocab maps word -> [corpus count, embedding vector].
    with open(vocab_path) as v:
      for line in v:
        tokens = line.strip().split()
        vocab[tokens[0]] = [0, np.array(list(map(float, tokens[1:])))]
    embedding_dim = vocab[list(vocab)[0]][1].shape[0]
    unique_sentences = set()
    word_count = 0
    # Statistics of number of words.
    for dp in self.data_points[tag]:
      unique_sentences.add(dp.string)
      for word in dp.string.split():
        if vocab.get(word):
          vocab[word][0] += 1
          word_count += 1
    meaning_vectors = []
    sentences = unique_sentences if self.unique else [
      s.string for s in self.data_points[tag]]
    # Calculate smooth average embedding.
    # Each word vector is down-weighted by its corpus frequency
    # (0.001 / (0.001 + freq)), then summed and averaged per sentence.
    for s in sentences:
      vectors = []
      for word in s.split():
        vector = vocab.get(word)
        if vector:
          vectors.append(vector[1] * 0.001 / (0.001 + vector[0] / word_count))
      num_vecs = len(vectors)
      if num_vecs:
        meaning_vectors.append(np.sum(np.array(vectors), axis=0) / num_vecs)
      else:
        # Sentence with no in-vocab words: fall back to a zero vector.
        meaning_vectors.append(np.zeros(embedding_dim))
    np.save(vector_path, np.array(meaning_vectors).reshape(-1, embedding_dim))
|
import arcpy, os
from arcgis.gis import GIS
from arcgis.features import FeatureLayerCollection
import os
import sys
# Sign in to portal/set workspace
# NOTE(review): credentials are hard-coded placeholders -- supply real
# values securely (tool parameters / environment), never commit them.
user = "Username"
password = "Password"
arcpy.SignInToPortal('https://www.arcgis.com', user, password)
portal = "http://www.arcgis.com"
gis = GIS(portal, user, password)
tag = "Quarterly"
arcpy.AddMessage("Connected to AGO as " + user + "...")
# set workspace gdb.
arcpy.env.workspace = r"C:\Users\SROSS-C\Documents\AGO_Publishing\QuarterlyUpdates\QuarterlyUpdates.gdb"
# Set output file names
outdir = r"C:\Users\SROSS-C\Documents\AGO_Publishing\QuarterlyUpdates"
# Reference map to publish (first map in the project)
aprx = arcpy.mp.ArcGISProject(r"C:\Users\SROSS-C\Documents\AGO_Publishing\QuarterlyUpdates\QuarterlyUpdates.aprx")
m = aprx.listMaps()[0]
#Grab list of feature classes in map
# (ListFeatureClasses reads from arcpy.env.workspace set above, not the map)
fcList = arcpy.ListFeatureClasses()
#Get list of filenames.sddraft
arcpy.AddMessage("Getting list of filenames .sddraft...")
fcListAll =[]
i = 0
# NOTE(review): the inner `for` advances `i` once per feature class, so one
# pass already exhausts the outer `while`; the nesting is redundant but
# produces the correct list.
while i < len(fcList):
    for fc in fcList:
        service = fcList[i]
        sddraft_filename = service + ".sddraft"
        sddraft_output_filename = os.path.join(outdir, sddraft_filename)
        fcListAll.append(sddraft_output_filename)
        i+=1
fcListAll.sort()
print (fcListAll)
#Get the number of layers to loop through
lyrlist = []
for lyr in m.listLayers():
    lyrlist.append(lyr)
lyrlist1 = len(lyrlist) - 1
#Loop through and create sddraft files for each layer
arcpy.AddMessage("Creating sddraft files...")
x = 0
# NOTE(review): same redundant while/for nesting as above; `x` indexes
# fcListAll while the inner loop iterates layers -- if the layer count and
# feature-class count ever differ, fcListAll[x] can raise IndexError or
# drafts can be mismatched. Confirm the two lists always align.
while x <= lyrlist1:
    for lyrs in m.listLayers():
        sharing_draft = m.getWebLayerSharingDraft("HOSTING_SERVER", "FEATURE", lyrs, lyrs)
        sharing_draft.credits = "TxDOT – TPP – Data Management"
        sharing_draft.useLimitations = "Copyright 2018. Texas Department of Transportation. This data was produced for internal use within the Texas Department of Transportation and made available to the public for informational purposes only. Accuracy is limited to the validity of available data as of the date published"
        arcpy.AddMessage("Exporting " + str(lyrs) + ".sddraft...")
        sharing_draft.exportToSDDraft(fcListAll[x]) # Create Service Definition Draft file
        x+=1
#Loop through and upload ead service definition file
finallist = []
t = 0
while t < len(fcList):
for fc in fcList:
arcpy.AddMessage("Starting...")
service = fcList[t]
sd_filename = service + ".sd"
sd_output_filename = os.path.join(outdir, sd_filename)
try:
os.remove(sd_output_filename)
except FileNotFoundError:
pass
try:
sdItem = gis.content.search("{} AND owner:{} AND tags:{}".format(service, user, tag), item_type="Service Definition", sort_field="title", sort_order="asc", max_items=100)[0]
except IndexError:
sdItem = "no"
pass
arcpy.AddMessage("Working on " + service + "...")
weblayer = sdItem.title
if service == weblayer:
try:
arcpy.AddMessage("Overwriting " + service + "...")
sddraft = fcListAll[t]
sd = sd_output_filename
arcpy.StageService_server(sddraft, sd)
sdItem.update(data=sd)
sdItem.publish(overwrite=True)
arcpy.AddMessage("Successfully published " + service)
t+=1
except IndexError:
arcpy.AddMessage("Failure to publish " + service)
continue
else:
try:
arcpy.AddMessage("Uploading Service Definition " + fcListAll[t] + "...")
arcpy.StageService_server(fcListAll[t], sd_output_filename)
arcpy.UploadServiceDefinition_server(sd_output_filename, "My Hosted Services")
arcpy.AddMessage("Successfully Uploaded " + service)
t+=1
except:
arcpy.AddMessage("Failure to publish " + fcListAll[t])
t+=1
arcpy.AddMessage("Successfully published services to AGO.")
# Remove underscores from titles
arcpy.AddMessage("Removing underscores from titles...")
# Search and create a list of content
fc = gis.content.search(query="owner: TPP_GIS AND type: Feature Service AND tags: Quarterly",sort_field="title",sort_order="asc", max_items=100 )
# Loop through item list
for item in fc:
title = item.title
newtitle = title.replace("_"," ")
arcpy.AddMessage("Changing " + title + " to " + newtitle + "...")
item.update(item_properties={'title':newtitle})
print (newtitle)
arcpy.AddMessage("Enabling Export...")
search_result= gis.content.search("owner: TPP_GIS AND type: Feature Service AND tags: Quarterly", sort_field="title", sort_order="asc", max_items=1000)
b = 0
while b < (len(search_result)):
item = search_result[b]
flc = FeatureLayerCollection.fromitem(item)
update_dict = {"capabilities": "Query,Extract"}
flc.manager.update_definition(update_dict)
arcpy.AddMessage(item)
b+=1
number = len(fc)
arcpy.AddMessage("Finished publishing " + str(number) + " layers!")
|
import time
import csv
import sys
import logging
import config as c
import utils
from DB import utils as db_utils

# Twitter's users/lookup endpoint accepts at most 100 screen names per call.
TWITTER_BATCH_LIMIT = 100
def fetch(config, users, db):
    """Resolve Twitter screen names to (screen_name, id) pairs.

    Names already present in the DB are returned from the cache; the rest are
    looked up through the Twitter API in batches and persisted (including
    failed lookups, stored with id=None so they are not retried every run).

    Args:
        config: application config, passed to utils.twitter_login on demand.
        users: iterable of screen names (case-insensitive).
        db: DB driver exposing getAuthor/saveAuthor; replaced with the
            default sqlite driver if it lacks that API.

    Returns:
        list of handles: cached entries as returned by db.getAuthor, plus
        (screen_name, id) tuples for freshly fetched users.
    """
    # Fall back to the default sqlite driver if `db` lacks the author API.
    if not (hasattr(db, 'getAuthor') and hasattr(db, 'saveAuthor')):
        db = c.load_db_driver('sqlite')
    logging.info(f"looking for: {users} in {db}")
    api = None  # lazily created: only log in if something needs fetching
    handles = []
    need_fetch = []

    def add_sn(screen_name, i, date):
        # Only resolved users go into the result list, but every lookup
        # (including failures, i=None) is persisted to the DB.
        if i:
            handles.append((screen_name, i))
        db.saveAuthor(db_utils.make_user(screen_name, i, date))

    for screen_name in users:
        sn = screen_name.lower()
        try:
            i = db.getAuthor(sn)
            if i:
                handles.append(i)
        except (KeyError, AttributeError) as e:
            # logging.warn is a deprecated alias; use logging.warning.
            logging.warning(f"{sn} not found in DB {db} ({e})")
            need_fetch.append(sn)

    while len(need_fetch):
        if not api:
            api = utils.twitter_login(config)
        batch = need_fetch[:TWITTER_BATCH_LIMIT]
        need_fetch = need_fetch[TWITTER_BATCH_LIMIT:]
        logging.debug(f"this batch is {len(batch)}, still need to fetch {len(need_fetch)}")
        try:
            lu = api.lookup_users(user_ids=None, screen_names=batch, include_entities=False)
        except Exception as e:
            # Best-effort: a failed batch is treated as all-unresolved,
            # but no longer silently -- log the cause.
            logging.warning(f"lookup_users failed for batch of {len(batch)}: {e}")
            lu = []
        for u in lu:
            sn = u._json['screen_name'].lower()
            add_sn(sn, u._json['id'], u._json['created_at'])
            batch.remove(sn)
        # Anything left in the batch was not returned by the API: record it
        # as unresolved. BUG FIX: add_sn takes three arguments; the old call
        # add_sn(sn, None) raised TypeError whenever a lookup came up short.
        for sn in batch:
            add_sn(sn, None, None)
    logging.info(handles)
    return handles
if __name__ == "__main__":
    DB_CONFIG = c.DBS
    DB_CONFIG["default"] = "sqlite"
    opts = c.parse_args([DB_CONFIG, c.DEBUG, c.CONFIG_FILE, c.CSV_FILE, c.USERS, ])
    config = opts.config[0]
    # BUG FIX: argparse namespaces raise AttributeError (not KeyError) for a
    # missing attribute, so the old `except KeyError` never fired; also
    # default to [] so the extend() below cannot hit a None.
    ids = getattr(opts, 'ids', None) or []
    if opts.csv:
        ids.extend(fetch(config, opts.csv, opts.db))
    # Emit a TSV of the resolved handles.
    print("screen_name\tid")
    for u, i in ids:
        print(f"{u}\t{i}")
|
import csv
import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Set

from hed_utils.support import google_spreadsheet

# Public API of this module.
__all__ = [
    "AStorage",
    "CsvJobsStorage",
    "GoogleSheetsJobsStorage"
]

# Module logger; NullHandler so library users control logging configuration.
_log = logging.getLogger(__name__)
_log.addHandler(logging.NullHandler())
class AStorage(ABC):
    """Base class for job storages: row conversion plus the storage contract."""

    def __init__(self, columns: List[str]):
        self.columns = columns

    def convert_to_rows(self, jobs: List[Dict[str, str]]) -> List[List[str]]:
        """Project every job dict onto self.columns; absent keys become ""."""
        _log.debug("converting %s jobs to rows... (columns: %s)", len(jobs), self.columns)
        rows = []
        for job in jobs:
            row = [job.get(column, "") for column in self.columns]
            rows.append(row)
        return rows

    @abstractmethod
    def get_known_jobs_urls(self) -> Set[str]:
        """Return urls of jobs that are already stored.(Used for filtering duplicate records)"""
        pass

    @abstractmethod
    def store_jobs(self, jobs: List[Dict[str, str]]):
        """Store the processed jobs data to the backing storage"""
        pass
class CsvJobsStorage(AStorage):
    """Jobs storage backed by a local CSV file.

    The first row of the file is the header (self.columns); duplicate
    detection relies on the "url" column.
    """

    def __init__(self, columns: List[str], filepath: str):
        super().__init__(columns)
        self.urls_column_index = columns.index("url")
        self.filepath = Path(filepath).absolute()
        _log.info("initialized %s: columns=%s, urls_column_index=%s, filepath='%s'",
                  type(self).__name__, columns, self.urls_column_index, str(self.filepath))

    def init_file(self):
        """Create/truncate the CSV file and write the header row."""
        _log.debug("initializing jobs csv: columns=%s, file='%s'", self.columns, str(self.filepath))
        # FIX: the csv module requires files opened with newline="" --
        # otherwise rows gain blank lines on Windows and quoted fields with
        # embedded newlines round-trip incorrectly.
        with self.filepath.open(mode="w", newline="") as fp:
            writer = csv.writer(fp)
            writer.writerow(self.columns)

    def get_known_jobs_urls(self) -> Set[str]:
        """Return urls of jobs already stored (used to filter duplicates)."""
        _log.debug("getting known jobs urls from csv...")
        with self.filepath.open(mode="r", newline="") as fp:
            reader = csv.reader(fp)
            try:
                _ = next(reader)  # skip header
                known_jobs_urls = {row[self.urls_column_index] for row in reader}
            except StopIteration:
                # Completely empty file (not even a header row).
                known_jobs_urls = set()
        _log.debug("got %s known jobs urls", len(known_jobs_urls))
        return known_jobs_urls

    def store_jobs(self, jobs: List[Dict[str, str]]):
        """Append the processed jobs as CSV rows."""
        rows = self.convert_to_rows(jobs)
        _log.debug("appending %s rows to csv at: %s", len(jobs), str(self.filepath))
        with self.filepath.open(mode="a", newline="") as fp:
            writer = csv.writer(fp)
            writer.writerows(rows)
class GoogleSheetsJobsStorage(AStorage):
    """Jobs storage backed by a Google Sheets worksheet."""

    def __init__(self, columns: List[str], spreadsheet_title: str, worksheet_title: str, json_filepath: str):
        super().__init__(columns)
        # Worksheet columns are addressed with 1-based indices.
        self.urls_column_index = columns.index("url") + 1
        self.worksheet = google_spreadsheet.open_worksheet(
            spreadsheet_title=spreadsheet_title,
            worksheet_title=worksheet_title,
            json_filepath=json_filepath,
        )
        _log.info("initialized %s: "
                  "columns=%s, urls_column_index=%s, spreadsheet_title='%s', worksheet_title='%s', json_filepath='%s'",
                  type(self).__name__,
                  columns, self.urls_column_index, spreadsheet_title, worksheet_title, json_filepath)

    def get_known_jobs_urls(self) -> Set[str]:
        """Return the url values already present in the url column."""
        _log.debug("getting known jobs urls from worksheet...")
        column_values = self.worksheet.col_values(self.urls_column_index)
        known_jobs_urls = set(column_values)
        _log.debug("got %s known jobs urls", len(known_jobs_urls))
        return known_jobs_urls

    def store_jobs(self, jobs: List[Dict[str, str]]):
        """Append the processed jobs as rows to the worksheet."""
        rows = self.convert_to_rows(jobs)
        _log.debug("appending %s jobs to worksheet: %s", len(rows), self.worksheet)
        google_spreadsheet.append_worksheet_values(worksheet=self.worksheet, values=rows)
|
#!/usr/bin/env python3
# encoding: utf-8
import numpy as np
import torch as th
import torch.nn.functional as F
from rls.algorithms.base.sarl_off_policy import SarlOffPolicy
from rls.common.data import Data
from rls.common.decorator import iton
from rls.nn.models import BCQ_DCT, BCQ_Act_Cts, BCQ_CriticQvalueOne
from rls.nn.modules.wrappers import TargetTwin
from rls.nn.offline.bcq_vae import VAE
from rls.nn.utils import OPLR
from rls.utils.expl_expt import ExplorationExploitationClass
from rls.utils.torch_utils import n_step_return
class BCQ(SarlOffPolicy):
    """
    Batch-Constrained deep Q-learning (BCQ), with both the continuous-action
    variant (VAE + perturbation actor + twin critic) and the discrete-action
    variant (Q-net with an imitation head and action-probability threshold).

    Benchmarking Batch Deep Reinforcement Learning Algorithms, http://arxiv.org/abs/1910.01708
    Off-Policy Deep Reinforcement Learning without Exploration, http://arxiv.org/abs/1812.02900
    """
    policy_mode = 'off-policy'

    def __init__(self,
                 polyak=0.995,
                 # NOTE(review): mutable default arguments below are shared
                 # across instances; safe only while they are never mutated.
                 discrete=dict(threshold=0.3,
                               lr=5.0e-4,
                               eps_init=1,
                               eps_mid=0.2,
                               eps_final=0.01,
                               init2mid_annealing_step=1000,
                               assign_interval=1000,
                               network_settings=[32, 32]),
                 continuous=dict(phi=0.05,
                                 lmbda=0.75,
                                 select_samples=100,
                                 train_samples=10,
                                 actor_lr=1e-3,
                                 critic_lr=1e-3,
                                 vae_lr=1e-3,
                                 network_settings=dict(actor=[32, 32],
                                                       critic=[32, 32],
                                                       vae=dict(encoder=[750, 750],
                                                                decoder=[750, 750]))),
                 **kwargs):
        """
        Args:
            polyak: soft-update coefficient for the target networks.
            discrete: hyper-parameters used when the action space is discrete.
            continuous: hyper-parameters used when the action space is continuous.
            **kwargs: forwarded to SarlOffPolicy.
        """
        super().__init__(**kwargs)
        self._polyak = polyak
        if self.is_continuous:
            # lmbda: weight for soft clipped double-Q; sample counts control
            # how many VAE-decoded candidates are scored at act/train time.
            self._lmbda = continuous['lmbda']
            self._select_samples = continuous['select_samples']
            self._train_samples = continuous['train_samples']
            # Perturbation actor (bounded by phi), with a polyak target twin.
            self.actor = TargetTwin(BCQ_Act_Cts(self.obs_spec,
                                                rep_net_params=self._rep_net_params,
                                                action_dim=self.a_dim,
                                                phi=continuous['phi'],
                                                network_settings=continuous['network_settings']['actor']),
                                    polyak=self._polyak).to(self.device)
            # Twin Q critic with its own polyak target.
            self.critic = TargetTwin(BCQ_CriticQvalueOne(self.obs_spec,
                                                         rep_net_params=self._rep_net_params,
                                                         action_dim=self.a_dim,
                                                         network_settings=continuous['network_settings']['critic']),
                                     polyak=self._polyak).to(self.device)
            # Generative model of the behavior policy's action distribution.
            self.vae = VAE(self.obs_spec,
                           rep_net_params=self._rep_net_params,
                           a_dim=self.a_dim,
                           z_dim=self.a_dim * 2,
                           hiddens=continuous['network_settings']['vae']).to(self.device)
            self.actor_oplr = OPLR(self.actor, continuous['actor_lr'], **self._oplr_params)
            self.critic_oplr = OPLR(self.critic, continuous['critic_lr'], **self._oplr_params)
            self.vae_oplr = OPLR(self.vae, continuous['vae_lr'], **self._oplr_params)
            self._trainer_modules.update(actor=self.actor,
                                         critic=self.critic,
                                         vae=self.vae,
                                         actor_oplr=self.actor_oplr,
                                         critic_oplr=self.critic_oplr,
                                         vae_oplr=self.vae_oplr)
        else:
            # Epsilon-greedy schedule for the discrete variant.
            self.expl_expt_mng = ExplorationExploitationClass(eps_init=discrete['eps_init'],
                                                              eps_mid=discrete['eps_mid'],
                                                              eps_final=discrete['eps_final'],
                                                              init2mid_annealing_step=discrete[
                                                                  'init2mid_annealing_step'],
                                                              max_step=self._max_train_step)
            self.assign_interval = discrete['assign_interval']
            # Actions whose (normalized) imitation probability falls below
            # this threshold are masked out of the argmax.
            self._threshold = discrete['threshold']
            self.q_net = TargetTwin(BCQ_DCT(self.obs_spec,
                                            rep_net_params=self._rep_net_params,
                                            output_shape=self.a_dim,
                                            network_settings=discrete['network_settings']),
                                    polyak=self._polyak).to(self.device)
            self.oplr = OPLR(self.q_net, discrete['lr'], **self._oplr_params)
            self._trainer_modules.update(model=self.q_net,
                                         oplr=self.oplr)

    @iton
    def select_action(self, obs):
        """Pick actions for `obs`: best perturbed VAE sample (continuous) or
        imitation-masked greedy Q with epsilon-greedy exploration (discrete)."""
        if self.is_continuous:
            # Sample several candidate actions from the VAE and perturb them.
            _actions = []
            for _ in range(self._select_samples):
                _actions.append(self.actor(obs, self.vae.decode(obs), rnncs=self.rnncs))  # [B, A]
            self.rnncs_ = self.actor.get_rnncs()  # TODO: calculate corrected hidden state
            _actions = th.stack(_actions, dim=0)  # [N, B, A]
            # Score every candidate with the first critic head.
            q1s = []
            for i in range(self._select_samples):
                q1s.append(self.critic(obs, _actions[i])[0])
            q1s = th.stack(q1s, dim=0)  # [N, B, 1]
            max_idxs = q1s.argmax(dim=0, keepdim=True)[-1]  # [1, B, 1]
            # Select, per batch element, the candidate with the highest Q.
            # NOTE(review): relies on broadcasting of the three index tensors
            # (assumes n_copies is the batch size here) -- TODO confirm.
            actions = _actions[max_idxs, th.arange(self.n_copies).reshape(self.n_copies, 1), th.arange(self.a_dim)]
        else:
            q_values, i_values = self.q_net(obs, rnncs=self.rnncs)  # [B, *]
            # Shift Q so the masked product below cannot promote a 0 entry.
            q_values = q_values - q_values.min(dim=-1, keepdim=True)[0]  # [B, *]
            i_values = F.log_softmax(i_values, dim=-1)  # [B, *]
            i_values = i_values.exp()  # [B, *]
            # Binary mask: keep actions whose relative imitation probability
            # exceeds the BCQ threshold.
            i_values = (i_values / i_values.max(-1, keepdim=True)[0] > self._threshold).float()  # [B, *]
            self.rnncs_ = self.q_net.get_rnncs()
            if self._is_train_mode and self.expl_expt_mng.is_random(self._cur_train_step):
                actions = np.random.randint(0, self.a_dim, self.n_copies)
            else:
                actions = (i_values * q_values).argmax(-1)  # [B,]
        return actions, Data(action=actions)

    @iton
    def _train(self, BATCH):
        """One gradient step on BATCH; returns (td_error, summary dict)."""
        if self.is_continuous:
            # Variational Auto-Encoder Training
            recon, mean, std = self.vae(BATCH.obs, BATCH.action, begin_mask=BATCH.begin_mask)
            recon_loss = F.mse_loss(recon, BATCH.action)
            # Closed-form KL(N(mean, std) || N(0, 1)).
            KL_loss = -0.5 * (1 + th.log(std.pow(2)) - mean.pow(2) - std.pow(2)).mean()
            vae_loss = recon_loss + 0.5 * KL_loss
            self.vae_oplr.optimize(vae_loss)
            target_Qs = []
            for _ in range(self._train_samples):
                # Compute value of perturbed actions sampled from the VAE
                _vae_actions = self.vae.decode(BATCH.obs_,
                                               begin_mask=BATCH.begin_mask)
                _actor_actions = self.actor.t(BATCH.obs_, _vae_actions,
                                              begin_mask=BATCH.begin_mask)
                target_Q1, target_Q2 = self.critic.t(BATCH.obs_, _actor_actions,
                                                     begin_mask=BATCH.begin_mask)
                # Soft Clipped Double Q-learning
                target_Q = self._lmbda * th.min(target_Q1, target_Q2) + \
                           (1. - self._lmbda) * th.max(target_Q1, target_Q2)
                target_Qs.append(target_Q)
            target_Qs = th.stack(target_Qs, dim=0)  # [N, T, B, 1]
            # Take max over each BATCH.action sampled from the VAE
            target_Q = target_Qs.max(dim=0)[0]  # [T, B, 1]
            target_Q = n_step_return(BATCH.reward,
                                     self.gamma,
                                     BATCH.done,
                                     target_Q,
                                     BATCH.begin_mask).detach()  # [T, B, 1]
            current_Q1, current_Q2 = self.critic(BATCH.obs, BATCH.action, begin_mask=BATCH.begin_mask)
            # Average TD error of the two heads (reported, not optimized).
            td_error = ((current_Q1 - target_Q) + (current_Q2 - target_Q)) / 2
            critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
            self.critic_oplr.optimize(critic_loss)
            # Pertubation Model / Action Training
            sampled_actions = self.vae.decode(BATCH.obs, begin_mask=BATCH.begin_mask)
            perturbed_actions = self.actor(BATCH.obs, sampled_actions, begin_mask=BATCH.begin_mask)
            # Update through DPG
            q1, _ = self.critic(BATCH.obs, perturbed_actions, begin_mask=BATCH.begin_mask)
            actor_loss = -q1.mean()
            self.actor_oplr.optimize(actor_loss)
            return td_error, {
                'LEARNING_RATE/actor_lr': self.actor_oplr.lr,
                'LEARNING_RATE/critic_lr': self.critic_oplr.lr,
                'LEARNING_RATE/vae_lr': self.vae_oplr.lr,
                'LOSS/actor_loss': actor_loss,
                'LOSS/critic_loss': critic_loss,
                'LOSS/vae_loss': vae_loss,
                'Statistics/q_min': q1.min(),
                'Statistics/q_mean': q1.mean(),
                'Statistics/q_max': q1.max()
            }
        else:
            q_next, i_next = self.q_net(BATCH.obs_, begin_mask=BATCH.begin_mask)  # [T, B, A]
            q_next = q_next - q_next.min(dim=-1, keepdim=True)[0]  # [B, *]
            i_next = F.log_softmax(i_next, dim=-1)  # [T, B, A]
            i_next = i_next.exp()  # [T, B, A]
            # Same imitation-probability masking as in select_action.
            i_next = (i_next / i_next.max(-1, keepdim=True)[0] > self._threshold).float()  # [T, B, A]
            q_next = i_next * q_next  # [T, B, A]
            next_max_action = q_next.argmax(-1)  # [T, B]
            next_max_action_one_hot = F.one_hot(next_max_action.squeeze(), self.a_dim).float()  # [T, B, A]
            # Double-DQN style: action chosen online, evaluated by the target.
            q_target_next, _ = self.q_net.t(BATCH.obs_, begin_mask=BATCH.begin_mask)  # [T, B, A]
            q_target_next_max = (q_target_next * next_max_action_one_hot).sum(-1, keepdim=True)  # [T, B, 1]
            q_target = n_step_return(BATCH.reward,
                                     self.gamma,
                                     BATCH.done,
                                     q_target_next_max,
                                     BATCH.begin_mask).detach()  # [T, B, 1]
            q, i = self.q_net(BATCH.obs, begin_mask=BATCH.begin_mask)  # [T, B, A]
            q_eval = (q * BATCH.action).sum(-1, keepdim=True)  # [T, B, 1]
            td_error = q_target - q_eval  # [T, B, 1]
            # Importance-sampling weights (prioritized replay) if present.
            q_loss = (td_error.square() * BATCH.get('isw', 1.0)).mean()  # 1
            # Imitation (behavior-cloning) head trained with NLL plus a small
            # regularizer on the raw logits.
            imt = F.log_softmax(i, dim=-1)  # [T, B, A]
            imt = imt.reshape(-1, self.a_dim)  # [T*B, A]
            action = BATCH.action.reshape(-1, self.a_dim)  # [T*B, A]
            i_loss = F.nll_loss(imt, action.argmax(-1))  # 1
            loss = q_loss + i_loss + 1e-2 * i.pow(2).mean()
            self.oplr.optimize(loss)
            return td_error, {
                'LEARNING_RATE/lr': self.oplr.lr,
                'LOSS/q_loss': q_loss,
                'LOSS/i_loss': i_loss,
                'LOSS/loss': loss,
                'Statistics/q_max': q_eval.max(),
                'Statistics/q_min': q_eval.min(),
                'Statistics/q_mean': q_eval.mean()
            }

    def _after_train(self):
        """Sync target networks: polyak soft-update every step, or a hard
        copy every `assign_interval` steps when polyak is disabled (0)."""
        super()._after_train()
        if self.is_continuous:
            self.actor.sync()
            self.critic.sync()
        else:
            if self._polyak != 0:
                self.q_net.sync()
            else:
                if self._cur_train_step % self.assign_interval == 0:
                    self.q_net.sync()
|
#!/usr/bin/python
"""
Quick Bezier profile editor
...Sorta
"""
from __future__ import division  # Need to get floats when dividing integers
from Qt import QtWidgets, QtGui, QtCore, QtCompat
import maya.OpenMayaUI as mui
# Thank you Freya Holmer | Neat Corp
# https://youtu.be/NzjF1pdlK7Y
def lerp(a, b, t):
    """Linearly interpolate between a and b by factor t (t=0 -> a, t=1 -> b)."""
    return b * t + (1.0 - t) * a
def inv_lerp(a, b, v):
    """Inverse lerp: where does v sit between a and b, as a 0..1 factor?"""
    span = b - a
    return (v - a) / span
def remap(iMin, iMax, oMin, oMax, v):
    """Map v from the input range [iMin, iMax] onto [oMin, oMax]."""
    normalized = inv_lerp(iMin, iMax, v)
    return lerp(oMin, oMax, normalized)
def _get_maya_window():
    """Return Maya's main window wrapped as a QMainWindow.

    FIX: the previous code called the Python-2-only builtin `long`, which
    raises NameError under Python 3 (Maya 2022+). int() works on both:
    Python 2 transparently promotes oversized ints to long.
    """
    ptr = mui.MQtUtil.mainWindow()
    return QtCompat.wrapInstance(int(ptr), QtWidgets.QMainWindow)
class Example(QtWidgets.QDialog):
    """Bezier profile editor dialog, parented to the Maya main window.

    Left-mouse drag edits the red (horizontal-handles) curve; right-mouse
    drag edits the blue (vertical-handles) curve. The drawing canvas is a
    fixed 400x400 square with a `margin`-pixel border.
    """

    def __init__(self):
        super(Example, self).__init__()
        self.setParent(_get_maya_window())
        self.setWindowFlags(
            QtCore.Qt.Dialog |
            QtCore.Qt.WindowCloseButtonHint
        )
        # Let Maya persist the window position between sessions.
        self.setProperty("saveWindowPref", True)
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
        # Which curve(s) to draw; toggled by the mouse buttons held down.
        self.lmb = True
        self.rmb = True
        self.margin = 20
        # Control-point coordinates for the two cubic Bezier curves.
        self.x1 = 200
        self.y1 = 200
        self.x2 = 200
        self.y2 = 200
        self.red = QtGui.QColor(250, 0, 0 , 150)
        self.blue = QtGui.QColor( 0, 0, 255, 150)
        self.initUI()

    def initUI(self):
        # Fixed-size window: all drawing math below assumes 400x400.
        self.setGeometry(300, 300, 400, 400)
        self.setFixedSize(400, 400)
        self.setWindowTitle('Bezier curve')
        self.show()

    def paintEvent(self, e):
        """Redraw the canvas, the active curve(s), handles and handle lines."""
        qp = QtGui.QPainter()
        qp.begin(self)
        qp.setRenderHint(QtGui.QPainter.Antialiasing)
        self.drawRectangle(qp, self.margin, self.margin, self.geometry().width()-(2*self.margin), self.geometry().height()-(2*self.margin))
        if self.lmb:
            # Red curve: tangent handles move horizontally (x1/x2).
            self.drawBezierCurve(qp, self.x1, self.margin, self.x2, 400 - self.margin)
            self.drawLine(qp, self.margin, self.margin, self.x1, self.margin)
            self.drawLine(qp, 400 - self.margin, 400 - self.margin, self.x2, 400 - self.margin)
            self.drawDots(qp, self.x1, self.margin, self.red)
            self.drawDots(qp, self.x2, 400 - self.margin, self.red)
        if self.rmb:
            # Blue curve: tangent handles move vertically (y1/y2).
            self.drawBezierCurve(qp, self.margin, self.y1, 400 - self.margin, self.y2)
            self.drawLine(qp, self.margin, self.margin, self.margin, self.y1)
            self.drawLine(qp, 400 - self.margin, 400 - self.margin, 400 - self.margin, self.y2)
            self.drawDots(qp, self.margin, self.y1, self.blue)
            self.drawDots(qp, 400 - self.margin, self.y2, self.blue)
        qp.end()

    def drawRectangle(self, qp, x, y, width, height):
        """Draw the dark canvas rectangle with a light outline."""
        brush = QtGui.QBrush(QtGui.QColor(50, 50, 50))
        pen = QtGui.QPen()
        pen.setColor(QtGui.QColor(192, 192, 192))
        pen.setWidth(1)
        qp.setPen(pen)
        qp.setBrush(brush)
        qp.drawRect(x, y, width, height)
        qp.setBrush(QtCore.Qt.NoBrush)

    def drawDots(self, qp, x, y, color):
        """Draw one round control-point handle at (x, y)."""
        pen = QtGui.QPen()
        pen.setColor(color)
        pen.setCapStyle(QtCore.Qt.RoundCap)
        pen.setWidth(10)
        qp.setPen(pen)
        qp.drawPoint(x,y)

    def drawBezierCurve(self, qp, x1, y1, x2, y2):
        """Draw a cubic Bezier from the canvas top-left corner to the
        bottom-right corner, using (x1, y1) and (x2, y2) as tangents."""
        pen = QtGui.QPen()
        pen.setColor(QtGui.QColor(192, 192, 192))
        pen.setWidth(1)
        qp.setPen(pen)
        path = QtGui.QPainterPath()
        path.moveTo(self.margin, self.margin)
        path.cubicTo(x1, y1, x2, y2, 400 - (self.margin), 400 - (self.margin))
        qp.drawPath(path)

    def drawLine(self, qp, x0, y0, x1, y1):
        """Draw a dotted guide line from an endpoint to its tangent handle."""
        pen = QtGui.QPen()
        pen.setCapStyle(QtCore.Qt.RoundCap)
        pen.setStyle(QtCore.Qt.DotLine)
        pen.setColor(QtGui.QColor(192, 192, 192))
        pen.setWidth(2)
        qp.setPen(pen)
        path = QtGui.QPainterPath()
        path.moveTo(x0, y0)
        path.lineTo(x1, y1)
        qp.drawPath(path)

    def mouseMoveEvent(self, pos):
        """Drag handler: move the handles, mirroring the opposite handle."""
        width = self.geometry().width()
        height = self.geometry().height()
        # Start doing math here to symmetrize the vertical
        # and do opposite the horizontal
        pX = pos.x() / width
        pY = pos.y() / height
        # NOTE(review): remap(0,1,0,1,...) is currently an identity mapping;
        # presumably kept so the ranges can be tuned later.
        percentageX = remap(0.0, 1.0, 0.0, 1.0, pX)
        percentageY = remap(0.0, 1.0, 0.0, 1.0, pY)
        # Clamp every handle inside the canvas margins.
        x1Value = min(max(self.margin, pos.x()), 400 - self.margin)
        y1Value = min(max(self.margin, pos.y()), 400 - self.margin)
        x2Value = min(max(self.margin, 400 * (1.0 - percentageY)), 400 - self.margin)
        y2Value = min(max(self.margin, 400 * (1.0 - percentageX)), 400 - self.margin)
        self.x1 = x1Value
        self.y1 = y1Value
        self.x2 = x2Value
        self.y2 = y2Value
        self.update() # Repaint

    def mousePressEvent(self, event):
        # Record which buttons are held; paintEvent shows only those curves.
        check = QtWidgets.QApplication.instance().mouseButtons()
        self.lmb = bool(QtCore.Qt.LeftButton & check)
        self.rmb = bool(QtCore.Qt.RightButton & check)
        super(Example, self).mousePressEvent(event)

    def mouseReleaseEvent(self, event):
        check = QtWidgets.QApplication.instance().mouseButtons()
        self.lmb = bool(QtCore.Qt.LeftButton & check)
        self.rmb = bool(QtCore.Qt.RightButton & check)
        super(Example, self).mouseReleaseEvent(event)
def main():
    """(Re)launch the editor dialog, replacing any previous instance."""
    global _UI
    try:
        _UI.close()
        _UI.deleteLater()
        _UI = None
    except Exception:
        # First launch (_UI not yet defined -> NameError) or a stale/deleted
        # widget; either way fall through and build a fresh dialog.
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        pass
    finally:
        _UI = Example()


if __name__ == '__main__':
    main()
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
from utils import *
# Mapping from config activation names to torch activation modules;
# 'NONE' yields an identity (no activation).
acts = {
    'RELU': nn.ReLU,
    'ELU': nn.ELU,
    'TANH': nn.Tanh,
    'NONE': nn.Identity,
}
def load_MAC(cfg, vocab):
    """Build the MAC network and its EMA shadow copy.

    Both models are moved to CUDA when available and enabled in cfg; the EMA
    copy has gradients disabled since it is updated manually, and the primary
    model is returned in train mode.
    """
    kwargs = {
        'vocab': vocab,
        'num_answers': len(vocab['answer_token_to_idx']),
    }
    model = MACNetwork(cfg, **kwargs)
    model_ema = MACNetwork(cfg, **kwargs)
    # The EMA copy is never trained by the optimizer.
    for param in model_ema.parameters():
        param.requires_grad = False
    use_cuda = torch.cuda.is_available() and cfg.CUDA
    if use_cuda:
        model.cuda()
        model_ema.cuda()
    else:
        model.cpu()
        model_ema.cpu()
    model.train()
    return model, model_ema
def mask_by_length(x, lengths, device=None):
    """Mask out padded positions of `x` along dim 1.

    Positions < length keep their value; positions beyond the sequence
    length become -inf (via the deliberate 1/0 -> inf), so a subsequent
    softmax assigns them zero probability.
    """
    lengths = torch.as_tensor(lengths, dtype=torch.float32, device=device)
    longest = max(lengths)
    positions = torch.arange(longest, device=device)
    valid = positions.expand(len(lengths), int(longest)) < lengths.unsqueeze(1)
    valid = valid.float().unsqueeze(2)
    # valid==1: x*1 + (1 - 1) = x ; valid==0: x*0 + (1 - inf) = -inf
    return x * valid + (1 - 1 / valid)
class ControlUnit(nn.Module):
    """MAC control unit: attends over question words to produce the control
    state for each reasoning step.

    NOTE(review): forward() uses `F.softmax` but `torch.nn.functional` is not
    imported in this file's visible imports -- presumably provided via
    `from utils import *`; TODO confirm.
    """

    def __init__(self,
                 module_dim,
                 max_step=4,
                 separate_syntax_semantics=False,
                 control_feed_prev=True,
                 control_cont_activation='TANH'
                 ):
        super().__init__()
        # Scalar attention score per contextual word.
        self.attn = nn.Linear(module_dim, 1)
        self.control_input = nn.Sequential(nn.Linear(module_dim, module_dim),
                                           nn.Tanh())
        if control_feed_prev:
            # Mixes the previous control state into the question projection.
            self.cont_control = nn.Linear(2 * module_dim, module_dim)
            self.cont_control_act = acts[control_cont_activation]()
        else:
            self.cont_control = None
            self.cont_control_act = None
        # Identity pass-through; handy as a hook point for attention probes.
        self.cw_attn_idty = nn.Identity()
        # One step-specific question projection per reasoning step.
        self.control_input_u = nn.ModuleList()
        for i in range(max_step):
            self.control_input_u.append(nn.Linear(module_dim, module_dim))
        self.module_dim = module_dim
        self.control_feed_prev = control_feed_prev
        self.separate_syntax_semantics = separate_syntax_semantics

    def mask(self, question_lengths, device):
        # Multiplicative mask: ~1e-30 on padded positions, 0 on valid ones.
        # NOTE(review): appears unused by forward(), which relies on the
        # module-level mask_by_length instead -- TODO confirm.
        max_len = max(question_lengths)
        mask = torch.arange(max_len, device=device).expand(len(question_lengths), int(max_len)) < question_lengths.unsqueeze(1)
        mask = mask.float()
        ones = torch.ones_like(mask)
        mask = (ones - mask) * (1e-30)
        return mask

    # @staticmethod
    # def mask_by_length(x, lengths, device=None):
    #     lengths = torch.as_tensor(lengths, dtype=torch.float32, device=device)
    #     max_len = max(lengths)
    #     mask = torch.arange(max_len, device=device).expand(len(lengths), int(max_len)) < lengths.unsqueeze(1)
    #     mask = mask.float().unsqueeze(2)
    #     x_masked = x * mask + (1 - 1 / mask)
    #     return x_masked

    def forward(self, question, context, question_lengths, step, prev_control=None):
        """
        Args:
            question: external inputs to control unit (the question vector).
                [batchSize, ctrlDim]
            context: the representation of the words used to compute the attention.
                [batchSize, questionLength, ctrlDim]
                (or a (syntactics, semantics) pair when
                separate_syntax_semantics is enabled)
            prev_control: previous control state (required when
                control_feed_prev is True)
            question_lengths: the length of each question.
                [batchSize]
            step: which step in the reasoning chain
        """
        if self.separate_syntax_semantics:
            # Attention is computed on syntactics, summarization on semantics.
            syntactics, semantics = context
        else:
            syntactics = semantics = context
        # compute interactions with question words
        question = self.control_input(question)
        question = self.control_input_u[step](question)
        if self.control_feed_prev:
            newContControl = self.cont_control(torch.cat((prev_control, question), dim=1))
            newContControl = self.cont_control_act(newContControl)
        else:
            newContControl = question
        newContControl = torch.unsqueeze(newContControl, 1)
        interactions = newContControl * syntactics
        # compute attention distribution over words and summarize them accordingly
        logits = self.attn(interactions)
        # Padded word positions are pushed to -inf before the softmax.
        logits = mask_by_length(logits, question_lengths, device=syntactics.device)
        attn = F.softmax(logits, 1)
        attn = self.cw_attn_idty(attn)
        # apply soft attention to current context words
        next_control = (attn * semantics).sum(1)
        return next_control
class ReadUnit(nn.Module):
    """MAC read unit: retrieves information from the knowledge base (image
    features) conditioned on the current memory and control states.

    Optionally gates the retrieved vector against a set of learned object
    vectors ("lobs") attended by the control state.
    """

    def __init__(self, module_dim, gate=False, num_lobs=0, num_gt_lobs=0, use_feats='spatial'):
        super().__init__()
        # Learned lobs and ground-truth lobs are mutually exclusive.
        assert not ((num_lobs > 0) and (num_gt_lobs > 0))
        self.gate = gate
        self.use_feats = use_feats
        self.module_dim = module_dim
        self.num_gt_lobs = num_gt_lobs
        self.concat = nn.Linear(module_dim * 2, module_dim)
        # self.concat_2 = nn.Linear(module_dim, module_dim)
        self.attn = nn.Linear(module_dim, 1)
        self.dropout = nn.Dropout(0.15)
        self.kproj = nn.Linear(module_dim, module_dim)
        self.mproj = nn.Linear(module_dim, module_dim)
        self.activation = nn.ELU()
        # Identity pass-through; hook point for KB-attention probes.
        self.kb_attn_idty = nn.Identity()
        if gate:
            # NOTE: self.gate is rebound from the bool flag to a Linear here;
            # `if self.gate:` in forward() relies on nn.Module being truthy.
            self.gate = nn.Linear(module_dim, 1)
            self.gate_sigmoid = nn.Sigmoid()
            self.lobs = nn.Parameter(torch.randn(num_lobs, module_dim))
            # self.lobs_attn = nn.Linear(module_dim, 1)
            self.lobs_attn_idty = nn.Identity()
        else:
            self.gate = self.lobs = self.gate_sigmoid = self.lobs_attn_idty = None

    def forward(self, memory, know, control, memDpMask=None):
        """
        Args:
            memory: the cell's memory state
                [batchSize, memDim]
            know: representation of the knowledge base (image).
                [batchSize, kbSize (Height * Width), memDim]
                (or a (features, lengths) pair when use_feats == 'objects')
            control: the cell's control state
                [batchSize, ctrlDim]
            memDpMask: variational dropout mask (if used)
                [batchSize, memDim]
        """
        bsz = memory.size(0)
        ## Step 1: knowledge base / memory interactions
        # compute interactions between knowledge base and memory
        if self.use_feats == 'objects':
            # Object features arrive with per-sample object counts.
            know, objs_length = know
        else:
            know = know
            objs_length = None
        know = self.dropout(know)
        if memDpMask is not None:
            # Variational dropout: same mask across steps (training only).
            if self.training:
                memory = applyVarDpMask(memory, memDpMask, 0.85)
        else:
            memory = self.dropout(memory)
        know_proj = self.kproj(know)
        memory_proj = self.mproj(memory)
        memory_proj = memory_proj.unsqueeze(1)
        interactions = know_proj * memory_proj
        # project memory interactions back to hidden dimension
        interactions = torch.cat([interactions, know_proj], -1)
        interactions = self.concat(interactions)
        interactions = self.activation(interactions)
        # interactions = self.concat_2(interactions)
        ## Step 2: compute interactions with control
        control = control.unsqueeze(1)
        interactions = interactions * control
        interactions = self.activation(interactions)
        ## Step 3: sum attentions up over the knowledge base
        # transform vectors to attention distribution
        interactions = self.dropout(interactions)
        attn = self.attn(interactions).squeeze(-1)
        if objs_length is not None:
            # Mask padded object slots (gt lobs count as valid positions).
            attn = attn.unsqueeze(2)
            attn = mask_by_length(attn, objs_length + self.num_gt_lobs, device=know.device)
            attn = attn.squeeze(2)
        attn = F.softmax(attn, 1)
        attn = self.kb_attn_idty(attn)
        # sum up the knowledge base according to the distribution
        attn = attn.unsqueeze(-1)
        read = (attn * know).sum(1)
        if self.gate:
            # compute attention on lobs
            lobs = self.lobs.unsqueeze(0).expand(bsz, *self.lobs.size())
            lobs_interations = lobs * control
            lobs_attn = self.attn(lobs_interations).squeeze(-1)
            lobs_attn = F.softmax(lobs_attn, 1)
            lobs_attn = self.lobs_attn_idty(lobs_attn)
            lobs_attn = lobs_attn.unsqueeze(-1)
            lobs_read = (lobs_attn * lobs).sum(1)
            # compute gate: blend KB readout with the lobs readout.
            z = self.gate(control.squeeze(1))
            z = self.gate_sigmoid(z)
            read = z * read + (1 - z) * lobs_read
        return read
class WriteUnit(nn.Module):
    """MAC write unit: merges the retrieved info into the new memory state.

    Options:
        rtom: "replace" mode -- new memory is just the retrieved info
            (optionally combined with self-attention over past memories);
            when False, memory and info are concatenated and projected.
        self_attn: attend over previous control states and mix in the
            correspondingly-weighted previous memories.
        gate: sigmoid-gated interpolation between the new and old memory,
            driven by the control state (scalar gate when gate_shared).
    """

    def __init__(
            self,
            module_dim,
            rtom=True,
            self_attn=False,
            gate=False,
            gate_shared=False,
    ):
        super().__init__()
        self.rtom = rtom
        self.self_attn = self_attn
        self.gate = gate
        self.gate_shared = gate_shared
        if self.rtom is False:
            self.linear = nn.Linear(module_dim * 2, module_dim)
            if self_attn:
                # [memory, info, self-attended summary] -> module_dim
                self.linear = nn.Linear(module_dim * 3, module_dim)
                self.ctrl_attn_proj = nn.Linear(module_dim, module_dim)
                self.ctrl_attn_linear = nn.Linear(module_dim, 1)
            else:
                self.linear = nn.Linear(module_dim * 2, module_dim)
                self.ctrl_attn_proj = None
                self.ctrl_attn_linear = None
            if gate:
                if gate_shared:
                    dim_gate_out = 1  # one scalar gate shared across dims
                else:
                    dim_gate_out = module_dim  # per-dimension gate
                self.ctrl_gate_linear = nn.Linear(module_dim, dim_gate_out)
            else:
                self.ctrl_gate_linear = None
        else:
            # rtom mode needs no projection layers.
            self.linear = None

    def forward(self, memory, info, control=None, prev_controls=None, prev_memories=None):
        if self.rtom:
            # Replace: the retrieved info becomes the new memory directly.
            newMemory = info
        else:
            newMemory = torch.cat([memory, info], -1)
            if self.self_attn:
                # Attend over previous controls; summarize previous memories
                # with the same weights.
                control = self.ctrl_attn_proj(control)
                prev_controls = torch.cat(prev_controls, dim=1)
                interactions = prev_controls * control.unsqueeze(1)
                attn = self.ctrl_attn_linear(interactions).squeeze(-1)
                attn = F.softmax(attn, 1).unsqueeze(-1)
                prev_memories = torch.cat(prev_memories, dim=1)
                self_smry = (attn * prev_memories).sum(1)
                newMemory = torch.cat([newMemory, self_smry], dim=-1)
            newMemory = self.linear(newMemory)
            if self.gate:
                # Control-conditioned interpolation with the old memory.
                control = self.ctrl_gate_linear(control)
                z = torch.sigmoid(control)
                newMemory = newMemory * z + memory * (1 - z)
        return newMemory
class MACUnit(nn.Module):
    """The recurrent MAC cell: runs control/read/write for max_step steps.

    NOTE(review): __init__ and zero_state reference the bare name `cfg`
    (e.g. `self.cfg = cfg`, `cfg.model.init_mem`), but the parameter is
    `units_cfg` -- this only works if a module-level `cfg` exists at
    runtime, and looks like a latent bug. TODO confirm intended source.
    """

    def __init__(self, units_cfg, module_dim=512, max_step=4):
        super().__init__()
        self.cfg = cfg
        # Each sub-unit receives the shared options plus its own section.
        self.control = ControlUnit(
            **{
                'module_dim': module_dim,
                'max_step': max_step,
                **units_cfg.common,
                **units_cfg.control_unit
            })
        self.read = ReadUnit(
            **{
                'module_dim': module_dim,
                **units_cfg.common,
                **units_cfg.read_unit,
            })
        self.write = WriteUnit(
            **{
                'module_dim': module_dim,
                **units_cfg.common,
                **units_cfg.write_unit,
            })
        # Learned initial memory vector, broadcast over the batch.
        # (MACNetwork re-initializes this with nn.init.normal_ afterwards.)
        self.initial_memory = nn.Parameter(torch.FloatTensor(1, module_dim))
        if cfg.model.init_mem == 'random':
            self.initial_memory.data.normal_()
        else:
            self.initial_memory.data.zero_()
        self.module_dim = module_dim
        self.max_step = max_step

    def zero_state(self, batch_size, question):
        """Initial (control, memory, dropout-mask) for a batch; the initial
        control state is the question embedding itself."""
        initial_memory = self.initial_memory.expand(batch_size, self.module_dim)
        initial_control = question
        if self.cfg.TRAIN.VAR_DROPOUT:
            # One variational dropout mask reused across all steps.
            memDpMask = generateVarDpMask((batch_size, self.module_dim), 0.85)
        else:
            memDpMask = None
        return initial_control, initial_memory, memDpMask

    def forward(self, context, question, knowledge, question_lengths):
        batch_size = question.size(0)
        control, memory, memDpMask = self.zero_state(batch_size, question)
        # Histories are kept for the write unit's optional self-attention.
        controls = [control.unsqueeze(1)]
        memories = [memory.unsqueeze(1)]
        for i in range(self.max_step):
            # control unit
            control = self.control(question, context, question_lengths, i, prev_control=control)
            # read unit
            info = self.read(memory, knowledge, control, memDpMask)
            # write unit
            memory = self.write(memory, info, control,
                                prev_controls=controls, prev_memories=memories,
                                )
            # For write self attn
            controls.append(control.unsqueeze(1))
            memories.append(memory.unsqueeze(1))
        return memory
class InputUnit(nn.Module):
    """MAC input unit: encodes the image (spatial CNN stem or object
    features) and the question (embedding + LSTM), returning the question
    vector, the contextual word representations, and the image features."""

    def __init__(self,
                 vocab_size,
                 wordvec_dim=300,
                 rnn_dim=512,
                 module_dim=512,
                 bidirectional=True,
                 separate_syntax_semantics=False,
                 separate_syntax_semantics_embeddings=False,
                 stem_act='ELU',
                 in_channels=1024,
                 use_feats='spatial',
                 num_gt_lobs=0,
                 ):
        super(InputUnit, self).__init__()
        self.dim = module_dim
        self.use_feats = use_feats
        self.wordvec_dim = wordvec_dim
        self.separate_syntax_semantics = separate_syntax_semantics
        # Separate embeddings only make sense when syntax/semantics split is on.
        self.separate_syntax_semantics_embeddings = separate_syntax_semantics and separate_syntax_semantics_embeddings
        stem_act = acts[stem_act]
        if self.use_feats == 'spatial':
            # Two-layer conv stem over pre-extracted CNN feature maps.
            self.stem = nn.Sequential(nn.Dropout(p=0.18),
                                      nn.Conv2d(in_channels, module_dim, 3, 1, 1),
                                      stem_act(),
                                      nn.Dropout(p=0.18),
                                      nn.Conv2d(module_dim, module_dim, kernel_size=3, stride=1, padding=1),
                                      stem_act())
        elif self.use_feats == 'objects':
            # +4 for the bounding-box coordinates appended to each object.
            # NOTE(review): inferred from the constant; TODO confirm.
            self.stem = nn.Linear(in_channels + 4, module_dim)
        self.bidirectional = bidirectional
        if bidirectional:
            # Halve per-direction size so the concatenated output is rnn_dim.
            rnn_dim = rnn_dim // 2
        self.encoder = nn.LSTM(wordvec_dim, rnn_dim, batch_first=True, bidirectional=bidirectional)
        if self.separate_syntax_semantics_embeddings:
            # First half of each embedding is syntax, second half semantics.
            wordvec_dim *= 2
        self.encoder_embed = nn.Embedding(vocab_size, wordvec_dim)
        self.encoder_embed.weight.data.uniform_(-1, 1)
        self.embedding_dropout = nn.Dropout(p=0.15)
        self.question_dropout = nn.Dropout(p=0.08)
        self.num_gt_lobs = num_gt_lobs
        # self.gt_lobs = nn.Parameter(torch.randn(num_gt_lobs, module_dim))
        if num_gt_lobs > 0:
            # Learned "ground-truth lobs" vectors appended to the KB.
            self.gt_lobs = nn.Parameter(torch.randn(num_gt_lobs, module_dim))
        else:
            self.gt_lobs = None

    def forward(self, image, question, question_len):
        b_size = question.size(0)
        # get image features
        if self.use_feats == 'spatial':
            img = image
        elif self.use_feats == 'objects':
            # Object mode: image is a (features, lengths) pair.
            img = image[0]
        img = self.stem(img)
        if self.use_feats == 'spatial':
            img = img
            # Flatten spatial map to a sequence of H*W feature vectors.
            img = img.view(b_size, self.dim, -1)
            img = img.permute(0,2,1)
            if self.num_gt_lobs > 0:
                gt_lobs = self.gt_lobs.expand(b_size, *self.gt_lobs.size())
                img = torch.cat([img, gt_lobs], dim=1)
        elif self.use_feats == 'objects':
            if self.num_gt_lobs > 0:
                # Insert the gt lobs right after each sample's valid objects.
                img_with_lobs = []
                for t, length in zip(img, image[1]):
                    img_with_lobs.append(torch.cat((t[:length], self.gt_lobs, t[length:])))
                img = torch.stack(img_with_lobs)
            img = (img, image[1])
        # get question and contextual word embeddings
        embed = self.encoder_embed(question)
        embed = self.embedding_dropout(embed)
        if self.separate_syntax_semantics_embeddings:
            # Split the doubled embedding: LSTM sees syntax, attention
            # summarization uses raw semantics.
            semantics = embed[:, :, self.wordvec_dim:]
            embed = embed[:, :, :self.wordvec_dim]
        else:
            semantics = embed
        # NOTE(review): pack_padded_sequence without enforce_sorted assumes
        # questions are sorted by decreasing length -- TODO confirm caller.
        embed = nn.utils.rnn.pack_padded_sequence(embed, question_len, batch_first=True)
        contextual_words, (question_embedding, _) = self.encoder(embed)
        if self.bidirectional:
            # Concatenate last hidden states of both directions.
            question_embedding = torch.cat([question_embedding[0], question_embedding[1]], -1)
        question_embedding = self.question_dropout(question_embedding)
        contextual_words, _ = nn.utils.rnn.pad_packed_sequence(contextual_words, batch_first=True)
        if self.separate_syntax_semantics:
            contextual_words = (contextual_words, semantics)
        return question_embedding, contextual_words, img
class OutputUnit(nn.Module):
    """Final answer classifier.

    Projects the question embedding, concatenates it with the final MAC
    memory state, and maps the pair to answer logits.
    """
    def __init__(self, module_dim=512, num_answers=28):
        super(OutputUnit, self).__init__()
        self.question_proj = nn.Linear(module_dim, module_dim)
        self.classifier = nn.Sequential(
            nn.Dropout(0.15),
            nn.Linear(module_dim * 2, module_dim),
            nn.ELU(),
            nn.Dropout(0.15),
            nn.Linear(module_dim, num_answers),
        )
    def forward(self, question_embedding, memory):
        """Return (batch, num_answers) logits from memory + question."""
        projected_question = self.question_proj(question_embedding)
        combined = torch.cat([memory, projected_question], 1)
        return self.classifier(combined)
class MACNetwork(nn.Module):
    """Full MAC network: input encoder, recurrent MAC cell and classifier.

    Built from a config object whose sub-unit sections are forwarded as
    keyword arguments to the respective units.
    """
    def __init__(self, cfg, vocab, num_answers=28):
        super().__init__()
        self.cfg = cfg
        # NOTE(review): getattr without a default raises AttributeError when
        # 'separate_syntax_semantics' is absent from cfg.model; presumably
        # the config schema always defines it -- confirm.
        if getattr(cfg.model, 'separate_syntax_semantics') is True:
            # propagate the flag down into the sub-unit configs (mutates cfg)
            cfg.model.input_unit.separate_syntax_semantics = True
            cfg.model.control_unit.separate_syntax_semantics = True
        # copy shared settings into the unit-specific config sections
        cfg.model.input_unit.use_feats = cfg.model.use_feats
        cfg.model.read_unit.use_feats = cfg.model.use_feats
        cfg.model.read_unit.num_gt_lobs = cfg.model.num_gt_lobs
        encoder_vocab_size = len(vocab['question_token_to_idx'])
        self.input_unit = InputUnit(
            vocab_size=encoder_vocab_size,
            num_gt_lobs=cfg.model.num_gt_lobs,
            **cfg.model.common,
            **cfg.model.input_unit,
        )
        self.output_unit = OutputUnit(
            num_answers=num_answers,
            **cfg.model.common,
            **cfg.model.output_unit,
        )
        self.mac = MACUnit(
            cfg.model,
            # num_gt_lobs=cfg.model.num_gt_lobs,
            max_step=cfg.model.max_step,
            **cfg.model.common,
        )
        init_modules(self.modules(), w_init=cfg.TRAIN.WEIGHT_INIT)
        # re-initialize embedding and MAC memory after the generic init pass
        nn.init.uniform_(self.input_unit.encoder_embed.weight, -1.0, 1.0)
        nn.init.normal_(self.mac.initial_memory)
    def forward(self, image, question, question_len):
        """Run the full pipeline and return answer logits."""
        # get image, word, and sentence embeddings
        question_embedding, contextual_words, img = self.input_unit(image, question, question_len)
        # apply MacCell
        memory = self.mac(contextual_words, question_embedding, img, question_len)
        # get classification
        out = self.output_unit(question_embedding, memory)
        return out
|
"""A shot in MPF."""
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.enable_disable_mixin import EnableDisableMixin
import mpf.core.delays
from mpf.core.events import event_handler
from mpf.core.mode import Mode
from mpf.core.mode_device import ModeDevice
from mpf.core.player import Player
@DeviceMonitor("state", "state_name")
class Shot(EnableDisableMixin, ModeDevice):
    """A device which represents a generic shot."""
    config_section = 'shots'
    collection = 'shots'
    class_label = 'shot'
    monitor_enabled = False
    """Class attribute which specifies whether any monitors have been registered
    to track shots.
    """
    __slots__ = ["delay", "active_sequences", "active_delays", "running_show", "_handlers"]
    def __init__(self, machine, name):
        """Initialise shot."""
        # If this device is setup in a machine-wide config, make sure it has
        # a default enable event.
        super(Shot, self).__init__(machine, name)
        # timer manager for the per-switch delay_switch timers
        self.delay = mpf.core.delays.DelayManager(self.machine)
        self.active_sequences = list()
        """List of tuples: (id, current_position_index, next_switch)"""
        # switch names whose delay timer is currently running; while
        # non-empty, hits are ignored (see hit())
        self.active_delays = set()
        # currently playing profile/state show, or None
        self.running_show = None
        # event-handler keys, kept so they can be removed on disable/removal
        self._handlers = []
    def device_loaded_in_mode(self, mode: Mode, player: Player):
        """Add device to a mode that was already started.
        Automatically enables the shot and calls the method
        that's usually called when a player's turn starts since that was missed
        since the mode started after that.
        """
        super().device_loaded_in_mode(mode, player)
        self._update_show()
    def validate_and_parse_config(self, config: dict, is_mode_config: bool, debug_prefix: str = None):
        """Validate and parse shot config.

        Merges the single 'switch' entries into 'switches' and rejects
        switches carrying the playfield's '<playfield>_active' tag, which
        this device already handles internally.
        """
        config = super().validate_and_parse_config(config, is_mode_config, debug_prefix)
        for switch in config['switch']:
            if switch not in config['switches']:
                config['switches'].append(switch)
        for switch in config['switches'] + list(config['delay_switch'].keys()):
            if '{}_active'.format(config['playfield'].name) in switch.tags:
                self.raise_config_error(
                    "Shot '{}' uses switch '{}' which has a "
                    "'{}_active' tag. This is handled internally by the device. Remove the "
                    "redundant '{}_active' tag from that switch.".format(
                        self.name, switch.name, config['playfield'].name,
                        config['playfield'].name), 1)
        return config
    def _register_switch_handlers(self):
        # Handlers are registered at the owning mode's priority so higher
        # modes can block lower ones via the "shot" blocking facility.
        self._handlers = []
        for switch in self.config['switches']:
            self._handlers.append(self.machine.events.add_handler("{}_active".format(switch.name),
                                                                  self.event_hit, priority=self.mode.priority,
                                                                  blocking_facility="shot"))
        for switch in list(self.config['delay_switch'].keys()):
            self._handlers.append(self.machine.events.add_handler("{}_active".format(switch.name),
                                                                  self._delay_switch_hit,
                                                                  switch_name=switch.name,
                                                                  priority=self.mode.priority,
                                                                  blocking_facility="shot"))
    def _remove_switch_handlers(self):
        # Also cancels any pending delay timers so no stale callbacks fire.
        self.delay.clear()
        self.active_delays = set()
        self.machine.events.remove_handlers_by_keys(self._handlers)
        self._handlers = []
    @event_handler(6)
    def event_advance(self, force=False, **kwargs):
        """Handle advance control event."""
        del kwargs
        self.advance(force)
    def advance(self, force=False) -> bool:
        """Advance a shot profile forward.
        If this profile is at the last step and configured to loop, it will
        roll over to the first step. If this profile is at the last step and not
        configured to loop, this method has no effect.

        Returns True if the state changed, False otherwise.
        """
        if not self.enabled and not force:
            return False
        if not self.player:
            # no player no state
            return False
        profile_name = self.config['profile'].name
        state = self._get_state()
        self.debug_log("Advancing 1 step. Profile: %s, "
                       "Current State: %s", profile_name, state)
        if state + 1 >= len(self.config['profile'].config['states']):
            if self.config['profile'].config['loop']:
                self._set_state(0)
            else:
                return False
        else:
            self.debug_log("Advancing shot by one step.")
            self._set_state(state + 1)
        self._update_show()
        return True
    def _stop_show(self):
        if not self.running_show:
            return
        self.running_show.stop()
        self.running_show = None
    @property
    def can_rotate(self):
        """Return if the shot can be rotated according to its profile."""
        state = self.state_name
        return state not in self.profile.config['state_names_to_not_rotate']
    @property
    def state_name(self):
        """Return current state name."""
        if not self.player:
            # no player no state
            return "None"
        return self.config['profile'].config['states'][self._get_state()]['name']
    @property
    def state(self):
        """Return current state index."""
        return self._get_state()
    @property
    def profile_name(self):
        """Return profile name."""
        return self.config['profile'].name
    @property
    def profile(self):
        """Return profile."""
        return self.config['profile']
    def _get_state(self):
        # State is stored per-player in a player variable named
        # "shot_<name>" so it survives mode stop/start within a ball.
        if not self.player:
            return 0
        return self.player["shot_{}".format(self.name)]
    def _set_state(self, state):
        # Writes the player variable and notifies virtual-change monitors
        # for both the numeric state and its name.
        old = self.player["shot_{}".format(self.name)]
        try:
            old_name = self.state_name
        except IndexError:
            # In this case, the shot profile was changed and the old state index
            # doesn't exist in the new profile. That's okay, but we can't include
            # the old state name in our event.
            old_name = "unknown"
        self.player["shot_{}".format(self.name)] = state
        self.notify_virtual_change("state", old, state)
        self.notify_virtual_change("state_name", old_name, self.state_name)
    def _get_profile_settings(self):
        # Settings dict of the profile state the shot is currently in.
        state = self._get_state()
        return self.profile.config['states'][state]
    def _update_show(self):
        # Sync the running show with the current enable/profile state.
        if not self.enabled and not self.profile.config['show_when_disabled']:
            self._stop_show()
            return
        state = self._get_state()
        state_settings = self.profile.config['states'][state]
        if state_settings['show']:  # there's a show specified this state
            self._play_show(settings=state_settings)
        elif self.profile.config['show']:
            # no show for this state, but we have a profile root show
            self._play_show(settings=state_settings, start_step=state + 1)
        # if neither if/elif above happens, it means the current step has no
        # show but the previous step had one. We stop the previous show if there is one
        elif self.running_show:
            self._stop_show()
    def _play_show(self, settings, start_step=None):
        # Default for manual_advance (when not set explicitly): False if the
        # state defines its own show, True when falling back to the profile
        # root show (whose steps map 1:1 to profile states via start_step).
        manual_advance = settings['manual_advance']
        if settings['show']:
            show_name = settings['show']
            if settings['manual_advance'] is None:
                manual_advance = False
        else:
            show_name = self.profile.config['show']
            if settings['manual_advance'] is None:
                manual_advance = True
        # state-level show_tokens override device-level ones on key clashes
        if settings['show_tokens'] and self.config['show_tokens']:
            show_tokens = dict(settings['show_tokens'])
            show_tokens.update(self.config['show_tokens'])
        elif settings['show_tokens']:
            show_tokens = settings['show_tokens']
        elif self.config['show_tokens']:
            show_tokens = self.config['show_tokens']
        else:
            show_tokens = {}
        if show_tokens:
            # tokens are templates; evaluate them to concrete values now
            show_tokens = {k: v.evaluate({})
                           for k, v in show_tokens.items()}
        priority = settings['priority'] + self.mode.priority
        if not start_step:
            start_step = settings['start_step']
        self.debug_log("Playing show: %s. %s", show_name, settings)
        show_config = self.machine.show_controller.create_show_config(
            show_name, priority=priority, speed=settings.get("speed"),
            loops=settings.get("loops", -1), sync_ms=settings.get("sync_ms"), manual_advance=manual_advance,
            show_tokens=show_tokens, events_when_played=settings.get("events_when_played"),
            events_when_stopped=settings.get("events_when_stopped"),
            events_when_looped=settings.get("events_when_looped"),
            events_when_paused=settings.get("events_when_paused"),
            events_when_resumed=settings.get("events_when_resumed"),
            events_when_advanced=settings.get("events_when_advanced"),
            events_when_stepped_back=settings.get("events_when_stepped_back"),
            events_when_updated=settings.get("events_when_updated"),
            events_when_completed=settings.get("events_when_completed"))
        self.running_show = self.machine.show_controller.replace_or_advance_show(self.running_show, show_config,
                                                                                 start_step)
    def device_removed_from_mode(self, mode):
        """Remove this shot device.
        Destroys it and removes it from the shots collection.
        """
        super().device_removed_from_mode(mode)
        self._remove_switch_handlers()
        if self.running_show:
            self.running_show.stop()
            self.running_show = None
    @event_handler(5)
    def event_hit(self, **kwargs):
        """Handle hit control event."""
        success = self.hit()
        if not success:
            return None
        # A blocking profile raises the minimum priority for the "shot"
        # facility so lower-priority shots on the same switch are skipped.
        if self.profile.config['block']:
            min_priority = kwargs.get("_min_priority", {"all": 0})
            min_shots = min_priority.get("shot", 0)
            min_priority["shot"] = self.mode.priority if self.mode.priority > min_shots else min_shots
            return {"_min_priority": min_priority}
        return None
    def hit(self) -> bool:
        """Advance the currently-active shot profile.
        Note that the shot must be enabled in order for this hit to be
        processed.
        Returns true if the shot was enabled or false if the hit has been ignored.
        """
        # mark the playfield active no matter what
        self.config['playfield'].mark_playfield_active_from_device_action()
        if not self.enabled or not self.player:
            return False
        # Stop if there is an active delay but no sequence
        if self.active_delays:
            return False
        profile_settings = self._get_profile_settings()
        if not profile_settings:
            return False
        state = profile_settings['name']
        self.debug_log("Hit! Profile: %s, State: %s",
                       self.profile_name, state)
        if self.profile.config['advance_on_hit']:
            self.debug_log("Advancing shot because advance_on_hit is True.")
            advancing = self.advance()
        else:
            self.debug_log('Not advancing shot')
            advancing = False
        self._notify_monitors(self.config['profile'].name, state)
        self.machine.events.post('{}_hit'.format(self.name),
                                 profile=self.profile_name, state=state, advancing=advancing)
        '''event: (name)_hit
        desc: The shot called (name) was just hit.
        Note that there are four events posted when a shot is hit, each
        with variants of the shot name, profile, and current state,
        allowing you to key in on the specific granularity you need.
        args:
        profile: The name of the profile that was active when hit.
        state: The name of the state the profile was in when it was hit'''
        self.machine.events.post('{}_{}_hit'.format(self.name, self.profile_name),
                                 profile=self.profile_name, state=state, advancing=advancing)
        '''event: (name)_(profile)_hit
        desc: The shot called (name) was just hit with the profile (profile)
        active.
        Note that there are four events posted when a shot is hit, each
        with variants of the shot name, profile, and current state,
        allowing you to key in on the specific granularity you need.
        Also remember that shots can have more than one active profile at a
        time (typically each associated with a mode), so a single hit to this
        shot might result in this event being posted multiple times with
        different (profile) values.
        args:
        profile: The name of the profile that was active when hit.
        state: The name of the state the profile was in when it was hit'''
        self.machine.events.post('{}_{}_{}_hit'.format(self.name, self.profile_name, state),
                                 profile=self.profile_name, state=state, advancing=advancing)
        '''event: (name)_(profile)_(state)_hit
        desc: The shot called (name) was just hit with the profile (profile)
        active in the state (state).
        Note that there are four events posted when a shot is hit, each
        with variants of the shot name, profile, and current state,
        allowing you to key in on the specific granularity you need.
        Also remember that shots can have more than one active profile at a
        time (typically each associated with a mode), so a single hit to this
        shot might result in this event being posted multiple times with
        different (profile) and (state) values.
        args:
        profile: The name of the profile that was active when hit.
        state: The name of the state the profile was in when it was hit'''
        self.machine.events.post('{}_{}_hit'.format(self.name, state),
                                 profile=self.profile_name, state=state, advancing=advancing)
        '''event: (name)_(state)_hit
        desc: The shot called (name) was just hit while in the profile (state).
        Note that there are four events posted when a shot is hit, each
        with variants of the shot name, profile, and current state,
        allowing you to key in on the specific granularity you need.
        Also remember that shots can have more than one active profile at a
        time (typically each associated with a mode), so a single hit to this
        shot might result in this event being posted multiple times with
        different (profile) and (state) values.
        args:
        profile: The name of the profile that was active when hit.
        state: The name of the state the profile was in when it was hit'''
        return True
    def _notify_monitors(self, profile, state):
        if Shot.monitor_enabled and "shots" in self.machine.monitors:
            for callback in self.machine.monitors['shots']:
                callback(name=self.name, profile=profile, state=state)
    @event_handler(4)
    def _delay_switch_hit(self, switch_name, **kwargs):
        # A delay switch was hit: (re)start its timer; hits are ignored
        # while any delay timer is active (see hit()).
        del kwargs
        self.config['playfield'].mark_playfield_active_from_device_action()
        if not self.enabled:
            return
        self.delay.reset(name=switch_name + '_delay_timer',
                         ms=self.config['delay_switch']
                         [self.machine.switches[switch_name]],
                         callback=self._release_delay,
                         switch=switch_name)
        self.active_delays.add(switch_name)
    def _release_delay(self, switch):
        self.active_delays.remove(switch)
    def jump(self, state, force=True, force_show=False):
        """Jump to a certain state in the active shot profile.
        Args:
            state: int of the state number you want to jump to. Note that states
                are zero-based, so the first state is 0.
            force: if true, will jump even if the shot is disabled
            force_show: if true, will update the profile show even if the jumped
                state index is the same as before the jump
        """
        self.debug_log("Received jump request. State: %s, Force: %s", state, force)
        if not self.enabled and not force:
            self.debug_log("Profile is disabled and force is False. Not jumping")
            return
        if not self.player:
            # no player no state
            return
        current_state = self._get_state()
        if state == current_state and not force_show:
            self.debug_log("Shot is already in the jump destination state")
            return
        self.debug_log("Jumping to profile state '%s'", state)
        self._set_state(state)
        self._update_show()
    @event_handler(1)
    def event_reset(self, **kwargs):
        """Handle reset control event."""
        del kwargs
        self.reset()
    def reset(self):
        """Reset the shot profile for the passed mode back to the first state (State 0) and reset all sequences."""
        self.debug_log("Resetting.")
        self.jump(state=0)
    @event_handler(2)
    def event_restart(self, **kwargs):
        """Handle restart control event."""
        del kwargs
        self.restart()
    def restart(self):
        """Restart the shot profile by calling reset() and enable().
        Automatically called when one of the restart_events is called.
        """
        self.reset()
        self.enable()
    def _enable(self):
        super()._enable()
        self._register_switch_handlers()
        self._update_show()
    def _disable(self):
        super()._disable()
        self._remove_switch_handlers()
        self._update_show()
|
tb = [-1] * 1005  # memo table: tb[i] caches fib(i) % 1_000_000_007 (-1 = unset)

def fib_dp(n):
    """Return the n-th Fibonacci number modulo 1,000,000,007.

    Computed bottom-up (iteratively) while filling the shared memo table.
    Bug fix: the previous recursive version raised RecursionError for n
    close to 1000 because CPython's default recursion limit is 1000 while
    the table allows indices up to 1004.
    """
    global tb
    if n == 0:
        return 0
    if n == 1:
        return 1
    if tb[n] != -1:
        return tb[n]
    prev, cur = 0, 1  # fib(0), fib(1)
    for i in range(2, n + 1):
        prev, cur = cur, (prev + cur) % 1000000007
        tb[i] = cur  # memoize every intermediate value on the way up
    return tb[n]
# Read one integer n per line until EOF, printing fib(n) mod 1e9+7 for each.
while True:
    try:
        n = int(input())
        f = fib_dp(n)
        print("fib({})= {}".format(n, f))
    except EOFError:
        # end of input stream: stop cleanly
        break
|
# VUnit test-runner entry point: collects all VHDL sources in the current
# directory into library "lib" and hands control to VUnit's CLI.
from vunit import VUnit
# Create VUnit instance by parsing command line arguments
vu = VUnit.from_argv()
vu.add_osvvm()  # make the OSVVM verification library available to testbenches
# Create library 'lib'
lib = vu.add_library("lib")
# Add all files ending in .vhd in current working directory to library
lib.add_source_files("*.vhd")
# Run vunit function
vu.main()  # runs the selected tests and exits with an appropriate status
|
from __future__ import print_function
from Components.config import config
from enigma import eTimer
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
from RSSFeed import BaseFeed, UniversalFeed
from twisted.web.client import getPage
from xml.etree.cElementTree import fromstring as cElementTree_fromstring
from GoogleReader import GoogleReader
NOTIFICATIONID = 'SimpleRSSUpdateNotification'  # id used to add/remove our popup notification
update_callbacks = []  # module-global list of callables invoked after feed updates
class RSSPoller:
    """Keeps all Feed and takes care of (automatic) updates"""
    def __init__(self, poll = True):
        # Timer
        self.poll_timer = eTimer()
        self.poll_timer_conn = self.poll_timer.timeout.connect(self.poll)
        self.do_poll = poll
        # this indicates we're reloading the list of feeds
        self.reloading = False
        # pseudo-feed that accumulates items found during auto-updates
        self.newItemFeed = BaseFeed(
            "",
            _("New Items"),
            _("New Items since last Auto-Update"),
        )
        # Generate Feeds
        self.feeds = [
            UniversalFeed(
                x.uri.value,
                x.autoupdate.value
            )
            for x in config.plugins.simpleRSS.feed
        ]
        if not config.plugins.simpleRSS.enable_google_reader.value:
            if poll:
                # kick off the first poll immediately (singleshot timer)
                self.poll_timer.start(0, 1)
        else:
            # Google Reader enabled: fetch subscriptions first, polling
            # starts from the login/subscription callbacks below
            self.googleReader = GoogleReader(config.plugins.simpleRSS.google_username.value, config.plugins.simpleRSS.google_password.value)
            self.googleReader.login().addCallback(self.googleLoggedIn).addErrback(self.googleLoginFailed)
        # Initialize Vars
        self.current_feed = 0
    def googleLoggedIn(self, sid = None):
        """Login succeeded: fetch the Google Reader subscription list."""
        self.googleReader.getSubscriptionList().addCallback(self.googleSubscriptionList).addErrback(self.googleSubscriptionFailed)
    def googleLoginFailed(self, res = None):
        """Login failed: inform the user and resume normal polling."""
        AddPopup(
            _("Failed to login to Google Reader."),
            MessageBox.TYPE_ERROR,
            5,
        )
        self.reloading = False
        if self.do_poll:
            self.poll_timer.start(0, 1)
    def googleSubscriptionList(self, subscriptions = None):
        """Merge fetched subscriptions into our feed list and start polling."""
        self.feeds.extend(subscriptions)
        self.reloading = False
        if self.do_poll:
            self.doCallback()
            self.poll_timer.start(0, 1)
    def googleSubscriptionFailed(self, res = None):
        """Subscription fetch failed: inform the user and resume polling."""
        AddPopup(
            _("Failed to get subscriptions from Google Reader."),
            MessageBox.TYPE_ERROR,
            5,
        )
        self.reloading = False
        if self.do_poll:
            self.poll_timer.start(0, 1)
    def addCallback(self, callback):
        # callbacks are module-global so they survive poller re-creation
        if callback not in update_callbacks:
            update_callbacks.append(callback)
    def removeCallback(self, callback):
        if callback in update_callbacks:
            update_callbacks.remove(callback)
    def doCallback(self, id = None):
        """Invoke all registered update callbacks, best-effort."""
        for callback in update_callbacks:
            try:
                callback(id)
            except Exception:
                # deliberately swallow: one broken listener must not stop
                # the others or the poll loop
                pass
    def error(self, error = ""):
        print("[SimpleRSS] failed to fetch feed:", error)
        # Assume its just a temporary failure and jump over to next feed
        self.next_feed()
    def _gotPage(self, data, id = None, callback = False, errorback = None):
        # workaround: exceptions in gotPage-callback were ignored
        try:
            self.gotPage(data, id)
            if callback:
                self.doCallback(id)
        except NotImplementedError as errmsg:
            # Don't show this error when updating in background
            if id is not None:
                AddPopup(
                    _("Sorry, this type of feed is unsupported:\n%s") % (str(errmsg)),
                    MessageBox.TYPE_INFO,
                    5,
                )
            else:
                # We don't want to stop updating just because one feed is broken
                self.next_feed()
        except Exception:
            import traceback, sys
            traceback.print_exc(file=sys.stdout)
            # Errorback given, call it (asumme we don't need do restart timer!)
            if errorback is not None:
                errorback()
                return
            # Assume its just a temporary failure and jump over to next feed
            self.next_feed()
    def gotPage(self, data, id = None):
        """Parse a fetched feed document.

        With an explicit id this is a single-feed poll; otherwise it is part
        of the rotating auto-update and new items are collected.
        """
        feed = cElementTree_fromstring(data)
        # For Single-Polling
        if id is not None:
            self.feeds[id].gotFeed(feed)
            print("[SimpleRSS] single feed parsed...")
            return
        new_items = self.feeds[self.current_feed].gotFeed(feed)
        print("[SimpleRSS] feed parsed...")
        # Append new items to locally bound ones
        if new_items is not None:
            self.newItemFeed.history.extend(new_items)
        # Start Timer so we can either fetch next feed or show new_items
        self.next_feed()
    def singlePoll(self, id, callback = False, errorback = None):
        """Fetch a single feed by index, outside the auto-update rotation."""
        getPage(self.feeds[id].uri).addCallback(self._gotPage, id, callback, errorback).addErrback(errorback)
    def poll(self):
        """Timer callback driving the feed rotation state machine."""
        # Reloading, reschedule
        if self.reloading:
            print("[SimpleRSS] timer triggered while reloading, rescheduling")
            self.poll_timer.start(10000, 1)
        # End of List
        elif len(self.feeds) <= self.current_feed:
            # New Items
            if self.newItemFeed.history:
                print("[SimpleRSS] got new items, calling back")
                self.doCallback()
                # Inform User
                update_notification_value = config.plugins.simpleRSS.update_notification.value
                if update_notification_value == "preview":
                    from RSSScreens import RSSFeedView
                    from Tools.Notifications import AddNotificationWithID, RemovePopup
                    RemovePopup(NOTIFICATIONID)
                    AddNotificationWithID(
                        NOTIFICATIONID,
                        RSSFeedView,
                        self.newItemFeed,
                        newItems = True
                    )
                elif update_notification_value == "notification":
                    AddPopup(
                        _("Received %d new news item(s).") % (len(self.newItemFeed.history)),
                        MessageBox.TYPE_INFO,
                        5,
                        NOTIFICATIONID
                    )
                elif update_notification_value == "ticker":
                    from RSSTickerView import tickerView
                    if not tickerView:
                        print("[SimpleRSS] missing ticker instance, something with my code is wrong :-/")
                    else:
                        tickerView.display(self.newItemFeed)
            # No new Items
            else:
                print("[SimpleRSS] no new items")
            # restart the rotation after the configured interval (minutes)
            self.current_feed = 0
            self.poll_timer.startLongTimer(config.plugins.simpleRSS.interval.value*60)
        # It's updating-time
        else:
            # Assume we're cleaning history if current feed is 0
            clearHistory = self.current_feed == 0
            if config.plugins.simpleRSS.update_notification.value != "none":
                from Tools import Notifications
                # support both old and new enigma2 Notifications APIs
                if hasattr(Notifications, 'notificationQueue'):
                    notifications = Notifications.notificationQueue.queue
                    current_notifications = Notifications.notificationQueue.current
                    handler = lambda note: (note.fnc, note.screen, note.args, note.kwargs, note.id)
                    handler_current = lambda note: (note[0].id,)
                else:
                    notifications = Notifications.notifications
                    current_notifications = Notifications.current_notifications
                    handler_current = handler = lambda note: note
                for x in current_notifications:
                    if handler_current(x)[0] == NOTIFICATIONID:
                        print("[SimpleRSS] timer triggered while preview on screen, rescheduling")
                        self.poll_timer.start(10000, 1)
                        return
                if clearHistory:
                    for x in notifications:
                        if handler(x)[4] == NOTIFICATIONID:
                            print("[SimpleRSS] wont wipe history because it was never read")
                            clearHistory = False
                            break
            if clearHistory:
                del self.newItemFeed.history[:]
            # Feed supposed to autoupdate
            feed = self.feeds[self.current_feed]
            if feed.autoupdate:
                getPage(feed.uri).addCallback(self._gotPage).addErrback(self.error)
            # Go to next feed
            else:
                print("[SimpleRSS] passing feed")
                self.next_feed()
    def next_feed(self):
        """Advance the rotation pointer and schedule the next poll tick."""
        self.current_feed += 1
        self.poll_timer.start(1000, 1)
    def shutdown(self):
        # drop the timer connection first so no further ticks arrive
        self.poll_timer_conn = None
        self.poll_timer = None
        self.do_poll = False
    def triggerReload(self):
        """Rebuild self.feeds from config, reusing existing feed objects."""
        self.reloading = True
        newfeeds = []
        oldfeeds = self.feeds
        found = False
        for x in config.plugins.simpleRSS.feed:
            for feed in oldfeeds:
                if x.uri.value == feed.uri:
                    # Update possibly different autoupdate value
                    feed.autoupdate = x.autoupdate.value
                    newfeeds.append(feed) # Append to new Feeds
                    oldfeeds.remove(feed) # Remove from old Feeds
                    found = True
                    break
            if not found:
                newfeeds.append(
                    UniversalFeed(
                        x.uri.value,
                        x.autoupdate.value
                    ))
            found = False
        self.feeds = newfeeds
        if config.plugins.simpleRSS.enable_google_reader.value:
            self.googleReader = GoogleReader(config.plugins.simpleRSS.google_username.value, config.plugins.simpleRSS.google_password.value)
            self.googleReader.login().addCallback(self.googleLoggedIn).addErrback(self.googleLoginFailed)
        else:
            self.reloading = False
|
import importlib
import os
import requests
import uuid
import shutil
from resources.series.model.serie import Serie
from resources.series.common.utils import Utils
from resources.series.common.log import Log
from resources.series.common.code import Code
class Subtitle:
    """Static helpers that search, download and unpack episode subtitles
    through the plugins registered under the "subtitle" plugin group."""

    @staticmethod
    def _getPath():
        """Return the directory subtitles are unpacked into."""
        return Utils.getDataDir() + "/subtitles"

    @staticmethod
    def updateSerie(serie, save = True):
        """Search a subtitle for every episode of *serie* that lacks one.

        Tries each subtitle plugin with each known name of the serie,
        preferring the name that previously worked for that plugin.
        Mutates and returns *serie*; persists it when *save* is True.
        """
        for episode in serie.episodes:
            if episode.subtitle: continue
            for plugin_module, plugin_class in Utils.getPlugins("subtitle").items():
                if plugin_class in serie.__dict__:
                    # Move the name that matched this plugin before to the
                    # front so it is tried first.
                    names = list(serie.names)
                    names.remove(serie.__dict__[plugin_class])
                    names.insert(0, serie.__dict__[plugin_class])
                else: names = serie.names
                for name in names:
                    link, code = Subtitle.searchUrl(name, episode, plugin_module, plugin_class)
                    Log.info(serie = name, season = episode.season, episode = episode.episode, plugin = plugin_class, code = code, message = "Searching subtitle")
                    if not link: continue
                    episode.subtitle = True
                    serie.__dict__[plugin_class] = name  # remember the winning name
                    if save: serie.write()
                    break
                # NOTE(review): this stops trying further plugins when the
                # episode has a torrent -- looks like it may have been meant
                # to test episode.subtitle instead; confirm before changing.
                if episode.torrent: break
        return serie

    @staticmethod
    def download(url, serie, episode):
        """Download the subtitle archive at *url* and unpack it.

        Returns (subtitle_file, Code.found) on success,
        (None, Code.connectionError) on any network/filesystem failure, or
        (None, Code.notUnpack) when the archive cannot be unpacked.
        """
        id = str(uuid.uuid4())
        working_dir = Subtitle._getPath() + "/" + id
        if not os.path.exists(working_dir): os.makedirs(working_dir)
        try:
            response = requests.get(url)
            if response.status_code != 200:
                # Bug fix: this previously returned Subtitle.connectionError,
                # an attribute that does not exist (AttributeError at
                # runtime); the constant lives on Code. Also clean up the
                # now-unused working directory.
                shutil.rmtree(working_dir)
                return None, Code.connectionError
            # NOTE(review): the raw Content-Disposition header value is used
            # as a file name (it typically looks like
            # 'attachment; filename="..."') -- verify against the servers
            # these plugins talk to.
            if "content-disposition" in response.headers: download_path = working_dir + "/" + response.headers['content-disposition']
            elif "rar" in response.headers['Content-Type']: download_path = working_dir + "/" + id + ".rar"
            elif "zip" in response.headers['Content-Type']: download_path = working_dir + "/" + id + ".zip"
            else: download_path = working_dir + "/" + id + ".srt"
            # the with-block closes the file; the old extra close() was a no-op
            with open(download_path, 'wb') as download_file: download_file.write(response.content)
            response.close()
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # still propagate; any request or filesystem error is reported
            # as a connection error.
            shutil.rmtree(working_dir)
            return None, Code.connectionError
        subtitle_file, success = Utils.unpack(download_path, working_dir, Subtitle._getPath())
        shutil.rmtree(working_dir)
        if success: return subtitle_file, Code.found
        else: return None, Code.notUnpack

    @staticmethod
    def searchUrl(name, episode, plugin_module, plugin_class):
        """Resolve the named plugin and ask it for a (link, code) pair."""
        plugin_module = importlib.import_module(plugin_module)
        plugin_class = getattr(plugin_module, plugin_class)
        return plugin_class.searchUrl(name, episode)

    @staticmethod
    def searchCandidates(name, episode, plugin_module, plugin_class):
        """Resolve the named plugin and ask it for candidate subtitles."""
        plugin_module = importlib.import_module(plugin_module)
        plugin_class = getattr(plugin_module, plugin_class)
        return plugin_class.searchCandidates(name, episode)
import sys
import os
import re
import subprocess
import shutil
import glob
import logging
import hashlib
def get_log():
    """Return the root logger configured at INFO with a stderr handler.

    Bug fix: the previous version attached a fresh StreamHandler on every
    call, so calling get_log() more than once caused each message to be
    printed multiple times. Now the handler is added only when the root
    logger has none yet.
    """
    log = logging.getLogger()
    log.setLevel(logging.INFO)
    if not log.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("[%(levelname)s] %(message)s"))
        log.addHandler(handler)
    return log
def run(cmd, ignore_return_codes=[], stdout=None, log=None, silent=False):
    """Run *cmd* and return its exit code.

    Exits the whole process (code 2) when the command fails with a return
    code not listed in *ignore_return_codes*. Output routing: messages go
    to *log* when given, otherwise to stdout; *silent* suppresses the
    command echo.
    """
    emit_info = log.info if log else print
    emit_error = log.error if log else print
    if not silent:
        emit_info("Running: {0}".format(" ".join(cmd)))
    ret = subprocess.call(cmd, stdout=stdout)
    if ret == 0 or ret in ignore_return_codes:
        return ret
    emit_error("Return code: {0}".format(ret))
    sys.exit(2)
def run_output(cmd, ignore_return_codes=[], log=None, silent=False):
    """Run *cmd* and return its combined stdout+stderr as bytes.

    A non-zero return code listed in *ignore_return_codes* still yields the
    captured output; any other failure logs the output and return code,
    then exits the whole process (code 2).
    """
    emit_info = log.info if log else print
    emit_error = log.error if log else print
    if not silent:
        emit_info("Running: {0}".format(" ".join(cmd)))
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        if err.returncode in ignore_return_codes:
            return err.output
        emit_error("{0}\n\nReturn code: {1}".format(err.output, err.returncode))
        sys.exit(2)
def get_hash(input_file, args):
    """Return an MD5 hex digest over the file contents plus sorted *args*.

    Returns None when *input_file* is falsy or not an existing file.
    Bug fix: hashlib.update() requires bytes; the previous version fed it
    a text-mode read and raw string args, raising TypeError on Python 3.
    The file is now read in binary mode and each arg is UTF-8 encoded.
    """
    if not input_file or not os.path.isfile(input_file):
        return None
    args = sorted(args)  # make the digest independent of argument order
    m = hashlib.md5()
    with open(input_file, 'rb') as f:
        m.update(f.read())
    for arg in args:
        m.update(arg.encode('utf-8'))
    return m.hexdigest()
def find_in_cache(cache_path, input_file, args):
    """Return the cached file for (input_file, args), or None on a miss."""
    digest = get_hash(input_file, args)
    if digest:
        candidate = os.path.join(cache_path, digest)
        if os.path.isfile(candidate):
            return candidate
    return None
def cache_file(cache_path, fname, input_file, args):
    """Copy *fname* into the cache, keyed by the hash of (input_file, args).

    Silently does nothing when *fname* is missing or the key cannot be
    computed.
    """
    if not os.path.isfile(fname):
        return
    digest = get_hash(input_file, args)
    if digest:
        shutil.copy(fname, os.path.join(cache_path, digest))
def replace_in_file(fname, pats, out_name=None, log=None):
    """Replaces patterns in a given file.

    Args:
        fname: path of the file to edit.
        pats: iterable of (guard, pattern, repl) triples; a substitution is
            applied to a line only when the guard regex matches it (an
            empty guard matches every line).
        out_name: optional output path; defaults to rewriting fname in place.
        log: optional logger used for error reporting.
    """
    if not os.path.isfile(fname):
        if log:
            # Bug fix: logging uses lazy %-style formatting, so the previous
            # "{0}" str.format placeholder was never substituted into the
            # message.
            log.error("'%s' does not exist", fname)
        return
    with open(fname, 'r') as f:
        lines = f.readlines()

    def edit_line(line):
        # apply every matching substitution, in order, to this line
        for (guard, pattern, repl) in pats:
            if guard == "" or re.search(guard, line):
                line = re.sub(pattern, repl, line)
        return line

    result = [edit_line(line) for line in lines]
    if not out_name:
        out_name = fname
    with open(out_name, 'w') as f:
        f.write("".join(result))
def find_in_file(fname, pat, groups=None):
    """Search *fname* for regex *pat*.

    Returns None when the file is missing or the pattern does not match.
    With no *groups*, returns the whole match; with a single group index or
    name, that group's value; with a list of groups, a list of their values.
    """
    if not os.path.isfile(fname):
        return None
    with open(fname, 'r') as f:
        contents = f.read()
    match = re.search(pat, contents)
    if match is None:
        return None
    if not groups:
        return match.group(0)
    if isinstance(groups, list):
        return [match.group(g) for g in groups]
    return match.group(groups)
def remove_all(base, name_pat):
    """Delete every regular file under *base* matching glob *name_pat*."""
    for path in glob.glob(os.path.join(base, name_pat)):
        if os.path.isfile(path):
            os.remove(path)
def remove_files(files):
    """Delete each path in *files* that exists as a regular file."""
    for path in filter(os.path.isfile, files):
        os.remove(path)
|
#!/usr/bin/env python
# pylint: disable=no-member
"""
Run the sample-metadata API through the analysis runner
in a very generic, customisable way!
For example:
python scripts/arbitrary_sm.py \
sample get_sample_by_external_id \
--json '{"project": "acute-care", "external_id": "<external-id>"}'
"""
import subprocess
from typing import List
import argparse
import json
import sample_metadata
def run_sm(
    api_name: str, method_name: str, args: List[str] = None, kwargs: dict = None
):
    """
    Use the sample metadata API based on:
    :param api_name: pure name of API, eg: 'analysis'
    :param method_name: name of method in snake case
    :param args: positional args of endpoint
    :param kwargs: keyword arguments of endpoint, note:
        POST requests have funny kwarg names, eg:
        'body_get_samples_by_criteria_api_v1_sample_post'
    :return: whatever the resolved API method returns
    """
    # eg: 'sample' -> 'SampleApi'
    api_class_name = api_name.title() + 'Api'
    api = getattr(sample_metadata.api, api_class_name)
    api_instance = api()
    # resolve the endpoint method by name and forward args/kwargs
    response = getattr(api_instance, method_name)(*(args or []), **(kwargs or {}))
    return response
def from_args(args):
    """Collect args from argparser, and call 'run_sm'"""
    positional_args: List[str] = args.args
    kwargs = {}
    if args.file_to_localise:
        # copy each GS object into the working directory so the called API
        # method can reference it by basename
        for file in args.file_to_localise:
            subprocess.check_output(['gsutil', 'cp', file, '.'])
    json_str = args.json
    if json_str:
        # --json supplies the endpoint keyword arguments as a JSON object
        kwargs = json.loads(json_str)
    return run_sm(
        api_name=args.api_name,
        method_name=args.method_name,
        args=positional_args,
        kwargs=kwargs,
    )
def main(args=None):
    """Main function, parses sys.argv (or *args* when given) and prints the
    API response."""
    parser = argparse.ArgumentParser('Arbitrary sample-metadata script')
    parser.add_argument('api_name')
    parser.add_argument('method_name')
    parser.add_argument(
        '--file-to-localise', action='append', help='List of GS files to localise'
    )
    parser.add_argument('--json', help='JSON encoded dictionary for kwargs')
    parser.add_argument(
        '--args',
        nargs='+',
        help='any positional arguments to pass to the API',
    )
    response = from_args(parser.parse_args(args))
    print(response)
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Unlimited developers
"""
Tests the electrum call 'blockchain.transaction.get_merkle'
"""
import asyncio
from test_framework.util import assert_equal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.electrumutil import (
ERROR_CODE_INVALID_PARAMS,
ElectrumConnection,
assert_response_error,
bitcoind_electrum_args,
wait_for_electrum_mempool,
)
class ElectrumGetMerkle(BitcoinTestFramework):
    """Functional test for the electrum call 'blockchain.transaction.get_merkle'."""

    def __init__(self):
        super().__init__()
        # Single node, fresh chain, with the electrum server enabled.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [bitcoind_electrum_args()]

    def run_test(self):
        n = self.nodes[0]
        # Generate 110 blocks so coinbase outputs mature and the node has
        # spendable funds for sendtoaddress below.
        n.generate(110)

        async def async_tests(loop):
            cli = ElectrumConnection(loop)
            await cli.connect()
            await self.test_basic(n, cli)
            #await cli.disconnect();
        # TODO: Test the merkle proof
        loop = asyncio.get_event_loop()
        loop.run_until_complete(async_tests(loop))

    async def test_basic(self, n, cli):
        """Check error on unconfirmed tx, then a valid get_merkle response."""
        txid = n.sendtoaddress(n.getnewaddress(), 1)
        wait_for_electrum_mempool(n, count = 1)

        # Invalid request, should throw "not confirmed" error
        await assert_response_error(
            lambda: cli.call("blockchain.transaction.get_merkle", txid),
            ERROR_CODE_INVALID_PARAMS,
            "is not confirmed in a block")
        # Confirm the transaction; the electrum mempool should drain.
        n.generate(1)
        wait_for_electrum_mempool(n, count = 0)

        # Test valid request
        height = n.getblockcount()
        res1 = await cli.call("blockchain.transaction.get_merkle", txid, height)

        # ElectrsCash allows height to be optional (outside of specification)
        res2 = await cli.call("blockchain.transaction.get_merkle", txid)
        assert_equal(res1, res2)

        assert_equal(height, res1['block_height'])
        assert 'merkle' in res1
        # Only transaction beside the coinbase, so position 1 in the block.
        assert_equal(1, res1['pos'])
# Script entry point: run the functional test directly.
if __name__ == '__main__':
    ElectrumGetMerkle().main()
|
import itertools
from hazelcast.future import combine_futures, ImmediateFuture
from hazelcast.near_cache import NearCache
from hazelcast.protocol.codec import map_add_entry_listener_codec, map_add_entry_listener_to_key_codec, \
map_add_entry_listener_with_predicate_codec, map_add_entry_listener_to_key_with_predicate_codec, \
map_add_index_codec, map_clear_codec, map_contains_key_codec, map_contains_value_codec, map_delete_codec, \
map_entry_set_codec, map_entries_with_predicate_codec, map_evict_codec, map_evict_all_codec, map_flush_codec, \
map_force_unlock_codec, map_get_codec, map_get_all_codec, map_get_entry_view_codec, map_is_empty_codec, \
map_is_locked_codec, map_key_set_codec, map_key_set_with_predicate_codec, map_load_all_codec, \
map_load_given_keys_codec, map_lock_codec, map_put_codec, map_put_all_codec, map_put_if_absent_codec, \
map_put_transient_codec, map_size_codec, map_remove_codec, map_remove_if_same_codec, \
map_remove_entry_listener_codec, map_replace_codec, map_replace_if_same_codec, map_set_codec, map_try_lock_codec, \
map_try_put_codec, map_try_remove_codec, map_unlock_codec, map_values_codec, map_values_with_predicate_codec, \
map_add_interceptor_codec, map_execute_on_all_keys_codec, map_execute_on_key_codec, map_execute_on_keys_codec, \
map_execute_with_predicate_codec, map_add_near_cache_entry_listener_codec
from hazelcast.proxy.base import Proxy, EntryEvent, EntryEventType, get_entry_listener_flags
from hazelcast.util import check_not_none, thread_id, to_millis
class Map(Proxy):
    """Client-side proxy for a distributed Hazelcast Map.

    All operations are asynchronous and return futures. Keys and values are
    serialized to ``Data`` before invocation. The ``*_internal`` methods carry
    the actual invocations so subclasses (e.g. the near-cache variant) can
    intercept them without re-implementing validation/serialization.
    """

    def add_entry_listener(self, include_value=False, key=None, predicate=None, added=None, removed=None, updated=None,
                           evicted=None, evict_all=None, clear_all=None, merged=None, expired=None):
        """Register a continuous entry listener and return its registration id.

        Only event kinds with a callback supplied are subscribed to (encoded
        into ``flags``); ``key`` and/or ``predicate`` narrow the scope.
        """
        flags = get_entry_listener_flags(added=added, removed=removed, updated=updated,
                                         evicted=evicted, evict_all=evict_all, clear_all=clear_all, merged=merged,
                                         expired=expired)
        # Pick the codec matching the requested scope (key and/or predicate).
        if key and predicate:
            key_data = self._to_data(key)
            predicate_data = self._to_data(predicate)
            request = map_add_entry_listener_to_key_with_predicate_codec.encode_request(self.name, key_data,
                                                                                        predicate_data, include_value,
                                                                                        flags, False)
        elif key and not predicate:
            key_data = self._to_data(key)
            request = map_add_entry_listener_to_key_codec.encode_request(self.name, key_data, include_value, flags,
                                                                         False)
        elif not key and predicate:
            predicate = self._to_data(predicate)
            request = map_add_entry_listener_with_predicate_codec.encode_request(self.name, predicate, include_value,
                                                                                 flags, False)
        else:
            request = map_add_entry_listener_codec.encode_request(self.name, include_value, flags, False)

        def handle_event_entry(**_kwargs):
            # Dispatch the decoded event to the callback for its event type.
            event = EntryEvent(self._to_object, **_kwargs)
            if event.event_type == EntryEventType.added:
                added(event)
            elif event.event_type == EntryEventType.removed:
                removed(event)
            elif event.event_type == EntryEventType.updated:
                updated(event)
            elif event.event_type == EntryEventType.evicted:
                evicted(event)
            elif event.event_type == EntryEventType.evict_all:
                evict_all(event)
            elif event.event_type == EntryEventType.clear_all:
                clear_all(event)
            elif event.event_type == EntryEventType.merged:
                merged(event)
            elif event.event_type == EntryEventType.expired:
                expired(event)

        return self._start_listening(request, lambda m: map_add_entry_listener_codec.handle(m, handle_event_entry),
                                     lambda r: map_add_entry_listener_codec.decode_response(r)['response'])

    def add_index(self, attribute, ordered=False):
        """Add an index on ``attribute`` (ordered for range queries)."""
        return self._encode_invoke(map_add_index_codec, attribute=attribute, ordered=ordered)

    def add_interceptor(self, interceptor):
        """Add a server-side interceptor for this map's operations."""
        return self._encode_invoke(map_add_interceptor_codec, interceptor=self._to_data(interceptor))

    def clear(self):
        """Remove all entries from the map."""
        return self._encode_invoke(map_clear_codec)

    def contains_key(self, key):
        """Return (a future of) whether ``key`` is present in the map.

        :param key: the key to check; must not be None.
        :return: future resolving to a bool.
        """
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._contains_key_internal(key_data)

    def contains_value(self, value):
        """Return (a future of) whether ``value`` occurs in the map."""
        check_not_none(value, "value can't be None")
        value_data = self._to_data(value)
        return self._encode_invoke(map_contains_value_codec, value=value_data)

    def delete(self, key):
        """Remove ``key`` without returning the old value (cheaper than remove)."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._delete_internal(key_data)

    def entry_set(self, predicate=None):
        """Return the map's entries, optionally filtered by ``predicate``."""
        if predicate:
            predicate_data = self._to_data(predicate)
            return self._encode_invoke(map_entries_with_predicate_codec, predicate=predicate_data)
        else:
            return self._encode_invoke(map_entry_set_codec)

    def evict(self, key):
        """Evict ``key`` from the map's storage (not the backing store)."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._evict_internal(key_data)

    def evict_all(self):
        """Evict all entries from the map's storage."""
        return self._encode_invoke(map_evict_all_codec)

    def execute_on_entries(self, entry_processor, predicate=None):
        """Run ``entry_processor`` on all (or predicate-matching) entries."""
        if predicate:
            return self._encode_invoke(map_execute_with_predicate_codec, entry_processor=self._to_data(entry_processor),
                                       predicate=self._to_data(predicate))
        return self._encode_invoke(map_execute_on_all_keys_codec, entry_processor=self._to_data(entry_processor))

    def execute_on_key(self, key, entry_processor):
        """Run ``entry_processor`` on the entry for ``key``."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._execute_on_key_internal(key_data, entry_processor)

    def execute_on_keys(self, keys, entry_processor):
        """Run ``entry_processor`` on the entries for all ``keys``."""
        key_list = []
        for key in keys:
            check_not_none(key, "key can't be None")
            key_list.append(self._to_data(key))
        return self._encode_invoke(map_execute_on_keys_codec, entry_processor=self._to_data(entry_processor),
                                   keys=key_list)

    def flush(self):
        """Flush dirty entries to the configured MapStore."""
        return self._encode_invoke(map_flush_codec)

    def force_unlock(self, key):
        """Release the lock on ``key`` regardless of the owner."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._encode_invoke_on_key(map_force_unlock_codec, key_data, key=key_data)

    def get(self, key):
        """Return (a future of) the value mapped to ``key``, or None.

        :param key: the key to look up; must not be None.
        :return: future resolving to the deserialized value or None.
        """
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._get_internal(key_data)

    def get_all(self, keys):
        """Return (a future of) a dict of the entries for ``keys``.

        Keys are grouped by owning partition so one invocation is sent per
        partition instead of one per key.
        """
        check_not_none(keys, "keys can't be None")
        if not keys:
            return ImmediateFuture({})
        partition_service = self._client.partition_service
        partition_to_keys = {}

        for key in keys:
            check_not_none(key, "key can't be None")
            key_data = self._to_data(key)
            partition_id = partition_service.get_partition_id(key_data)
            try:
                partition_to_keys[partition_id][key] = key_data
            except KeyError:
                partition_to_keys[partition_id] = {key: key_data}

        return self._get_all_internal(partition_to_keys)

    def get_entry_view(self, key):
        """Return the EntryView (stats/metadata) for ``key``."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._encode_invoke_on_key(map_get_entry_view_codec, key_data, key=key_data, thread_id=thread_id())

    def is_empty(self):
        """Return (a future of) whether the map has no entries."""
        return self._encode_invoke(map_is_empty_codec)

    def is_locked(self, key):
        """Return (a future of) whether ``key`` is currently locked."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._encode_invoke_on_key(map_is_locked_codec, key_data, key=key_data)

    def key_set(self, predicate=None):
        """Return the map's keys, optionally filtered by ``predicate``."""
        if predicate:
            predicate_data = self._to_data(predicate)
            return self._encode_invoke(map_key_set_with_predicate_codec, predicate=predicate_data)
        else:
            return self._encode_invoke(map_key_set_codec)

    def load_all(self, keys=None, replace_existing_values=True):
        """Load the given keys (or all keys) from the configured MapLoader."""
        if keys:
            # Materialize the list: on Python 3 ``map()`` would yield a
            # one-shot iterator, which subclasses iterate (for near-cache
            # invalidation) before the request is encoded.
            key_data_list = [self._to_data(key) for key in keys]
            return self._load_all_internal(key_data_list, replace_existing_values)
        else:
            return self._encode_invoke(map_load_all_codec, replace_existing_values=replace_existing_values)

    def lock(self, key, ttl=-1):
        """Acquire the lock for ``key``; auto-release after ``ttl`` seconds."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._encode_invoke_on_key(map_lock_codec, key_data, key=key_data, thread_id=thread_id(), ttl=to_millis(ttl))

    def put(self, key, value, ttl=-1):
        """Associate ``key`` with ``value`` and return the previous value.

        :param key: key to store; must not be None.
        :param value: value to store; must not be None.
        :param ttl: seconds until the entry expires (-1 means map default).
        :return: future resolving to the previous value or None.
        """
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._put_internal(key_data, value_data, ttl)

    def put_all(self, map):
        """Copy all entries of the dict ``map`` into this distributed map.

        Entries are grouped by owning partition and sent with one invocation
        per partition; the returned future completes when all of them have.
        """
        check_not_none(map, "map can't be None")
        if not map:
            return ImmediateFuture(None)
        partition_service = self._client.partition_service
        partition_map = {}

        # .items() (not the Python-2-only .iteritems()) keeps this working on
        # both Python 2 and 3.
        for key, value in map.items():
            check_not_none(key, "key can't be None")
            check_not_none(value, "value can't be None")
            entry = (self._to_data(key), self._to_data(value))
            partition_id = partition_service.get_partition_id(entry[0])
            try:
                partition_map[partition_id].append(entry)
            except KeyError:
                partition_map[partition_id] = [entry]

        futures = []
        for partition_id, entry_list in partition_map.items():
            future = self._encode_invoke_on_partition(map_put_all_codec, partition_id, entries=dict(entry_list))
            futures.append(future)

        return combine_futures(*futures)

    def put_if_absent(self, key, value, ttl=-1):
        """Store ``value`` only if ``key`` is absent; return the existing value."""
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._put_if_absent_internal(key_data, value_data, ttl)

    def put_transient(self, key, value, ttl=-1):
        """Store the entry without writing it through to the MapStore."""
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._put_transient_internal(key_data, value_data, ttl)

    def remove(self, key):
        """Remove ``key`` and return the value that was mapped to it."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._remove_internal(key_data)

    def remove_if_same(self, key, value):
        """Remove the entry only if it currently maps ``key`` to ``value``."""
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._remove_if_same_internal_(key_data, value_data)

    def remove_entry_listener(self, registration_id):
        """Deregister the entry listener with ``registration_id``."""
        return self._stop_listening(registration_id,
                                    lambda i: map_remove_entry_listener_codec.encode_request(self.name, i))

    def replace(self, key, value):
        """Replace the entry for ``key`` only if present; return the old value."""
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._replace_internal(key_data, value_data)

    def replace_if_same(self, key, old_value, new_value):
        """Replace the entry only if it currently maps ``key`` to ``old_value``."""
        check_not_none(key, "key can't be None")
        check_not_none(old_value, "old_value can't be None")
        check_not_none(new_value, "new_value can't be None")
        key_data = self._to_data(key)
        old_value_data = self._to_data(old_value)
        new_value_data = self._to_data(new_value)
        return self._replace_if_same_internal(key_data, old_value_data, new_value_data)

    def set(self, key, value, ttl=-1):
        """Associate ``key`` with ``value`` without returning the old value.

        :param key: key to store; must not be None.
        :param value: value to store; must not be None.
        :param ttl: seconds until the entry expires (-1 means map default).
        :return: future resolving to None.
        """
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._set_internal(key_data, value_data, ttl)

    def size(self):
        """Return (a future of) the number of entries in the map."""
        return self._encode_invoke(map_size_codec)

    def try_lock(self, key, ttl=-1, timeout=0):
        """Try to lock ``key`` within ``timeout`` seconds; lease lasts ``ttl``."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._encode_invoke_on_key(map_try_lock_codec, key_data, key=key_data, thread_id=thread_id(), lease=to_millis(ttl),
                                          timeout=to_millis(timeout))

    def try_put(self, key, value, timeout=0):
        """Try to put within ``timeout`` seconds; resolves to success bool."""
        check_not_none(key, "key can't be None")
        check_not_none(value, "value can't be None")
        key_data = self._to_data(key)
        value_data = self._to_data(value)
        return self._try_put_internal(key_data, value_data, timeout)

    def try_remove(self, key, timeout=0):
        """Try to remove within ``timeout`` seconds; resolves to success bool."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._try_remove_internal(key_data, timeout)

    def unlock(self, key):
        """Release this thread's lock on ``key``."""
        check_not_none(key, "key can't be None")
        key_data = self._to_data(key)
        return self._encode_invoke_on_key(map_unlock_codec, key_data, key=key_data, thread_id=thread_id())

    def values(self, predicate=None):
        """Return the map's values, optionally filtered by ``predicate``."""
        if predicate:
            predicate_data = self._to_data(predicate)
            return self._encode_invoke(map_values_with_predicate_codec, predicate=predicate_data)
        else:
            return self._encode_invoke(map_values_codec)

    # internals: raw invocations on already-serialized data; subclass hooks.

    def _contains_key_internal(self, key_data):
        return self._encode_invoke_on_key(map_contains_key_codec, key_data, key=key_data, thread_id=thread_id())

    def _get_internal(self, key_data):
        return self._encode_invoke_on_key(map_get_codec, key_data, key=key_data, thread_id=thread_id())

    def _get_all_internal(self, partition_to_keys, futures=None):
        if futures is None:
            futures = []
        for partition_id, key_dict in partition_to_keys.items():
            # list() guards against a dict view being iterated after further
            # mutation (the near-cache subclass prunes key dicts).
            future = self._encode_invoke_on_partition(map_get_all_codec, partition_id,
                                                      keys=list(key_dict.values()))
            futures.append(future)

        def merge(f):
            # f.result() is a list of per-partition entry lists; flatten into
            # one {key: value} dict.
            return dict(itertools.chain(*f.result()))

        return combine_futures(*futures).continue_with(merge)

    def _remove_internal(self, key_data):
        return self._encode_invoke_on_key(map_remove_codec, key_data, key=key_data, thread_id=thread_id())

    def _remove_if_same_internal_(self, key_data, value_data):
        return self._encode_invoke_on_key(map_remove_if_same_codec, key_data, key=key_data, value=value_data,
                                          thread_id=thread_id())

    def _delete_internal(self, key_data):
        return self._encode_invoke_on_key(map_delete_codec, key_data, key=key_data, thread_id=thread_id())

    def _put_internal(self, key_data, value_data, ttl):
        return self._encode_invoke_on_key(map_put_codec, key_data, key=key_data, value=value_data, thread_id=thread_id(),
                                          ttl=to_millis(ttl))

    def _set_internal(self, key_data, value_data, ttl):
        return self._encode_invoke_on_key(map_set_codec, key_data, key=key_data, value=value_data, thread_id=thread_id(),
                                          ttl=to_millis(ttl))

    def _try_remove_internal(self, key_data, timeout):
        return self._encode_invoke_on_key(map_try_remove_codec, key_data, key=key_data, thread_id=thread_id(),
                                          timeout=to_millis(timeout))

    def _try_put_internal(self, key_data, value_data, timeout):
        return self._encode_invoke_on_key(map_try_put_codec, key_data, key=key_data, value=value_data,
                                          thread_id=thread_id(), timeout=to_millis(timeout))

    def _put_transient_internal(self, key_data, value_data, ttl):
        return self._encode_invoke_on_key(map_put_transient_codec, key_data, key=key_data, value=value_data,
                                          thread_id=thread_id(), ttl=to_millis(ttl))

    def _put_if_absent_internal(self, key_data, value_data, ttl):
        return self._encode_invoke_on_key(map_put_if_absent_codec, key_data, key=key_data, value=value_data,
                                          thread_id=thread_id(), ttl=to_millis(ttl))

    def _replace_if_same_internal(self, key_data, old_value_data, new_value_data):
        return self._encode_invoke_on_key(map_replace_if_same_codec, key_data, key=key_data, test_value=old_value_data,
                                          value=new_value_data, thread_id=thread_id())

    def _replace_internal(self, key_data, value_data):
        return self._encode_invoke_on_key(map_replace_codec, key_data, key=key_data, value=value_data, thread_id=thread_id())

    def _evict_internal(self, key_data):
        return self._encode_invoke_on_key(map_evict_codec, key_data, key=key_data, thread_id=thread_id())

    def _load_all_internal(self, key_data_list, replace_existing_values):
        return self._encode_invoke(map_load_given_keys_codec, keys=key_data_list, replace_existing_values=replace_existing_values)

    def _execute_on_key_internal(self, key_data, entry_processor):
        return self._encode_invoke_on_key(map_execute_on_key_codec, key_data, key=key_data,
                                          entry_processor=self._to_data(entry_processor), thread_id=thread_id())
class MapFeatNearCache(Map):
    """Map proxy variant that serves reads from a client-side near cache.

    Every mutating operation invalidates the affected locally-cached entries;
    reads hit the near cache first and fall back to the cluster on a miss.
    """

    def __init__(self, client, service_name, name):
        super(MapFeatNearCache, self).__init__(client, service_name, name)
        near_cache_config = client.config.near_cache_configs.get(name, None)
        if near_cache_config is None:
            raise ValueError("NearCache config cannot be None here!")
        self._invalidation_listener_id = None
        self._near_cache = create_near_cache(client.serialization_service, near_cache_config)
        if near_cache_config.invalidate_on_change:
            # Subscribe to server-side invalidation events so the local cache
            # stays consistent with remote mutations.
            self._add_near_cache_invalidation_listener()

    def clear(self):
        """Clear the near cache together with the distributed map."""
        self._near_cache.clear()
        return super(MapFeatNearCache, self).clear()

    def evict_all(self):
        """Drop the near cache, then evict all entries remotely."""
        self._near_cache.clear()
        return super(MapFeatNearCache, self).evict_all()

    def load_all(self, keys=None, replace_existing_values=True):
        """Load entries from the MapLoader, dropping the cache when a full
        replacing load makes every cached value potentially stale."""
        if keys is None and replace_existing_values:
            self._near_cache.clear()
        return super(MapFeatNearCache, self).load_all(keys, replace_existing_values)

    def _on_destroy(self):
        self._remove_near_cache_invalidation_listener()
        self._near_cache.clear()
        super(MapFeatNearCache, self)._on_destroy()

    def _add_near_cache_invalidation_listener(self):
        def handle(message):
            map_add_near_cache_entry_listener_codec.handle(message, self._handle_invalidation, self._handle_batch_invalidation)

        def handle_decode(message):
            return map_add_near_cache_entry_listener_codec.decode_response(message)['response']

        try:
            request = map_add_near_cache_entry_listener_codec.encode_request(self.name, EntryEventType.invalidation, False)
            self._invalidation_listener_id = self._start_listening(request, handle, handle_decode)
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt. The near cache still works without remote
            # invalidation; log loudly and carry on.
            self.logger.severe("-----------------\n Near Cache is not initialized!!! \n-----------------")

    def _remove_near_cache_invalidation_listener(self):
        if self._invalidation_listener_id:
            self.remove_entry_listener(self._invalidation_listener_id)

    def _handle_invalidation(self, key_data):
        # null key means near cache has to remove all entries in it.
        # see MapAddNearCacheEntryListenerMessageTask.
        if key_data is None:
            self._near_cache.clear()
        else:
            # Tolerate keys that are no longer cached (an unguarded ``del``
            # would raise KeyError inside the listener callback).
            self._invalidate_cache(key_data)

    def _handle_batch_invalidation(self, key_data_list):
        # Same KeyError tolerance as _handle_invalidation, applied per key.
        self._invalidate_cache_batch(key_data_list)

    def _invalidate_cache(self, key_data):
        try:
            del self._near_cache[key_data]
        except KeyError:
            # There is nothing to invalidate
            pass

    def _invalidate_cache_batch(self, key_data_list):
        for key_data in key_data_list:
            try:
                del self._near_cache[key_data]
            except KeyError:
                # There is nothing to invalidate
                pass

    # internals: serve from / maintain the near cache, then defer to Map.

    def _contains_key_internal(self, key_data):
        try:
            return self._near_cache[key_data]
        except KeyError:
            return super(MapFeatNearCache, self)._contains_key_internal(key_data)

    def _get_internal(self, key_data):
        try:
            value = self._near_cache[key_data]
            return ImmediateFuture(value)
        except KeyError:
            # Miss: fetch remotely and populate the cache on completion.
            future = super(MapFeatNearCache, self)._get_internal(key_data)
            return future.continue_with(self._update_cache, key_data)

    def _update_cache(self, f, key_data):
        self._near_cache.__setitem__(key_data, f.result())
        return f.result()

    def _get_all_internal(self, partition_to_keys, futures=None):
        if futures is None:
            futures = []
        # .values()/list() instead of the Python-2-only .itervalues(), and a
        # copied key list because cached keys are deleted while iterating
        # (mutating a live dict view during iteration raises on Python 3).
        for key_dic in partition_to_keys.values():
            for key in list(key_dic):
                try:
                    value = self._near_cache[key_dic[key]]
                except KeyError:
                    continue
                # Served locally: answer immediately and drop it from the
                # remote request.
                futures.append(ImmediateFuture((key, value)))
                del key_dic[key]
        return super(MapFeatNearCache, self)._get_all_internal(partition_to_keys, futures)

    def _try_remove_internal(self, key_data, timeout):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._try_remove_internal(key_data, timeout)

    def _try_put_internal(self, key_data, value_data, timeout):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._try_put_internal(key_data, value_data, timeout)

    def _set_internal(self, key_data, value_data, ttl):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._set_internal(key_data, value_data, ttl)

    def _replace_internal(self, key_data, value_data):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._replace_internal(key_data, value_data)

    def _replace_if_same_internal(self, key_data, old_value_data, new_value_data):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._replace_if_same_internal(key_data, old_value_data, new_value_data)

    def _remove_internal(self, key_data):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._remove_internal(key_data)

    def _remove_if_same_internal_(self, key_data, value_data):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._remove_if_same_internal_(key_data, value_data)

    def _put_transient_internal(self, key_data, value_data, ttl):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._put_transient_internal(key_data, value_data, ttl)

    def _put_internal(self, key_data, value_data, ttl):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._put_internal(key_data, value_data, ttl)

    def _put_if_absent_internal(self, key_data, value_data, ttl):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._put_if_absent_internal(key_data, value_data, ttl)

    def _load_all_internal(self, key_data_list, replace_existing_values):
        self._invalidate_cache_batch(key_data_list)
        return super(MapFeatNearCache, self)._load_all_internal(key_data_list, replace_existing_values)

    def _execute_on_key_internal(self, key_data, entry_processor):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._execute_on_key_internal(key_data, entry_processor)

    def _evict_internal(self, key_data):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._evict_internal(key_data)

    def _delete_internal(self, key_data):
        self._invalidate_cache(key_data)
        return super(MapFeatNearCache, self)._delete_internal(key_data)
def create_near_cache(serialization_service, near_cache_config):
    """Instantiate a NearCache from the given near-cache configuration."""
    cfg = near_cache_config
    return NearCache(
        serialization_service,
        cfg.in_memory_format,
        cfg.time_to_live_seconds,
        cfg.max_idle_seconds,
        cfg.invalidate_on_change,
        cfg.eviction_policy,
        cfg.eviction_max_size,
        cfg.eviction_sampling_count,
        cfg.eviction_sampling_pool_size,
    )
def create_map_proxy(client, service_name, name, **kwargs):
    """Factory: a near-cache-backed proxy when one is configured, else a plain Map."""
    near_cache_config = client.config.near_cache_configs.get(name, None)
    proxy_cls = Map if near_cache_config is None else MapFeatNearCache
    return proxy_cls(client=client, service_name=service_name, name=name)
|
import pytest
from gradgpad import Approach, ResultsProvider
def check_all_values(nested_dictionary):
    """Recursively assert every float in a nested dict is a percentage in [0, 100].

    :param nested_dictionary: dict whose values are floats, nested dicts, or
        other types (which are ignored).
    :raises AssertionError: if any float value is negative or above 100.
    """
    for key, value in nested_dictionary.items():
        # isinstance (not ``type(value) is dict``) so dict subclasses such as
        # OrderedDict/defaultdict are also traversed.
        if isinstance(value, dict):
            check_all_values(value)
        elif isinstance(value, float):
            assert value >= 0.0, f"{key} is negative ({value})"
            assert value <= 100.0, f"{key} is larger than 100 ({value})"
@pytest.mark.unit
@pytest.mark.parametrize(
    "approach",
    # Two approaches are excluded here; presumably they lack published
    # results — TODO confirm against the results data.
    Approach.options_excluding(
        [Approach.QUALITY_RBF_BALANCED, Approach.CONTINUAL_LEARNING_AUXILIARY]
    ),
)
def test_should_check_results_are_ok(approach: Approach):
    """Every numeric result reported for an approach must lie in [0, 100]."""
    results = ResultsProvider.all(approach)
    check_all_values(results)
|
import numpy as np
# Demonstrate NumPy "fancy" (integer-array) indexing: select a 3-wide window
# from each row of a 5x10 matrix, starting at a per-row column offset.
row = np.arange(0, 10).reshape(1, 10)
episodes = np.vstack([row] * 5)

print('Before indexing:')
print(episodes)

# Starting column of the 3-element window for each row.
row_to_col = {
    0: 3,
    1: 4,
    2: 5,
    3: 0,
    4: 1,
}

# Paired index lists: row i contributes columns start, start+1, start+2.
row_indices = [i for i in range(5) for _ in range(3)]
col_indices = [row_to_col[i] + offset for i in range(5) for offset in range(3)]

print('After indexing:')
print(episodes[row_indices, col_indices].reshape(-1, 3))
# -*- coding: utf-8 -*-
#
# Ashes documentation build configuration file, created by
# sphinx-quickstart on Thursday June 25 03:02:01 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
import os
import sys
import sphinx
# Make the project root and the package itself importable for autodoc.
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_PATH = os.path.abspath(os.path.join(CUR_PATH, '..'))
PACKAGE_PATH = os.path.abspath(os.path.join(CUR_PATH, '..', 'ashes'))
sys.path.insert(0, PROJECT_PATH)
sys.path.insert(0, PACKAGE_PATH)
# -- General configuration ------------------------------------------------

needs_sphinx = '1.2'

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
]

# Read the Docs is version 1.2 as of writing.
# napoleon ships with Sphinx from 1.3 onwards; on older versions fall back
# to the standalone sphinxcontrib-napoleon package.
if sphinx.version_info[:2] < (1, 3):
    extensions.append('sphinxcontrib.napoleon')
else:
    extensions.append('sphinx.ext.napoleon')

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

# General information about the project.
project = u'Ashes'
copyright = u'2015, Mahmoud Hashemi'
author = u'Mahmoud Hashemi'

# |version| and |release|
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'

# '%e' is not supported by strftime on Windows — presumably why this format
# is only set off of 'nt'.
if os.name != 'nt':
    today_fmt = '%B %e, %Y'

# Added to the end of all rst files before rendering
rst_epilog = ""

exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None)}
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. Read the Docs injects its own theme, so the
# RTD theme is only configured for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

#html_theme_options = {}
#html_theme_path = []
#html_title = None
#html_short_title = None
#html_logo = None
#html_favicon = None

html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'Ashesdoc'

# -- Options for LaTeX output ---------------------------------------------

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Ashes.tex', u'Ashes Documentation',
     u'Mahmoud Hashemi', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'ashes', u'Ashes Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Ashes', u'Ashes Documentation',
     author, 'Ashes', 'One line description of project.',
     'Miscellaneous'),
]
|
import sys
import os
import unittest
import uuid
def _create_random_voice(app):
    """Persist a Voice with random attribute values.

    Returns the tuple (id, name, language, accent, gender, directory) that
    was stored, so tests can assert the endpoint echoes it back.
    """
    voice_id = uuid.uuid4().hex[:3]
    name = 'vn_' + uuid.uuid4().hex[:4]
    language = uuid.uuid4().hex[:2]
    accent = uuid.uuid4().hex[:2]
    gender = uuid.uuid4().hex[:5]
    directory = uuid.uuid4().hex[:5]

    with app.app_context():
        from app.models.voice import Voice
        Voice.new_voice(voice_id, name, language, accent, gender, directory)

    return (voice_id, name, language, accent, gender, directory)
class VoicesTest(unittest.TestCase):
    """Integration tests for the /voices listing and /voices/<id> detail endpoints."""

    def setUp(self):
        # Fresh app/client/db per test; imported lazily so the module can be
        # loaded without the integration environment present.
        from integrationtest import setup_app
        self.app, self.client, self.db = setup_app()

    def tearDown(self):
        # delete the database data
        with self.app.app_context():
            from app.models.voice import Voice
            self.db.session.query(Voice).delete()
            self.db.session.commit()

    def test_voices_with_empty_db(self):
        """An empty voice table yields 204 No Content."""
        # act
        resp = self.client.post('/voices')
        # assert
        self.assertEqual(resp.status_code, 204)

    def test_voices_with_non_empty_db(self):
        """A stored voice is returned by the listing; 'directory' stays internal."""
        id, name, lang, acc, gender, dir = _create_random_voice(self.app)
        # act
        resp = self.client.post('/voices')
        # assert
        self.assertEqual(resp.status_code, 200)
        self.assertIn('voices', resp.json)
        self.assertEqual(id, resp.json['voices'][0]['id'])
        self.assertEqual(name, resp.json['voices'][0]['name'])
        self.assertEqual(lang, resp.json['voices'][0]['language'])
        self.assertEqual(acc, resp.json['voices'][0]['accent'])
        self.assertEqual(gender, resp.json['voices'][0]['gender'])
        # directory is not in the response payload; check it in the DB instead
        with self.app.app_context():
            from app.models.voice import Voice
            self.assertEqual(dir, Voice.query.get(id).directory)

    def test_voices_params_with_acc_and_no_lang(self):
        """Filtering by accent without a language is a 400 client error."""
        acc = uuid.uuid4().hex[:2]
        # act
        resp = self.client.post('/voices', json={'accent': acc})
        # assert
        self.assertEqual(resp.status_code, 400, resp.data)
        self.assertIn('message', resp.json)
        self.assertIn('language has to be provided', resp.json['message'])

    def test_voices_params_with_wrong_gender(self):
        """An unknown gender value fails validation with 422."""
        gender = uuid.uuid4().hex[:5]
        # act
        resp = self.client.post('/voices', json={'gender': gender})
        # assert
        self.assertEqual(resp.status_code, 422, resp.data)
        self.assertIn('message', resp.json)
        self.assertIn('Not a valid choice', resp.json['message'])
        self.assertIn('gender', resp.json['message'])

    def test_voicedetails_with_valid_voice(self):
        """The detail endpoint echoes back the stored voice attributes."""
        id, name, lang, acc, gender, dir = _create_random_voice(self.app)
        # act
        resp = self.client.get('/voices/' + id)
        # assert
        self.assertEqual(resp.status_code, 200)
        self.assertIn('id', resp.json)
        self.assertIn('name', resp.json)
        self.assertIn('language', resp.json)
        self.assertIn('accent', resp.json)
        self.assertIn('gender', resp.json)
        self.assertEqual(id, resp.json['id'])
        self.assertEqual(name, resp.json['name'])
        self.assertEqual(lang, resp.json['language'])
        self.assertEqual(acc, resp.json['accent'])
        self.assertEqual(gender, resp.json['gender'])

    def test_voicedetails_with_invalid_voice(self):
        """An unknown voice id yields 404 with an explanatory message."""
        id, name, lang, acc, gender, dir = _create_random_voice(self.app)
        # act
        resp = self.client.get('/voices/' + id + 'invalid')
        # assert
        self.assertEqual(resp.status_code, 404)
        self.assertIn('message', resp.json)
        self.assertIn('could not be found', resp.json['message'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.